Merge "drivers: soc: qcom: Rename compatible string for Lito"
diff --git a/AndroidKernel.mk b/AndroidKernel.mk
index d60aea3..09e44e0 100644
--- a/AndroidKernel.mk
+++ b/AndroidKernel.mk
@@ -53,10 +53,10 @@
 
 ifeq ($(KERNEL_LLVM_SUPPORT), true)
   ifeq ($(KERNEL_SD_LLVM_SUPPORT), true)  #Using sd-llvm compiler
-    ifeq ($(shell echo $(SDCLANG_PATH_2) | head -c 1),/)
-       KERNEL_LLVM_BIN := $(shell pwd)/$(SDCLANG_PATH_2)/clang
+    ifeq ($(shell echo $(SDCLANG_PATH) | head -c 1),/)
+       KERNEL_LLVM_BIN := $(SDCLANG_PATH)/clang
     else
-       KERNEL_LLVM_BIN := $(shell pwd)/$(SDCLANG_PATH_2)/clang
+       KERNEL_LLVM_BIN := $(shell pwd)/$(SDCLANG_PATH)/clang
     endif
     $(warning "Using sdllvm" $(KERNEL_LLVM_BIN))
   else
diff --git a/Documentation/ABI/testing/sysfs-block-zram b/Documentation/ABI/testing/sysfs-block-zram
index c1513c7..14b2bf2 100644
--- a/Documentation/ABI/testing/sysfs-block-zram
+++ b/Documentation/ABI/testing/sysfs-block-zram
@@ -98,3 +98,42 @@
 		The backing_dev file is read-write and set up backing
 		device for zram to write incompressible pages.
 		For using, user should enable CONFIG_ZRAM_WRITEBACK.
+
+What:		/sys/block/zram<id>/idle
+Date:		November 2018
+Contact:	Minchan Kim <minchan@kernel.org>
+Description:
+		The idle file is write-only and marks zram slots as idle.
+		If the system has mounted debugfs, the user can see which
+		slots are idle via /sys/kernel/debug/zram/zram<id>/block_state
+
+What:		/sys/block/zram<id>/writeback
+Date:		November 2018
+Contact:	Minchan Kim <minchan@kernel.org>
+Description:
+		The writeback file is write-only and triggers idle and/or
+		huge page writeback to the backing device.
+
+What:		/sys/block/zram<id>/bd_stat
+Date:		November 2018
+Contact:	Minchan Kim <minchan@kernel.org>
+Description:
+		The bd_stat file is read-only and represents the backing
+		device's statistics (bd_count, bd_reads, bd_writes) in a
+		format similar to the block layer statistics file format.
+
+What:		/sys/block/zram<id>/writeback_limit_enable
+Date:		November 2018
+Contact:	Minchan Kim <minchan@kernel.org>
+Description:
+		The writeback_limit_enable file is read-write and enables or
+		disables the writeback_limit feature. "1" means enable the
+		feature. "0", which imposes no limit, is the initial state.
+
+What:		/sys/block/zram<id>/writeback_limit
+Date:		November 2018
+Contact:	Minchan Kim <minchan@kernel.org>
+Description:
+		The writeback_limit file is read-write and specifies the
+		maximum amount of writeback ZRAM can do. The limit can be
+		changed at run time.
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index 3ac4177..a7ce331 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -92,6 +92,15 @@
 Description:
 		 Controls the number of trials to find a victim segment.
 
+What:		/sys/fs/f2fs/<disk>/migration_granularity
+Date:		October 2018
+Contact:	"Chao Yu" <yuchao0@huawei.com>
+Description:
+		 Controls the migration granularity of garbage collection on a
+		 large section: it lets GC move partial segment(s) of one
+		 section in one GC cycle, dispersing a heavy-overhead GC over
+		 multiple lightweight ones.
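+		 For example, to let GC migrate one segment of a section per
+		 cycle (a usage sketch):
+			echo 1 > /sys/fs/f2fs/<disk>/migration_granularity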
+
 What:		/sys/fs/f2fs/<disk>/dir_level
 Date:		March 2014
 Contact:	"Jaegeuk Kim" <jaegeuk.kim@samsung.com>
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 1e3f86d..5449396 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2080,6 +2080,9 @@
 			off
 				Disables hypervisor mitigations and doesn't
 				emit any warnings.
+				It also drops the swap size and available
+				RAM limit restrictions on both hypervisor and
+				bare metal.
 
 			Default is 'flush'.
 
diff --git a/Documentation/admin-guide/l1tf.rst b/Documentation/admin-guide/l1tf.rst
index bae52b84..9f5924f 100644
--- a/Documentation/admin-guide/l1tf.rst
+++ b/Documentation/admin-guide/l1tf.rst
@@ -405,6 +405,9 @@
 
   off		Disables hypervisor mitigations and doesn't emit any
 		warnings.
+		It also drops the swap size and available RAM limit restrictions
+		on both hypervisor and bare metal.
+
   ============  =============================================================
 
 The default is 'flush'. For details about L1D flushing see :ref:`l1d_flush`.
@@ -576,7 +579,8 @@
   The kernel default mitigations for vulnerable processors are:
 
   - PTE inversion to protect against malicious user space. This is done
-    unconditionally and cannot be controlled.
+    unconditionally and cannot be controlled. The swap storage is limited
+    to ~16TB.
 
   - L1D conditional flushing on VMENTER when EPT is enabled for
     a guest.
diff --git a/Documentation/blockdev/zram.txt b/Documentation/blockdev/zram.txt
index 875b2b5..6e5c2bb 100644
--- a/Documentation/blockdev/zram.txt
+++ b/Documentation/blockdev/zram.txt
@@ -156,19 +156,23 @@
 A brief description of exported device attributes. For more details please
 read Documentation/ABI/testing/sysfs-block-zram.
 
-Name            access            description
-----            ------            -----------
-disksize          RW    show and set the device's disk size
-initstate         RO    shows the initialization state of the device
-reset             WO    trigger device reset
-mem_used_max      WO    reset the `mem_used_max' counter (see later)
-mem_limit         WO    specifies the maximum amount of memory ZRAM can use
-                        to store the compressed data
-max_comp_streams  RW    the number of possible concurrent compress operations
-comp_algorithm    RW    show and change the compression algorithm
-compact           WO    trigger memory compaction
-debug_stat        RO    this file is used for zram debugging purposes
-backing_dev	  RW	set up backend storage for zram to write out
+Name            	access            description
+----            	------            -----------
+disksize          	RW	show and set the device's disk size
+initstate         	RO	shows the initialization state of the device
+reset             	WO	trigger device reset
+mem_used_max      	WO	reset the `mem_used_max' counter (see later)
+mem_limit         	WO	specifies the maximum amount of memory ZRAM can use
+				to store the compressed data
+writeback_limit   	WO	specifies the maximum amount of write IO zram can
+				write out to backing device in 4KB units
+writeback_limit_enable  RW	show and set writeback_limit feature
+max_comp_streams  	RW	the number of possible concurrent compress operations
+comp_algorithm    	RW	show and change the compression algorithm
+compact           	WO	trigger memory compaction
+debug_stat        	RO	this file is used for zram debugging purposes
+backing_dev	  	RW	set up backend storage for zram to write out
+idle		  	WO	mark allocated slot as idle
 
 
 User space is advised to use the following files to read the device statistics.
@@ -220,6 +224,17 @@
  pages_compacted  the number of pages freed during compaction
  huge_pages	  the number of incompressible pages
 
+File /sys/block/zram<id>/bd_stat
+
+The bd_stat file represents the device's backing device statistics. It consists
+of a single line of text and contains the following stats separated by whitespace:
+ bd_count	size of data written to backing device.
+		Unit: 4K bytes
+ bd_reads	the number of reads from backing device
+		Unit: 4K bytes
+ bd_writes	the number of writes to backing device
+		Unit: 4K bytes
+
 9) Deactivate:
 	swapoff /dev/zram0
 	umount /dev/zram1
@@ -237,11 +252,79 @@
 
 = writeback
 
-With incompressible pages, there is no memory saving with zram.
-Instead, with CONFIG_ZRAM_WRITEBACK, zram can write incompressible page
+With CONFIG_ZRAM_WRITEBACK, zram can write an idle/incompressible page
 to backing storage rather than keeping it in memory.
-User should set up backing device via /sys/block/zramX/backing_dev
-before disksize setting.
+To use the feature, the admin should set up a backing device via
+
+	"echo /dev/sda5 > /sys/block/zramX/backing_dev"
+
+before setting disksize. Only a partition is supported at the moment.
+If the admin wants to use incompressible page writeback, it can be done via
+
+	"echo huge > /sys/block/zramX/writeback"
+
+To use idle page writeback, the user first needs to declare zram pages
+as idle:
+
+	"echo all > /sys/block/zramX/idle"
+
+From now on, any page on zram is an idle page. The idle mark
+will be removed when someone requests access to the block.
+IOW, unless there is an access request, those pages remain idle pages.
+
+The admin can request writeback of those idle pages at the right time via
+
+	"echo idle > /sys/block/zramX/writeback"
+
+With the command, zram writes back idle pages from memory to the storage.
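+
+For example, a complete idle writeback pass might look like this (a
+sketch, assuming zram0 already has backing_dev and disksize configured):
+
+	$ echo all > /sys/block/zram0/idle
+	$ # ... let the workload touch its hot pages for a while ...
+	$ echo idle > /sys/block/zram0/writeback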
+
+If there is a lot of write IO to a flash device, flash wearout becomes
+a potential problem, so the admin needs to design a write limitation
+to guarantee storage health for the entire product life.
+
+To address the concern, zram supports the "writeback_limit" feature.
+The default value of "writeback_limit_enable" is 0, so it doesn't limit
+any writeback. IOW, if the admin wants to apply a writeback budget, they
+should enable writeback_limit_enable via
+
+	$ echo 1 > /sys/block/zramX/writeback_limit_enable
+
+Once writeback_limit_enable is set, zram doesn't allow any writeback
+until the admin sets the budget via /sys/block/zramX/writeback_limit.
+
+(If the admin doesn't enable writeback_limit_enable, any value assigned
+via /sys/block/zramX/writeback_limit is meaningless.)
+
+If the admin wants to limit writeback to 400MB per day, it can be done
+as below:
+
+	$ MB_SHIFT=20
+	$ PAGE_SHIFT=12
+	$ echo $((400<<MB_SHIFT>>PAGE_SHIFT)) > \
+		/sys/block/zram0/writeback_limit
+	$ echo 1 > /sys/block/zram0/writeback_limit_enable
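+
+(Here 400<<20>>12 = 102400, i.e. the budget is expressed as a count of
+4KB pages; the shell variable name PAGE_SHIFT above is illustrative.)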
+
+If the admin wants to allow further writes again once the budget is
+exhausted, it can be done as below:
+
+	$ echo $((400<<MB_SHIFT>>PAGE_SHIFT)) > \
+		/sys/block/zram0/writeback_limit
+
+To see the remaining writeback budget since it was set:
+
+	$ cat /sys/block/zramX/writeback_limit
+
+To disable the writeback limit:
+
+	$ echo 0 > /sys/block/zramX/writeback_limit_enable
+
+The writeback_limit count will reset whenever you reset zram (e.g.,
+system reboot, echo 1 > /sys/block/zramX/reset), so keeping track of
+how much writeback happened before the reset, in order to allocate an
+extra writeback budget at the next setting, is the user's job.
+
+If the admin wants to measure the writeback count over a certain period,
+it can be read from the 3rd column of /sys/block/zram0/bd_stat.
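+
+For example, to convert that column to megabytes (a sketch; the bd_stat
+counters are in units of 4KB pages):
+
+	$ awk '{ printf "%d MB\n", $3 * 4 / 1024 }' /sys/block/zram0/bd_stat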
 
 = memory tracking
 
@@ -251,16 +334,17 @@
 If you enable the feature, you could see block state via
 /sys/kernel/debug/zram/zram0/block_state". The output is as follows,
 
-	  300    75.033841 .wh
-	  301    63.806904 s..
-	  302    63.806919 ..h
+	  300    75.033841 .wh.
+	  301    63.806904 s...
+	  302    63.806919 ..hi
 
 First column is zram's block index.
 Second column is access time since the system was booted
 Third column is state of the block.
 (s: same page
 w: written page to backing store
-h: huge page)
+h: huge page
+i: idle page)
 
 First line of above example says 300th block is accessed at 75.033841sec
 and the block's state is huge so it is written back to the backing
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index d34a63c..e2b0e95 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -169,6 +169,9 @@
 
 	* qcom,tpdm-regs: List of regulators required.
 
+	* qcom,hw-enable-check: Check whether the TPDM needs to be probed, as
+	  some TPDMs are not enabled on secure devices.
+
 * Optional properties for CSRs:
 
 	* qcom,usb-bam-support: boolean, indicates CSR has the ability to operate on
diff --git a/Documentation/devicetree/bindings/arm/msm/android.txt b/Documentation/devicetree/bindings/arm/msm/android.txt
index 7b8b790..32e418f 100644
--- a/Documentation/devicetree/bindings/arm/msm/android.txt
+++ b/Documentation/devicetree/bindings/arm/msm/android.txt
@@ -53,6 +53,38 @@
 		};
 	};
 
+odm:
+-----------------
+
+odm partition specification.
+
+Required properties:
+
+-compatible: "android,odm"
+-dev: block device corresponding to odm partition
+-type: file system type of odm partition
+-mnt_flags: mount flags
+-fsmgr_flags: fsmgr flags
+
+Example:
+
+       firmware: firmware {
+               android {
+                       compatible = "android,firmware";
+                       fstab {
+                               compatible = "android,fstab";
+                               odm {
+                                       compatible = "android,odm";
+                                       dev = "/dev/block/platform/soc/1da4000.ufshc/by-name/odm";
+                                       type = "ext4";
+                                       mnt_flags = "ro,barrier=1,discard";
+                                       fsmgr_flags = "wait,slotselect";
+                                       status = "ok";
+                               };
+                       };
+               };
+       };
+
 system:
 -----------------
 
diff --git a/Documentation/devicetree/bindings/batterydata/batterydata.txt b/Documentation/devicetree/bindings/batterydata/batterydata.txt
index 3bd1122..b3d1adc 100644
--- a/Documentation/devicetree/bindings/batterydata/batterydata.txt
+++ b/Documentation/devicetree/bindings/batterydata/batterydata.txt
@@ -120,13 +120,28 @@
 			The threshold values in range should be in ascending
 			and shouldn't overlap. It support 8 ranges at max.
 - qcom,jeita-soft-thresholds: A tuple entry to specify ADC code for battery's soft JEITA
-				threshold.
-				<SOFT_COLD_ADC_CODE, SOFT_HOT_ADC_CODE>.
+			threshold. <SOFT_COLD_ADC_CODE, SOFT_HOT_ADC_CODE>.
 - qcom,jeita-hard-thresholds: A tuple entry to specify ADC code for battery's hard JEITA
-				threshold.
-				<HARD_COLD_ADC_CODE, HARD_HOT_ADC_CODE>.
+			threshold. <HARD_COLD_ADC_CODE, HARD_HOT_ADC_CODE>.
+- qcom,jeita-soft-hys-thresholds: A tuple entry to specify ADC code for battery's soft JEITA
+			threshold with hysteresis adjustment.
+			<SOFT_COLD_ADC_CODE, SOFT_HOT_ADC_CODE>.
+			These "hysteresis" values should be specified if
+			"qcom,jeita-soft-thresholds" are specified; without
+			them, SW JEITA compensation won't function properly.
+- qcom,jeita-soft-fcc-ua: A tuple entry to specify the values of Fast
+			charging current (in uA) that needs to be applied during
+			soft JEITA conditions (cool/warm).
+			Element 0 - FCC value for soft cool.
+			Element 1 - FCC value for soft warm.
+- qcom,jeita-soft-fv-uv: A tuple entry to specify the values of Float
+			voltage (in uV) that needs to be applied during soft
+			JEITA conditions (cool/warm).
+			Element 0 - FV value for soft cool.
+			Element 1 - FV value for soft warm.
 - qcom,batt-age-level:  Battery age level. This is used only when multiple
 			profile loading is supported.
+
 Profile data node required subnodes:
 - qcom,fcc-temp-lut : An 1-dimensional lookup table node that encodes
 			temperature to fcc lookup. The units for this lookup
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index 99de140..a3f21d0 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -312,6 +312,13 @@
 				defined in reg property.
 - qcom,sde-reg-dma-broadcast-disabled: Boolean property to indicate if broadcast
 				functionality in the register dma hardware block should be used.
+- qcom,sde-reg-dma-xin-id:	VBIF client id (xin) corresponding
+				to the LUTDMA block.
+- qcom,sde-reg-dma-clk-ctrl:	Array of 2 cell property describing clk control
+				offsets for dynamic clock gating. 1st value
+				in the array represents offset of the control
+				register. 2nd value represents bit offset within
+				control register.
 - qcom,sde-dram-channels:	This represents the number of channels in the
 				Bus memory controller.
 - qcom,sde-num-nrt-paths:	Integer property represents the number of non-realtime
@@ -371,6 +378,10 @@
 				priority for realtime clients.
 - qcom,sde-vbif-qos-nrt-remap:	This array is used to program vbif qos remapper register
 				priority for non-realtime clients.
+- qcom,sde-vbif-qos-cwb-remap:	This array is used to program vbif qos remapper register
+				priority for concurrent writeback clients.
+- qcom,sde-vbif-qos-lutdma-remap:	This array is used to program vbif qos remapper register
+				priority for lutdma client.
 - qcom,sde-danger-lut:		Array of 5 cell property, with a format of
 				<linear, tile, nrt, cwb, tile-qseed>,
 				indicating the danger luts on sspp.
@@ -733,6 +744,15 @@
 
     qcom,sde-vbif-qos-rt-remap = <3 3 4 4 5 5 6 6>;
     qcom,sde-vbif-qos-nrt-remap = <3 3 3 3 3 3 3 3>;
+    qcom,sde-vbif-qos-cwb-remap = <3 3 4 4 5 5 6 3>;
+    qcom,sde-vbif-qos-lutdma-remap = <3 3 3 3 4 4 4 4>;
+
+    qcom,sde-reg-dma-off = <0>;
+    qcom,sde-reg-dma-version = <0x00010002>;
+    qcom,sde-reg-dma-trigger-off = <0x119c>;
+    qcom,sde-reg-dma-broadcast-disabled = <0>;
+    qcom,sde-reg-dma-xin-id = <7>;
+    qcom,sde-reg-dma-clk-ctrl = <0x2bc 20>;
 
     qcom,sde-sspp-vig-blocks {
         qcom,sde-vig-csc-off = <0x320>;
diff --git a/Documentation/devicetree/bindings/input/qti-haptics.txt b/Documentation/devicetree/bindings/input/qti-haptics.txt
index 78c674a..b86bae9 100644
--- a/Documentation/devicetree/bindings/input/qti-haptics.txt
+++ b/Documentation/devicetree/bindings/input/qti-haptics.txt
@@ -59,19 +59,6 @@
 		specified in the LRA actuator datasheet. Allowed values are:
 		0 to 20475. If this is not specified, 5715us play rate is used.
 
-- qcom,external-waveform-source
-  Usage:      optional
-  Value type: <string>
-  Definition: The haptics module supports to play with internal constant
-		Vmax strength or play with patterns specified in its internal
-		8-bytes waveform buffer. It can also play with the audio
-		LINE-IN signal or PWM waveform coming from LINE-IN/PWM pin.
-		This property specify the kind of the waveform resources
-		on the LINE-IN/PWM pins. Allowed values are: "audio", "pwm".
-		If this is not specified, internal signals (Vmax or buffer)
-		will be selected according to the requriement of the playing
-		waveforms.
-
 - vdd-supply
   Usage:      optional
   Value type: <phandle>
@@ -114,7 +101,7 @@
 		notification event.
 
 - qcom,wf-pattern
-  Usage:      required
+  Usage:      optional
   Value type: <prop-encoded-array>
   Definition: Specifies the waveform pattern in a byte array that will be
 		played for the effect-id. The bit fields of each byte are:
@@ -172,6 +159,19 @@
   Definition: If specified, the hardware feature of LRA auto resonance detection
 		is disabled.
 
+- qcom,wf-line-in-audio
+  Usage:      optional
+  Value type: <empty>
+  Definition: Boolean flag to indicate if the effect plays the audio signal
+		coming into the LINE-IN pin. If this is specified, the pattern
+		specified in "qcom,wf-pattern" will be ignored.
+
+- qcom,wf-line-in-pwm
+  Usage:      optional
+  Value type: <empty>
+  Definition: Boolean flag to indicate if the effect plays the PWM signal
+		coming into the LINE-IN pin. If this is specified, the pattern
+		specified in "qcom,wf-pattern" will be ignored.
 Example:
   qcom,haptics@c000 {
 	compatible = "qcom,haptics";
@@ -200,4 +200,10 @@
 		qcom,wf-play-rate-us = <6250>;
 		qcom,wf-pattern = [7e 7e 7e];
 	};
+
+	wf_6 {
+		/* RINGTONE_x effect */
+		qcom,effect-id = <6>;
+		qcom,wf-line-in-audio;
+	};
   };
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index 9c7181b..c14f9ba 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -138,6 +138,15 @@
                   clients who do not detach, it's not possible to keep regulator
                   vote while smmu is attached. Type is <u32>.
 
+- qcom,no-dynamic-asid:
+		  Clients that use dynamic domains have a unique ASID per
+		  domain, and all domains can share the same context bank.
+		  When ASID-based invalidation is used, on some hardware
+		  revisions, as a result of multiple ASIDs associated with the
+		  same context bank, TLB entries are not invalidated properly.
+		  On such systems, we can choose to have a single ASID
+		  associated with all domains for a context bank.
+
 - clocks        : List of clocks to be used during SMMU register access. See
                   Documentation/devicetree/bindings/clock/clock-bindings.txt
                   for information about the format. For each clock specified
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,lito-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,lito-pinctrl.txt
index 3fcfc54..c7acea3 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,lito-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,lito-pinctrl.txt
@@ -40,6 +40,11 @@
 	Definition: must be 2. Specifying the pin number and flags, as defined
 		    in <dt-bindings/gpio/gpio.h>
 
+- wakeup-parent:
+	Usage: optional
+	Value type: <phandle>
+	Definition: A phandle to the wakeup interrupt controller for the SoC.
+
 Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
 a general description of GPIO and interrupt bindings.
 
@@ -183,4 +188,5 @@
 		#gpio-cells = <2>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
+		wakeup-parent = <&pdc>;
 	};
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,lpi-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,lpi-pinctrl.txt
index 94500c5..465b39e 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,lpi-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,lpi-pinctrl.txt
@@ -37,6 +37,18 @@
 		    The first cell will be used to define gpio number and the
 		    second denotes the flags for this gpio.
 
+- qcom,slew-reg:
+	Usage: optional
+	Value type: <prop-encoded-array>
+	Definition: Register base of the slew register and length.
+
+- qcom,lpi-slew-offset-tbl:
+	Usage: optional
+	Value type: <u32-array>
+	Definition: Offset table that points to each pin's shift value
+		    position in bits in the slew register base for slew
+		    settings.
+
 Please refer to ../gpio/gpio.txt for general description of GPIO bindings.
 
 Please refer to pinctrl-bindings.txt in this directory for details of the
@@ -123,12 +135,18 @@
 	Value type: <u32>
 	Definition: Selects the drive strength for the specified pins.
 
+- slew-rate:
+	Usage: optional
+	Value type: <u32>
+	Definition: Selects the slew rate for the specified pins.
+
 Example:
 
 	lpi_tlmm: lpi_pinctrl@152c000 {
 		compatible = "qcom,lpi-pinctrl";
 		qcom,num-gpios = <32>;
 		reg = <0x152c000 0>;
+		qcom,slew-reg = <0x355a000 0x0>;
 		gpio-controller;
 		#gpio-cells = <2>;
 		qcom,lpi-offset-tbl = <0x00000010>, <0x00000020>,
@@ -142,6 +160,13 @@
 				<0x00000170>, <0x00000180>,
 				<0x00000190>, <0x00000200>,
 				<0x00000210>;
+		qcom,lpi-slew-offset-tbl = <0x00000000>, <0x00000002>,
+				<0x00000004>, <0x00000008>,
+				<0x0000000A>, <0x0000000C>,
+				<0x00000000>, <0x00000000>,
+				<0x00000000>, <0x00000000>,
+				<0x00000010>, <0x00000012>,
+				<0x00000000>, <0x00000000>;
 
 		hph_comp_active: hph_comp_active {
 			mux {
@@ -165,6 +190,7 @@
 			config {
 				pins = "gpio22";
 				qcom,drive-strength = <2>;
+				slew-rate = <1>;
 			};
 		};
 	};
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt
index b439928..1ba6974 100644
--- a/Documentation/devicetree/bindings/platform/msm/ipa.txt
+++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt
@@ -92,6 +92,20 @@
 				over pcie bus or not.
 - qcom,ipa-wdi2_over_gsi: Boolean context flag to indicate WDI2 offload over GSI
 				supported or not.
+- qcom,register-collection-on-crash: Boolean that controls IPA/GSI register
+				collection upon system crash (i.e. SSR).
+- qcom,testbus-collection-on-crash: Boolean that controls testbus register
+				collection upon system crash.
+- qcom,non-tn-collection-on-crash: Boolean to control a certain subset of IPA/GSI
+				register collection relative to an SSR.  Accessing
+				these registers can cause stalling, hence this
+				control.
+- qcom,entire-ipa-block-size: Complete size of the ipa block in which all
+				registers, collected upon crash, reside.
+- qcom,secure-debug-check-action: Drives secure memory debug check. Three values allowed:
+				0 (use scm call),
+				1 (override scm call as though it returned true), and
+				2 (override scm call as though it returned false)
 
 Optional properties:
 -qcom,ipa-pipe-mem: Specifies the base physical address and the
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen4.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen4.txt
index 342787f..ac8fec3 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen4.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen4.txt
@@ -52,6 +52,15 @@
 		    is not specified, then the default value used will be
 		    2812 mV.
 
+- qcom,fg-sys-min-voltage
+	Usage:      optional
+	Value type: <u32>
+	Definition: The voltage threshold (in mV) which describes the system
+		    minimum voltage as per the hardware recommendation. This
+		    is not used for any configuration but only for calculating
+		    the available power. If this property is not specified,
+		    then the default value used is 2800 mV.
+
 - qcom,fg-sys-term-current
 	Usage:      optional
 	Value type: <u32>
@@ -419,6 +428,13 @@
 		    multiple battery profiles to be specified for a battery for
 		    proper functionality.
 
+- qcom,soc-hi-res
+	Usage:      optional
+	Value type: <empty>
+	Definition: A boolean property that, when specified, exposes a high
+		    resolution monotonic SOC under the CAPACITY_RAW property
+		    during charging, on a scale of 0-10000.
+
 ==========================================================
 Second Level Nodes - Peripherals managed by FG Gen4 driver
 ==========================================================
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qnovo5.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qnovo5.txt
index d997edd..fb27728 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qnovo5.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qnovo5.txt
@@ -30,6 +30,18 @@
   Definition: Specifies the interrupt name for Qnovo5. There is only one
 		interrupt named as "ptrain-done".
 
+- pinctrl-N:
+  Usage:      optional
+  Value type: <phandle>
+  Definition: Specifies the pinctrl configuration that needs to be applied
+		when the charger is removed for controlling external FET.
+
+- pinctrl-names:
+  Usage:      optional
+  Value type: <string>
+  Definition: Specifies the names for pinctrl configurations defined above.
+		Allowed names are "q_state1" and "q_state2".
+
 Example:
 
 qcom,qpnp-qnovo@b000 {
@@ -37,4 +49,7 @@
 	reg = <0xb000 0x100>;
 	interrupts = <0x2 0xb0 0x1 IRQ_TYPE_NONE>;
 	interrupt-names = "ptrain-done";
+	pinctrl-names = "q_state1", "q_state2";
+	pinctrl-0 = <&qnovo_fet_ctrl_state1>;
+	pinctrl-1 = <&qnovo_fet_ctrl_state2>;
 };
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt
index 2515f05..6b4ee3b 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt
@@ -251,6 +251,12 @@
   Definition: Boolean flag which when present enables stepwise change in FCC.
 		The default stepping rate is 100mA/sec.
 
+- qcom,disable-suspend-on-collapse
+  Usage:      optional
+  Value type: bool
+  Definition: Boolean flag which when present disables suspend on collapse
+		feature of charger hardware.
+
 =============================================
 Second Level Nodes - SMB5 Charger Peripherals
 =============================================
diff --git a/Documentation/devicetree/bindings/prng/msm-rng.txt b/Documentation/devicetree/bindings/prng/msm-rng.txt
new file mode 100644
index 0000000..917c2fb
--- /dev/null
+++ b/Documentation/devicetree/bindings/prng/msm-rng.txt
@@ -0,0 +1,18 @@
+* RNG (Random Number Generator)
+
+Required properties:
+- compatible : Should be "qcom,msm-rng"
+- reg        : Offset and length of the register set for the device
+
+Optional property:
+- qcom,msm-rng-iface-clk : If the device uses iface-clk.
+- qcom,no-qrng-config    : Flag to decide whether the driver does the hardware configuration or not.
+
+Example:
+
+	qcom,msm-rng@f9bff000 {
+		compatible = "qcom,msm-rng";
+		reg = <0xf9bff000 0x200>;
+		qcom,msm-rng-iface-clk;
+		qcom,no-qrng-config;
+	};
diff --git a/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt b/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
index 19a9d359..ddbcf45 100644
--- a/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
@@ -18,6 +18,9 @@
  - qcom,retain-periph: Presence denotes a hardware requirement to leave the
 		     forced periph memory retention signal in the core's clock
 		     branch control registers asserted.
+ - qcom,retain-regs: Presence denotes a hardware requirement to enable the
+		     usage of retention registers which maintain their state
+		     after the GDSC is disabled and re-enabled.
  - qcom,skip-logic-collapse: Presence denotes a requirement to leave power to
                              the core's logic enabled.
  - qcom,support-hw-trigger: Presence denotes a hardware feature to switch
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,msm-eud.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,msm-eud.txt
index 800508a..08330eb 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,msm-eud.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,msm-eud.txt
@@ -11,6 +11,10 @@
  - reg: Should be address and size of EUD register space
  - reg-names: Should be "eud_base"
 
+Optional properties:
+ - reg-names: Can include "eud_mode_mgr2" for secure EUD
+ - qcom,secure-eud-en: To enable secure EUD
+
 Driver notifies clients via extcon for VBUS spoof attach/detach
 and charger enable/disable events. Clients registered for these
 notifications should have extcon property set to eud.
diff --git a/Documentation/devicetree/bindings/ufs/ufs-qcom.txt b/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
index 842292b..c1508e4 100644
--- a/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
+++ b/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
@@ -29,9 +29,9 @@
 - vdda-pll-supply   : phandle to PHY PLL and Power-Gen block power supply
 - clocks	    : List of phandle and clock specifier pairs
 - clock-names       : List of clock input name strings sorted in the same
-		      order as the clocks property. "ref_clk_src", "ref_clk",
+		      order as the clocks property. "ref_clk_src",
 		      "tx_iface_clk" & "rx_iface_clk" are mandatory but
-		      "ref_clk_parent" is optional
+		      "ref_clk_parent" and "ref_clk" are optional
 
 Optional properties:
 - vdda-phy-max-microamp : specifies max. load that can be drawn from phy supply
diff --git a/Documentation/devicetree/bindings/usb/qcom,msm-phy.txt b/Documentation/devicetree/bindings/usb/qcom,msm-phy.txt
index 137a6a0..8e22fcc 100644
--- a/Documentation/devicetree/bindings/usb/qcom,msm-phy.txt
+++ b/Documentation/devicetree/bindings/usb/qcom,msm-phy.txt
@@ -110,8 +110,12 @@
  - qcom,vbus-valid-override: If present, indicates VBUS pin is not connected to
    the USB PHY and the controller must rely on external VBUS notification in
    order to manually relay the notification to the SSPHY.
+ - qcom,vdd-max-load-uA: If present, indicates the maximum current (in uA) the
+   PHY is expected to draw from the vdd power supply.
  - qcom,core-voltage-level: This property must be a list of three integer
    values (no, min, max) where each value represents either a voltage in
   microvolts or a value corresponding to voltage corner.
+ - qcom,core-max-load-uA: If present, indicates the maximum current (in uA) the
+   PHY is expected to draw from the core power supply.
  - qcom,link-training-reset: This property indicates to start link training
    timer to reset the elastic buffer based on rx equalization value.
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 02ba213..a9ce10b 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -499,7 +499,9 @@
 
 Note that there is no guarantee that every flag and associated mnemonic will
 be present in all further kernel releases. Things get changed, the flags may
-be vanished or the reverse -- new added.
+vanish or, conversely, new ones may be added. The interpretation of their
+meaning might change in the future as well, so each consumer of these
+flags has to track each specific kernel version for the exact semantics.
 
 The "Name" field will only be present on a mapping that has been named by
 userspace, and will show the name passed in by userspace.
diff --git a/Documentation/networking/regulatory.txt b/Documentation/networking/regulatory.txt
index 381e5b2..46c8d8b 100644
--- a/Documentation/networking/regulatory.txt
+++ b/Documentation/networking/regulatory.txt
@@ -200,5 +200,23 @@
 Statically compiled regulatory database
 ---------------------------------------
 
-When a database should be fixed into the kernel, it can be provided as a
-firmware file at build time that is then linked into the kernel.
+In most situations the userland solution using CRDA as described
+above is the preferred solution.  However in some cases a set of
+rules built into the kernel itself may be desirable.  To account
+for this situation, a configuration option has been provided
+(i.e. CONFIG_CFG80211_INTERNAL_REGDB).  With this option enabled,
+the wireless database information contained in net/wireless/db.txt is
+used to generate a data structure encoded in net/wireless/regdb.c.
+That option also enables code in net/wireless/reg.c which queries
+the data in regdb.c as an alternative to using CRDA.
+
+The file net/wireless/db.txt should be kept up-to-date with the db.txt
+file available in the git repository here:
+
+    git://git.kernel.org/pub/scm/linux/kernel/git/sforshee/wireless-regdb.git
+
+Again, most users in most situations should be using the CRDA package
+provided with their distribution, and in most other situations users
+should be building and using CRDA on their own rather than using
+this option.  If you are not absolutely sure that you should be using
+CONFIG_CFG80211_INTERNAL_REGDB then _DO_NOT_USE_IT_.
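+
+For example, refreshing db.txt from that repository could look like
+this (a sketch; paths are illustrative):
+
+    git clone git://git.kernel.org/pub/scm/linux/kernel/git/sforshee/wireless-regdb.git
+    cp wireless-regdb/db.txt net/wireless/db.txt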
diff --git a/Makefile b/Makefile
index dc0d097..3904be0 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 12
+SUBLEVEL = 20
 EXTRAVERSION =
 NAME = "People's Front"
 
@@ -487,21 +487,21 @@
 ifeq ($(cc-name),clang)
 ifneq ($(CROSS_COMPILE),)
 CLANG_TRIPLE	?= $(CROSS_COMPILE)
-CLANG_TARGET	:= --target=$(notdir $(CLANG_TRIPLE:%-=%))
-ifeq ($(shell $(srctree)/scripts/clang-android.sh $(CC) $(CLANG_TARGET)), y)
+CLANG_FLAGS	:= --target=$(notdir $(CLANG_TRIPLE:%-=%))
+ifeq ($(shell $(srctree)/scripts/clang-android.sh $(CC) $(CLANG_FLAGS)), y)
 $(error "Clang with Android --target detected. Did you specify CLANG_TRIPLE?")
 endif
 GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
-CLANG_PREFIX	:= --prefix=$(GCC_TOOLCHAIN_DIR)
+CLANG_FLAGS	+= --prefix=$(GCC_TOOLCHAIN_DIR)
 GCC_TOOLCHAIN	:= $(realpath $(GCC_TOOLCHAIN_DIR)/..)
 endif
 ifneq ($(GCC_TOOLCHAIN),)
-CLANG_GCC_TC	:= --gcc-toolchain=$(GCC_TOOLCHAIN)
+CLANG_FLAGS	+= --gcc-toolchain=$(GCC_TOOLCHAIN)
 endif
-KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX)
-KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX)
-KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
-KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
+CLANG_FLAGS	+= -no-integrated-as
+KBUILD_CFLAGS	+= $(CLANG_FLAGS)
+KBUILD_AFLAGS	+= $(CLANG_FLAGS)
+export CLANG_FLAGS
 endif
 
 RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
@@ -1005,11 +1005,6 @@
   ifeq ($(has_libelf),1)
     objtool_target := tools/objtool FORCE
   else
-    ifdef CONFIG_UNWINDER_ORC
-      $(error "Cannot generate ORC metadata for CONFIG_UNWINDER_ORC=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
-    else
-      $(warning "Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
-    endif
     SKIP_STACK_VALIDATION := 1
     export SKIP_STACK_VALIDATION
   endif
@@ -1166,6 +1161,14 @@
 
 PHONY += prepare-objtool
 prepare-objtool: $(objtool_target)
+ifeq ($(SKIP_STACK_VALIDATION),1)
+ifdef CONFIG_UNWINDER_ORC
+	@echo "error: Cannot generate ORC metadata for CONFIG_UNWINDER_ORC=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2
+	@false
+else
+	@echo "warning: Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2
+endif
+endif
 
 ifdef cfi-flags
   ifeq ($(call cc-option, $(cfi-flags)),)
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index a90c4f1..ac69f30 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -26,6 +26,7 @@
 	select GENERIC_IRQ_SHOW
 	select GENERIC_PCI_IOMAP
 	select GENERIC_PENDING_IRQ if SMP
+	select GENERIC_SCHED_CLOCK
 	select GENERIC_SMP_IDLE_THREAD
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_TRACEHOOK
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
index 9185541..6958545 100644
--- a/arch/arc/include/asm/perf_event.h
+++ b/arch/arc/include/asm/perf_event.h
@@ -103,7 +103,8 @@
 
 	/* counts condition */
 	[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */
+	/* All jump instructions that are taken */
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
 	[PERF_COUNT_ARC_BPOK]         = "bpok",	  /* NP-NT, PT-T, PNT-NT */
 #ifdef CONFIG_ISA_ARCV2
 	[PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S
index 62ad4bc..f230bb7 100644
--- a/arch/arc/lib/memset-archs.S
+++ b/arch/arc/lib/memset-archs.S
@@ -7,11 +7,39 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/cache.h>
 
-#undef PREALLOC_NOT_AVAIL
+/*
+ * The memset implementation below is optimized to use prefetchw and
+ * prealloc instructions in the case of a CPU with a 64B L1 data cache line
+ * (L1_CACHE_SHIFT == 6). If you want to implement an optimized memset for
+ * other possible L1 data cache line lengths (32B and 128B), you should
+ * rewrite the code carefully, checking that no prefetchw/prealloc
+ * instruction is issued for L1 cache lines which don't belong to the memset area.
+ */
+
+#if L1_CACHE_SHIFT == 6
+
+.macro PREALLOC_INSTR	reg, off
+	prealloc	[\reg, \off]
+.endm
+
+.macro PREFETCHW_INSTR	reg, off
+	prefetchw	[\reg, \off]
+.endm
+
+#else
+
+.macro PREALLOC_INSTR
+.endm
+
+.macro PREFETCHW_INSTR
+.endm
+
+#endif
 
 ENTRY_CFI(memset)
-	prefetchw [r0]		; Prefetch the write location
+	PREFETCHW_INSTR	r0, 0	; Prefetch the first write location
 	mov.f	0, r2
 ;;; if size is zero
 	jz.d	[blink]
@@ -48,11 +76,8 @@
 
 	lpnz	@.Lset64bytes
 	;; LOOP START
-#ifdef PREALLOC_NOT_AVAIL
-	prefetchw [r3, 64]	;Prefetch the next write location
-#else
-	prealloc  [r3, 64]
-#endif
+	PREALLOC_INSTR	r3, 64	; alloc next line w/o fetching
+
 #ifdef CONFIG_ARC_HAS_LL64
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
@@ -85,7 +110,6 @@
 	lsr.f	lp_count, r2, 5 ;Last remaining  max 124 bytes
 	lpnz	.Lset32bytes
 	;; LOOP START
-	prefetchw   [r3, 32]	;Prefetch the next write location
 #ifdef CONFIG_ARC_HAS_LL64
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index ba14506..f890b2f 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -138,7 +138,8 @@
 	 */
 
 	memblock_add_node(low_mem_start, low_mem_sz, 0);
-	memblock_reserve(low_mem_start, __pa(_end) - low_mem_start);
+	memblock_reserve(CONFIG_LINUX_LINK_BASE,
+			 __pa(_end) - CONFIG_LINUX_LINK_BASE);
 
 #ifdef CONFIG_BLK_DEV_INITRD
 	if (initrd_start)
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
index 03611d50..e84544b 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
@@ -26,8 +26,7 @@
 			"Speakers", "SPKL",
 			"Speakers", "SPKR";
 
-		assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>,
-				<&clock CLK_MOUT_EPLL>,
+		assigned-clocks = <&clock CLK_MOUT_EPLL>,
 				<&clock CLK_MOUT_MAU_EPLL>,
 				<&clock CLK_MOUT_USER_MAU_EPLL>,
 				<&clock_audss EXYNOS_MOUT_AUDSS>,
@@ -36,8 +35,7 @@
 				<&clock_audss EXYNOS_DOUT_AUD_BUS>,
 				<&clock_audss EXYNOS_DOUT_I2S>;
 
-		assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>,
-				<&clock CLK_FOUT_EPLL>,
+		assigned-clock-parents = <&clock CLK_FOUT_EPLL>,
 				<&clock CLK_MOUT_EPLL>,
 				<&clock CLK_MOUT_MAU_EPLL>,
 				<&clock CLK_MAU_EPLL>,
@@ -48,7 +46,6 @@
 				<0>,
 				<0>,
 				<0>,
-				<0>,
 				<196608001>,
 				<(196608002 / 2)>,
 				<196608000>;
@@ -84,4 +81,6 @@
 
 &i2s0 {
 	status = "okay";
+	assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>;
+	assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>;
 };
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu4.dts b/arch/arm/boot/dts/exynos5422-odroidxu4.dts
index 4a30cc8..122174e 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu4.dts
+++ b/arch/arm/boot/dts/exynos5422-odroidxu4.dts
@@ -33,8 +33,7 @@
 		compatible = "samsung,odroid-xu3-audio";
 		model = "Odroid-XU4";
 
-		assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>,
-				<&clock CLK_MOUT_EPLL>,
+		assigned-clocks = <&clock CLK_MOUT_EPLL>,
 				<&clock CLK_MOUT_MAU_EPLL>,
 				<&clock CLK_MOUT_USER_MAU_EPLL>,
 				<&clock_audss EXYNOS_MOUT_AUDSS>,
@@ -43,8 +42,7 @@
 				<&clock_audss EXYNOS_DOUT_AUD_BUS>,
 				<&clock_audss EXYNOS_DOUT_I2S>;
 
-		assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>,
-				<&clock CLK_FOUT_EPLL>,
+		assigned-clock-parents = <&clock CLK_FOUT_EPLL>,
 				<&clock CLK_MOUT_EPLL>,
 				<&clock CLK_MOUT_MAU_EPLL>,
 				<&clock CLK_MAU_EPLL>,
@@ -55,7 +53,6 @@
 				<0>,
 				<0>,
 				<0>,
-				<0>,
 				<196608001>,
 				<(196608002 / 2)>,
 				<196608000>;
@@ -79,6 +76,8 @@
 
 &i2s0 {
 	status = "okay";
+	assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>;
+	assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>;
 };
 
 &pwm {
diff --git a/arch/arm/boot/dts/imx7d-nitrogen7.dts b/arch/arm/boot/dts/imx7d-nitrogen7.dts
index d8aac4a..177d21f 100644
--- a/arch/arm/boot/dts/imx7d-nitrogen7.dts
+++ b/arch/arm/boot/dts/imx7d-nitrogen7.dts
@@ -86,13 +86,17 @@
 		compatible = "regulator-fixed";
 		regulator-min-microvolt = <3300000>;
 		regulator-max-microvolt = <3300000>;
-		clocks = <&clks IMX7D_CLKO2_ROOT_DIV>;
-		clock-names = "slow";
 		regulator-name = "reg_wlan";
 		startup-delay-us = <70000>;
 		gpio = <&gpio4 21 GPIO_ACTIVE_HIGH>;
 		enable-active-high;
 	};
+
+	usdhc2_pwrseq: usdhc2_pwrseq {
+		compatible = "mmc-pwrseq-simple";
+		clocks = <&clks IMX7D_CLKO2_ROOT_DIV>;
+		clock-names = "ext_clock";
+	};
 };
 
 &adc1 {
@@ -375,6 +379,7 @@
 	bus-width = <4>;
 	non-removable;
 	vmmc-supply = <&reg_wlan>;
+	mmc-pwrseq = <&usdhc2_pwrseq>;
 	cap-power-off-card;
 	keep-power-in-suspend;
 	status = "okay";
diff --git a/arch/arm/boot/dts/imx7d-pico.dtsi b/arch/arm/boot/dts/imx7d-pico.dtsi
index 21973eb..f27b384 100644
--- a/arch/arm/boot/dts/imx7d-pico.dtsi
+++ b/arch/arm/boot/dts/imx7d-pico.dtsi
@@ -100,6 +100,19 @@
 		regulator-min-microvolt = <1800000>;
 		regulator-max-microvolt = <1800000>;
 	};
+
+	usdhc2_pwrseq: usdhc2_pwrseq {
+		compatible = "mmc-pwrseq-simple";
+		clocks = <&clks IMX7D_CLKO2_ROOT_DIV>;
+		clock-names = "ext_clock";
+	};
+};
+
+&clks {
+	assigned-clocks = <&clks IMX7D_CLKO2_ROOT_SRC>,
+			  <&clks IMX7D_CLKO2_ROOT_DIV>;
+	assigned-clock-parents = <&clks IMX7D_CKIL>;
+	assigned-clock-rates = <0>, <32768>;
 };
 
 &i2c4 {
@@ -199,12 +212,13 @@
 
 &usdhc2 { /* Wifi SDIO */
 	pinctrl-names = "default";
-	pinctrl-0 = <&pinctrl_usdhc2>;
+	pinctrl-0 = <&pinctrl_usdhc2 &pinctrl_wifi_clk>;
 	no-1-8-v;
 	non-removable;
 	keep-power-in-suspend;
 	wakeup-source;
 	vmmc-supply = <&reg_ap6212>;
+	mmc-pwrseq = <&usdhc2_pwrseq>;
 	status = "okay";
 };
 
@@ -301,6 +315,12 @@
 };
 
 &iomuxc_lpsr {
+	pinctrl_wifi_clk: wificlkgrp {
+		fsl,pins = <
+			MX7D_PAD_LPSR_GPIO1_IO03__CCM_CLKO2	0x7d
+		>;
+	};
+
 	pinctrl_wdog: wdoggrp {
 		fsl,pins = <
 			MX7D_PAD_LPSR_GPIO1_IO00__WDOG1_WDOG_B	0x74
diff --git a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
index c7ce415..f250b20 100644
--- a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
+++ b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
@@ -309,8 +309,8 @@
 
 &reg_dldo3 {
 	regulator-always-on;
-	regulator-min-microvolt = <2500000>;
-	regulator-max-microvolt = <2500000>;
+	regulator-min-microvolt = <3300000>;
+	regulator-max-microvolt = <3300000>;
 	regulator-name = "vcc-pd";
 };
 
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index 92fd2c8..12659ce 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -10,7 +10,7 @@
 #ifndef _ASM_PGTABLE_2LEVEL_H
 #define _ASM_PGTABLE_2LEVEL_H
 
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
 
 /*
  * Hardware-wise, we have a two level page table structure, where the first
diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
index 318394e..5e11ad3 100644
--- a/arch/arm/mach-cns3xxx/pcie.c
+++ b/arch/arm/mach-cns3xxx/pcie.c
@@ -83,7 +83,7 @@
 	} else /* remote PCI bus */
 		base = cnspci->cfg1_regs + ((busno & 0xf) << 20);
 
-	return base + (where & 0xffc) + (devfn << 12);
+	return base + where + (devfn << 12);
 }
 
 static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
diff --git a/arch/arm/mach-imx/cpuidle-imx6sx.c b/arch/arm/mach-imx/cpuidle-imx6sx.c
index 243a108..fd0053e 100644
--- a/arch/arm/mach-imx/cpuidle-imx6sx.c
+++ b/arch/arm/mach-imx/cpuidle-imx6sx.c
@@ -110,7 +110,7 @@
 	 * except for power up sw2iso which need to be
 	 * larger than LDO ramp up time.
 	 */
-	imx_gpc_set_arm_power_up_timing(2, 1);
+	imx_gpc_set_arm_power_up_timing(0xf, 1);
 	imx_gpc_set_arm_power_down_timing(1, 1);
 
 	return cpuidle_register(&imx6sx_cpuidle_driver, NULL);
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 60f3dd7..1e35170 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -18,7 +18,7 @@
 # Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
 # for relative relocs, since this leads to better Image compression
 # with the relocation offsets always being zero.
-LDFLAGS_vmlinux		+= -pie -shared -Bsymbolic \
+LDFLAGS_vmlinux		+= -shared -Bsymbolic -z notext -z norelro \
 			$(call ld-option, --no-apply-dynamic-relocs)
 endif
 
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
index 176e38d..ec0da5b 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
@@ -27,6 +27,23 @@
 		method = "smc";
 	};
 
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		/*
+		 * This area matches the mapping done with a
+		 * mainline U-Boot, and should be updated by the
+		 * bootloader.
+		 */
+
+		psci-area@4000000 {
+			reg = <0x0 0x4000000 0x0 0x200000>;
+			no-map;
+		};
+	};
+
 	ap806 {
 		#address-cells = <2>;
 		#size-cells = <2>;
diff --git a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
index a747b7b..387be39 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
@@ -17,8 +17,13 @@
 	model = "MediaTek MT7622 RFB1 board";
 	compatible = "mediatek,mt7622-rfb1", "mediatek,mt7622";
 
+	aliases {
+		serial0 = &uart0;
+	};
+
 	chosen {
-		bootargs = "earlycon=uart8250,mmio32,0x11002000 console=ttyS0,115200n1 swiotlb=512";
+		stdout-path = "serial0:115200n8";
+		bootargs = "earlycon=uart8250,mmio32,0x11002000 swiotlb=512";
 	};
 
 	cpus {
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-cmd.dtsi
index 71b39de..da87cc9 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-cmd.dtsi
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 &mdss_mdp {
@@ -9,9 +9,9 @@
 				"nt35695b truly fhd command mode dsi panel";
 		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
 
-		qcom,dsi-ctrl-num = <1>;
-		qcom,dsi-phy-num = <1>;
-		qcom,dsi-select-clocks = "src_byte_clk1", "src_pixel_clk1";
+		qcom,dsi-ctrl-num = <0>;
+		qcom,dsi-phy-num = <0>;
+		qcom,dsi-select-clocks = "src_byte_clk0", "src_pixel_clk0";
 
 		qcom,mdss-dsi-virtual-channel-id = <0>;
 		qcom,mdss-dsi-stream = <0>;
diff --git a/arch/arm64/boot/dts/qcom/kona-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/kona-audio-overlay.dtsi
index d9d4054..9a3f55c 100644
--- a/arch/arm64/boot/dts/qcom/kona-audio-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-audio-overlay.dtsi
@@ -181,6 +181,8 @@
 &kona_snd {
 	qcom,model = "kona-mtp-snd-card";
 	qcom,msm-mi2s-master = <1>, <1>, <1>;
+	qcom,wcn-bt = <1>;
+	qcom,ext-disp-audio-rx = <1>;
 	qcom,audio-routing =
 		"AMIC1", "MIC BIAS1",
 		"MIC BIAS1", "Analog Mic1",
@@ -208,9 +210,9 @@
 		"IN2_HPHR", "HPHR_OUT",
 		"IN3_AUX", "AUX_OUT",
 		"TX SWR_ADC0", "ADC1_OUTPUT",
-		"TX SWR_ADC2", "ADC2_OUTPUT",
-		"TX SWR_ADC3", "ADC3_OUTPUT",
-		"TX SWR_ADC4", "ADC4_OUTPUT",
+		"TX SWR_ADC1", "ADC2_OUTPUT",
+		"TX SWR_ADC2", "ADC3_OUTPUT",
+		"TX SWR_ADC3", "ADC4_OUTPUT",
 		"TX SWR_DMIC0", "DMIC1_OUTPUT",
 		"TX SWR_DMIC1", "DMIC2_OUTPUT",
 		"TX SWR_DMIC2", "DMIC3_OUTPUT",
@@ -227,14 +229,33 @@
 		"RX_TX DEC2_INP", "TX DEC2 MUX",
 		"RX_TX DEC3_INP", "TX DEC3 MUX",
 		"SpkrLeft IN", "WSA_SPK1 OUT",
-		"SpkrRight IN", "WSA_SPK2 OUT";
+		"SpkrRight IN", "WSA_SPK2 OUT",
+		"VA DMIC0", "MIC BIAS3",
+		"VA DMIC1", "MIC BIAS3",
+		"VA DMIC2", "MIC BIAS1",
+		"VA DMIC3", "MIC BIAS1",
+		"VA DMIC4", "MIC BIAS4",
+		"VA DMIC5", "MIC BIAS4",
+		"VA SWR_ADC0", "ADC1_OUTPUT",
+		"VA SWR_ADC1", "ADC2_OUTPUT",
+		"VA SWR_ADC2", "ADC3_OUTPUT",
+		"VA SWR_ADC3", "ADC4_OUTPUT",
+		"VA SWR_MIC0", "DMIC1_OUTPUT",
+		"VA SWR_MIC1", "DMIC2_OUTPUT",
+		"VA SWR_MIC2", "DMIC3_OUTPUT",
+		"VA SWR_MIC3", "DMIC4_OUTPUT",
+		"VA SWR_MIC4", "DMIC5_OUTPUT",
+		"VA SWR_MIC5", "DMIC6_OUTPUT",
+		"VA SWR_MIC6", "DMIC7_OUTPUT",
+		"VA SWR_MIC7", "DMIC8_OUTPUT";
 	qcom,msm-mbhc-hphl-swh = <1>;
 	qcom,msm-mbhc-gnd-swh = <1>;
 	qcom,cdc-dmic01-gpios = <&cdc_dmic01_gpios>;
 	qcom,cdc-dmic23-gpios = <&cdc_dmic23_gpios>;
 	qcom,cdc-dmic45-gpios = <&cdc_dmic45_gpios>;
-	asoc-codec  = <&stub_codec>, <&bolero>;
-	asoc-codec-names = "msm-stub-codec.1", "bolero_codec";
+	asoc-codec  = <&stub_codec>, <&bolero>, <&ext_disp_audio_codec>;
+	asoc-codec-names = "msm-stub-codec.1", "bolero_codec",
+			   "msm-ext-disp-audio-codec-rx";
 	qcom,wsa-max-devs = <2>;
 	qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0212>,
 			<&wsa881x_0213>, <&wsa881x_0214>;
diff --git a/arch/arm64/boot/dts/qcom/kona-bus.dtsi b/arch/arm64/boot/dts/qcom/kona-bus.dtsi
index 3f01261..717c829 100644
--- a/arch/arm64/boot/dts/qcom/kona-bus.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-bus.dtsi
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <dt-bindings/msm/msm-bus-ids.h>
@@ -340,7 +340,6 @@
 			qcom,qos-off = <4096>;
 			qcom,base-offset = <8192>;
 			qcom,sbm-offset = <0>;
-			qcom,bypass-qos-prg;
 			qcom,bus-type = <1>;
 			clocks = <>;
 		};
@@ -353,7 +352,6 @@
 			qcom,qos-off = <4096>;
 			qcom,base-offset = <12288>;
 			qcom,sbm-offset = <0>;
-			qcom,bypass-qos-prg;
 			qcom,bus-type = <1>;
 			clocks = <>;
 		};
@@ -366,7 +364,6 @@
 			qcom,qos-off = <2048>;
 			qcom,base-offset = <208896>;
 			qcom,sbm-offset = <0>;
-			qcom,bypass-qos-prg;
 			qcom,bus-type = <1>;
 			clocks = <>;
 		};
@@ -379,7 +376,6 @@
 			qcom,qos-off = <0>;
 			qcom,base-offset = <0>;
 			qcom,sbm-offset = <0>;
-			qcom,bypass-qos-prg;
 			qcom,bus-type = <1>;
 			clocks = <>;
 		};
@@ -392,7 +388,6 @@
 			qcom,qos-off = <0>;
 			qcom,base-offset = <0>;
 			qcom,sbm-offset = <0>;
-			qcom,bypass-qos-prg;
 			qcom,bus-type = <1>;
 			clocks = <>;
 		};
@@ -405,7 +400,6 @@
 			qcom,qos-off = <4096>;
 			qcom,base-offset = <135168>;
 			qcom,sbm-offset = <0>;
-			qcom,bypass-qos-prg;
 			qcom,bus-type = <1>;
 			clocks = <>;
 		};
@@ -442,7 +436,6 @@
 			qcom,qos-off = <2048>;
 			qcom,base-offset = <40960>;
 			qcom,sbm-offset = <0>;
-			qcom,bypass-qos-prg;
 			qcom,bus-type = <1>;
 			clocks = <>;
 		};
@@ -455,7 +448,6 @@
 			qcom,qos-off = <0>;
 			qcom,base-offset = <0>;
 			qcom,sbm-offset = <0>;
-			qcom,bypass-qos-prg;
 			qcom,bus-type = <1>;
 			clocks = <>;
 		};
@@ -468,7 +460,6 @@
 			qcom,qos-off = <4096>;
 			qcom,base-offset = <73728>;
 			qcom,sbm-offset = <0>;
-			qcom,bypass-qos-prg;
 			qcom,bus-type = <1>;
 			clocks = <>;
 		};
@@ -614,6 +605,12 @@
 			qcom,blacklist = <&slv_qns_cnoc>;
 			qcom,ap-owned;
 			qcom,prio = <2>;
+			qcom,node-qos-clks {
+				clocks =
+				<&clock_gcc GCC_AGGRE_UFS_PHY_AXI_CLK>;
+				clock-names =
+				"clk-aggre-ufs-phy-axi-no-rate";
+			};
 		};
 
 		mas_xm_usb3_0: mas-xm-usb3-0 {
@@ -627,6 +624,12 @@
 			qcom,blacklist = <&slv_qns_cnoc>;
 			qcom,ap-owned;
 			qcom,prio = <2>;
+			qcom,node-qos-clks {
+				clocks =
+				<&clock_gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>;
+				clock-names =
+				"clk-usb3-prim-axi-no-rate";
+			};
 		};
 
 		mas_xm_usb3_1: mas-xm-usb3-1 {
@@ -640,6 +643,12 @@
 			qcom,blacklist = <&slv_qns_cnoc>;
 			qcom,ap-owned;
 			qcom,prio = <2>;
+			qcom,node-qos-clks {
+				clocks =
+				<&clock_gcc GCC_AGGRE_USB3_SEC_AXI_CLK>;
+				clock-names =
+				"clk-usb3-sec-axi-no-rate";
+			};
 		};
 
 		mas_qhm_a2noc_cfg: mas-qhm-a2noc-cfg {
@@ -719,6 +728,8 @@
 			qcom,ap-owned;
 			qcom,prio = <2>;
 			qcom,forwarding;
+			qcom,defer-init-qos;
+			qcom,node-qos-bcms = <7035 0 1>;
 		};
 
 		mas_xm_pcie3_0: mas-xm-pcie3-0 {
@@ -969,6 +980,7 @@
 			qcom,ap-owned;
 			qcom,prio = <0>;
 			qcom,forwarding;
+			qcom,node-qos-bcms = <7012 0 1>;
 		};
 
 		mas_qnm_mnoc_sf: mas-qnm-mnoc-sf {
@@ -983,6 +995,7 @@
 			qcom,ap-owned;
 			qcom,prio = <0>;
 			qcom,forwarding;
+			qcom,node-qos-bcms = <7012 0 1>;
 		};
 
 		mas_qnm_pcie: mas-qnm-pcie {
@@ -1064,6 +1077,7 @@
 			qcom,ap-owned;
 			qcom,prio = <0>;
 			qcom,forwarding;
+			qcom,node-qos-bcms = <7012 0 1>;
 		};
 
 		mas_qnm_camnoc_icp: mas-qnm-camnoc-icp {
@@ -1078,6 +1092,7 @@
 			qcom,ap-owned;
 			qcom,prio = <0>;
 			qcom,forwarding;
+			qcom,node-qos-bcms = <7012 0 1>;
 		};
 
 		mas_qnm_camnoc_sf: mas-qnm-camnoc-sf {
@@ -1092,6 +1107,7 @@
 			qcom,ap-owned;
 			qcom,prio = <0>;
 			qcom,forwarding;
+			qcom,node-qos-bcms = <7012 0 1>;
 		};
 
 		mas_qnm_video0: mas-qnm-video0 {
@@ -1106,6 +1122,7 @@
 			qcom,ap-owned;
 			qcom,prio = <0>;
 			qcom,forwarding;
+			qcom,node-qos-bcms = <7012 0 1>;
 		};
 
 		mas_qnm_video1: mas-qnm-video1 {
@@ -1120,6 +1137,7 @@
 			qcom,ap-owned;
 			qcom,prio = <0>;
 			qcom,forwarding;
+			qcom,node-qos-bcms = <7012 0 1>;
 		};
 
 		mas_qnm_video_cvp: mas-qnm-video-cvp {
@@ -1134,6 +1152,7 @@
 			qcom,ap-owned;
 			qcom,prio = <0>;
 			qcom,forwarding;
+			qcom,node-qos-bcms = <7012 0 1>;
 		};
 
 		mas_qxm_mdp0: mas-qxm-mdp0 {
@@ -1148,6 +1167,7 @@
 			qcom,ap-owned;
 			qcom,prio = <0>;
 			qcom,forwarding;
+			qcom,node-qos-bcms = <7012 0 1>;
 		};
 
 		mas_qxm_mdp1: mas-qxm-mdp1 {
@@ -1162,6 +1182,7 @@
 			qcom,ap-owned;
 			qcom,prio = <0>;
 			qcom,forwarding;
+			qcom,node-qos-bcms = <7012 0 1>;
 		};
 
 		mas_qxm_rot: mas-qxm-rot {
@@ -1175,6 +1196,7 @@
 			qcom,ap-owned;
 			qcom,prio = <0>;
 			qcom,forwarding;
+			qcom,node-qos-bcms = <7012 0 1>;
 		};
 
 		mas_amm_npu_sys: mas-amm-npu-sys {
@@ -1287,6 +1309,15 @@
 			qcom,forwarding;
 		};
 
+		mas_alc: mas-alc {
+			cell-id = <MSM_BUS_MASTER_ALC>;
+			label = "mas-alc";
+			qcom,buswidth = <1>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_mc_virt>;
+			qcom,bcms = <&bcm_alc>;
+		};
+
 		mas_qnm_mnoc_hf_display: mas-qnm-mnoc-hf_display {
 			cell-id = <MSM_BUS_MASTER_MNOC_HF_MEM_NOC_DISPLAY>;
 			label = "mas-qnm-mnoc-hf_display";
diff --git a/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi
index 4e6df1f..09baf55 100644
--- a/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi
@@ -124,6 +124,7 @@
 		sensor-position-yaw = <180>;
 		actuator-src = <&actuator_rear>;
 		eeprom-src = <&eeprom_rear>;
+		led-flash-src = <&led_flash_rear>;
 		cam_vio-supply = <&pm8009_l7>;
 		cam_bob-supply = <&pm8150a_bob>;
 		cam_vana-supply = <&pm8009_l5>;
@@ -166,6 +167,7 @@
 		sensor-position-pitch = <0>;
 		sensor-position-yaw = <180>;
 		eeprom-src = <&eeprom_rear_aux>;
+		led-flash-src = <&led_flash_rear_aux>;
 		cam_bob-supply = <&pm8150a_bob>;
 		cam_vdig-supply = <&pm8009_l2>;
 		cam_vio-supply = <&pm8009_l7>;
@@ -304,14 +306,12 @@
 		pinctrl-1 = <&cam_sensor_mclk3_suspend
 				 &cam_sensor_suspend_3>;
 		gpios = <&tlmm 97 0>,
-			<&tlmm 109 0>,
-			<&pm8009_gpios 1 0>;
+			<&tlmm 109 0>;
 		gpio-reset = <1>;
-		gpio-req-tbl-num = <0 1 2>;
-		gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
 		gpio-req-tbl-label = "CAMIF_MCLK3",
-					"CAM_RESET3",
-					"TOF_VDD_EN";
+					"CAM_RESET3";
 		sensor-mode = <0>;
 		cci-master = <1>;
 		status = "ok";
diff --git a/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi
index 4e6df1f..514349c 100644
--- a/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi
@@ -123,6 +123,7 @@
 		sensor-position-pitch = <0>;
 		sensor-position-yaw = <180>;
 		actuator-src = <&actuator_rear>;
+		led-flash-src = <&led_flash_rear>;
 		eeprom-src = <&eeprom_rear>;
 		cam_vio-supply = <&pm8009_l7>;
 		cam_bob-supply = <&pm8150a_bob>;
@@ -166,6 +167,7 @@
 		sensor-position-pitch = <0>;
 		sensor-position-yaw = <180>;
 		eeprom-src = <&eeprom_rear_aux>;
+		led-flash-src = <&led_flash_rear_aux>;
 		cam_bob-supply = <&pm8150a_bob>;
 		cam_vdig-supply = <&pm8009_l2>;
 		cam_vio-supply = <&pm8009_l7>;
@@ -243,7 +245,7 @@
 		cell-index = <2>;
 		compatible = "qcom,cam-sensor";
 		csiphy-sd-index = <2>;
-		sensor-position-roll = <90>;
+		sensor-position-roll = <270>;
 		sensor-position-pitch = <0>;
 		sensor-position-yaw = <0>;
 		eeprom-src = <&eeprom_front>;
@@ -304,14 +306,12 @@
 		pinctrl-1 = <&cam_sensor_mclk3_suspend
 				 &cam_sensor_suspend_3>;
 		gpios = <&tlmm 97 0>,
-			<&tlmm 109 0>,
-			<&pm8009_gpios 1 0>;
+			<&tlmm 109 0>;
 		gpio-reset = <1>;
-		gpio-req-tbl-num = <0 1 2>;
-		gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
 		gpio-req-tbl-label = "CAMIF_MCLK3",
-					"CAM_RESET3",
-					"TOF_VDD_EN";
+					"CAM_RESET3";
 		sensor-mode = <0>;
 		cci-master = <1>;
 		status = "ok";
diff --git a/arch/arm64/boot/dts/qcom/kona-camera-sensor-qrd.dtsi b/arch/arm64/boot/dts/qcom/kona-camera-sensor-qrd.dtsi
new file mode 100644
index 0000000..1da61ab
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/kona-camera-sensor-qrd.dtsi
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+&soc {
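+	/*
+	 * Both rear flash nodes share the PM8150L flash0/flash1 and
+	 * torch0/torch1 current sources, gated through switch2.
+	 */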
+	led_flash_rear: qcom,camera-flash0 {
+		cell-index = <0>;
+		compatible = "qcom,camera-flash";
+		flash-source = <&pm8150l_flash0 &pm8150l_flash1>;
+		torch-source = <&pm8150l_torch0 &pm8150l_torch1>;
+		switch-source = <&pm8150l_switch2>;
+		status = "ok";
+	};
+
+	led_flash_rear_aux: qcom,camera-flash1 {
+		cell-index = <1>;
+		compatible = "qcom,camera-flash";
+		flash-source = <&pm8150l_flash0 &pm8150l_flash1>;
+		torch-source = <&pm8150l_torch0 &pm8150l_torch1>;
+		switch-source = <&pm8150l_switch2>;
+		status = "ok";
+	};
+
+	qcom,cam-res-mgr {
+		compatible = "qcom,cam-res-mgr";
+		status = "ok";
+	};
+};
+
+&cam_cci0 {
+	actuator_rear: qcom,actuator0 {
+		cell-index = <0>;
+		compatible = "qcom,actuator";
+		cci-master = <0>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2856000>;
+		rgltr-max-voltage = <3104000>;
+		rgltr-load-current = <0>;
+	};
+
+	eeprom_rear: qcom,eeprom0 {
+		cell-index = <0>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&pm8009_l7>;
+		cam_vana-supply = <&pm8009_l5>;
+		cam_vdig-supply = <&pm8009_l1>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 2800000 1104000 0 2856000>;
+		rgltr-max-voltage = <0 3000000 1104000 0 3104000>;
+		rgltr-load-current = <0 80000 1200000 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_active
+				&cam_sensor_active_rear>;
+		pinctrl-1 = <&cam_sensor_mclk0_suspend
+				&cam_sensor_suspend_rear>;
+		gpios = <&tlmm 94 0>,
+			<&tlmm 93 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
+					"CAM_RESET0";
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	eeprom_rear_aux: qcom,eeprom1 {
+		cell-index = <1>;
+		compatible = "qcom,eeprom";
+		cam_vdig-supply = <&pm8009_l2>;
+		cam_vio-supply = <&pm8009_l7>;
+		cam_vana-supply = <&pm8009_l6>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 2800000 1200000 0 2856000>;
+		rgltr-max-voltage = <0 3000000 1200000 0 3104000>;
+		rgltr-load-current = <0 80000 1200000 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				 &cam_sensor_active_rear_aux>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				 &cam_sensor_suspend_rear_aux>;
+		gpios = <&tlmm 95 0>,
+			<&tlmm 92 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1";
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor0 {
+		cell-index = <0>;
+		compatible = "qcom,cam-sensor";
+		csiphy-sd-index = <0>;
+		sensor-position-roll = <90>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		actuator-src = <&actuator_rear>;
+		eeprom-src = <&eeprom_rear>;
+		cam_vio-supply = <&pm8009_l7>;
+		cam_bob-supply = <&pm8150a_bob>;
+		cam_vana-supply = <&pm8009_l5>;
+		cam_vdig-supply = <&pm8009_l1>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_bob";
+		rgltr-cntrl-support;
+		pwm-switch;
+		rgltr-min-voltage = <0 2800000 1104000 0 3008000>;
+		rgltr-max-voltage = <0 3000000 1104000 0 3960000>;
+		rgltr-load-current = <0 80000 1200000 0 2000000>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_active
+				&cam_sensor_active_rear>;
+		pinctrl-1 = <&cam_sensor_mclk0_suspend
+				&cam_sensor_suspend_rear>;
+		gpios = <&tlmm 94 0>,
+			<&tlmm 93 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
+					"CAM_RESET0";
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor1 {
+		cell-index = <1>;
+		compatible = "qcom,cam-sensor";
+		csiphy-sd-index = <1>;
+		sensor-position-roll = <90>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		eeprom-src = <&eeprom_rear_aux>;
+		cam_bob-supply = <&pm8150a_bob>;
+		cam_vdig-supply = <&pm8009_l2>;
+		cam_vio-supply = <&pm8009_l7>;
+		cam_vana-supply = <&pm8009_l6>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_bob";
+		rgltr-cntrl-support;
+		pwm-switch;
+		rgltr-min-voltage = <0 2800000 1200000 0 3008000>;
+		rgltr-max-voltage = <0 3000000 1200000 0 3960000>;
+		rgltr-load-current = <0 80000 1200000 0 2000000>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				&cam_sensor_active_rear_aux>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				&cam_sensor_suspend_rear_aux>;
+		gpios = <&tlmm 95 0>,
+			<&tlmm 92 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+};
+
+&cam_cci1 {
+	eeprom_front: qcom,eeprom2 {
+		cell-index = <2>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&pm8009_l7>;
+		cam_vana-supply = <&pm8009_l6>;
+		cam_vdig-supply = <&pm8009_l3>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 2800000 1056000 0 2856000>;
+		rgltr-max-voltage = <0 3000000 1056000 0 3104000>;
+		rgltr-load-current = <0 80000 1200000 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_active
+				 &cam_sensor_active_front>;
+		pinctrl-1 = <&cam_sensor_mclk2_suspend
+				 &cam_sensor_suspend_front>;
+		gpios = <&tlmm 96 0>,
+			<&tlmm 78 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
+					"CAM_RESET2";
+		sensor-position = <1>;
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor2 {
+		cell-index = <2>;
+		compatible = "qcom,cam-sensor";
+		csiphy-sd-index = <2>;
+		sensor-position-roll = <90>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <0>;
+		eeprom-src = <&eeprom_front>;
+		cam_vio-supply = <&pm8009_l7>;
+		cam_bob-supply = <&pm8150a_bob>;
+		cam_vana-supply = <&pm8009_l6>;
+		cam_vdig-supply = <&pm8009_l3>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_bob";
+		rgltr-cntrl-support;
+		pwm-switch;
+		rgltr-min-voltage = <0 2800000 1056000 0 3008000>;
+		rgltr-max-voltage = <0 3000000 1056000 0 3960000>;
+		rgltr-load-current = <0 80000 1200000 0 2000000>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_active
+				 &cam_sensor_active_front>;
+		pinctrl-1 = <&cam_sensor_mclk2_suspend
+				 &cam_sensor_suspend_front>;
+		gpios = <&tlmm 96 0>,
+			<&tlmm 78 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
+					"CAM_RESET2";
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-camera.dtsi b/arch/arm64/boot/dts/qcom/kona-camera.dtsi
index d01dfd1..d93c443 100644
--- a/arch/arm64/boot/dts/qcom/kona-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-camera.dtsi
@@ -18,7 +18,8 @@
 		interrupts = <GIC_SPI 477 IRQ_TYPE_EDGE_RISING>;
 		interrupt-names = "csiphy";
 		gdscr-supply = <&titan_top_gdsc>;
-		regulator-names = "gdscr";
+		refgen-supply = <&refgen>;
+		regulator-names = "gdscr", "refgen";
 		csi-vdd-voltage = <1200000>;
 		mipi-csi-vdd-supply = <&pm8150_l9>;
 		clocks = <&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
@@ -45,7 +46,8 @@
 		interrupts = <GIC_SPI 478 IRQ_TYPE_EDGE_RISING>;
 		interrupt-names = "csiphy";
 		gdscr-supply = <&titan_top_gdsc>;
-		regulator-names = "gdscr";
+		refgen-supply = <&refgen>;
+		regulator-names = "gdscr", "refgen";
 		csi-vdd-voltage = <1200000>;
 		mipi-csi-vdd-supply = <&pm8150_l9>;
 		clocks = <&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
@@ -73,7 +75,8 @@
 		interrupts = <GIC_SPI 479 IRQ_TYPE_EDGE_RISING>;
 		interrupt-names = "csiphy";
 		gdscr-supply = <&titan_top_gdsc>;
-		regulator-names = "gdscr";
+		refgen-supply = <&refgen>;
+		regulator-names = "gdscr", "refgen";
 		csi-vdd-voltage = <1200000>;
 		mipi-csi-vdd-supply = <&pm8150_l9>;
 		clocks = <&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
@@ -100,7 +103,8 @@
 		interrupts = <GIC_SPI 448 IRQ_TYPE_EDGE_RISING>;
 		interrupt-names = "csiphy";
 		gdscr-supply = <&titan_top_gdsc>;
-		regulator-names = "gdscr";
+		refgen-supply = <&refgen>;
+		regulator-names = "gdscr", "refgen";
 		csi-vdd-voltage = <1200000>;
 		mipi-csi-vdd-supply = <&pm8150_l9>;
 		clocks = <&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
@@ -127,7 +131,8 @@
 		interrupts = <GIC_SPI 86 IRQ_TYPE_EDGE_RISING>;
 		interrupt-names = "csiphy";
 		gdscr-supply = <&titan_top_gdsc>;
-		regulator-names = "gdscr";
+		refgen-supply = <&refgen>;
+		regulator-names = "gdscr", "refgen";
 		csi-vdd-voltage = <1200000>;
 		mipi-csi-vdd-supply = <&pm8150_l9>;
 		clocks = <&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
@@ -154,7 +159,8 @@
 		interrupts = <GIC_SPI 89 IRQ_TYPE_EDGE_RISING>;
 		interrupt-names = "csiphy";
 		gdscr-supply = <&titan_top_gdsc>;
-		regulator-names = "gdscr";
+		refgen-supply = <&refgen>;
+		regulator-names = "gdscr", "refgen";
 		csi-vdd-voltage = <1200000>;
 		mipi-csi-vdd-supply = <&pm8150_l9>;
 		clocks = <&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
@@ -442,10 +448,10 @@
 				};
 
 				iova-mem-region-shared {
-					/* Shared region is 100MB long */
+					/* Shared region is 150MB long */
 					iova-region-name = "shared";
 					iova-region-start = <0x7400000>;
-					iova-region-len = <0x6400000>;
+					iova-region-len = <0x9600000>;
 					iova-region-id = <0x1>;
 					status = "ok";
 				};
@@ -453,17 +459,17 @@
 				iova-mem-region-secondary-heap {
 					/* Secondary heap region is 1MB long */
 					iova-region-name = "secheap";
-					iova-region-start = <0xd800000>;
+					iova-region-start = <0x10a00000>;
 					iova-region-len = <0x100000>;
 					iova-region-id = <0x4>;
 					status = "ok";
 				};
 
 				iova-mem-region-io {
-					/* IO region is approximately 3 GB */
+					/* IO region is approximately 3.3 GB */
 					iova-region-name = "io";
-					iova-region-start = <0xda00000>;
-					iova-region-len = <0xace00000>;
+					iova-region-start = <0x10c00000>;
+					iova-region-len = <0xcf300000>;
 					iova-region-id = <0x3>;
 					status = "ok";
 				};
@@ -471,7 +477,7 @@
 				iova-mem-qdss-region {
 				/* QDSS region is approximately 1MB */
 					iova-region-name = "qdss";
-					iova-region-start = <0xd900000>;
+					iova-region-start = <0x10b00000>;
 					iova-region-len = <0x100000>;
 					iova-region-id = <0x5>;
 					qdss-phy-addr = <0x16790000>;
diff --git a/arch/arm64/boot/dts/qcom/kona-cdp.dtsi b/arch/arm64/boot/dts/qcom/kona-cdp.dtsi
index 5b90203..6632aca 100644
--- a/arch/arm64/boot/dts/qcom/kona-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-cdp.dtsi
@@ -3,11 +3,12 @@
  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
-
+#include <dt-bindings/gpio/gpio.h>
 #include "kona-pmic-overlay.dtsi"
 #include "kona-sde-display.dtsi"
 #include "kona-camera-sensor-cdp.dtsi"
 #include "kona-audio-overlay.dtsi"
+#include "kona-thermal-overlay.dtsi"
 
 &qupv3_se12_2uart {
 	status = "ok";
@@ -30,6 +31,29 @@
 			&tert_mi2s_sd0_sleep>;
 };
 
+&qupv3_se1_i2c {
+	status = "ok";
+	qcom,clk-freq-out = <1000000>;
+	#address-cells = <1>;
+	#size-cells = <0>;
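+	/* NQ NCI NFC controller at I2C address 0x28; IRQ on TLMM GPIO 111 */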
+	nq@28 {
+		compatible = "qcom,nq-nci";
+		reg = <0x28>;
+		qcom,nq-irq = <&tlmm 111 0x00>;
+		qcom,nq-ven = <&tlmm 6 0x00>;
+		qcom,nq-firm = <&tlmm 110 0x00>;
+		qcom,nq-clkreq = <&tlmm 7 0x00>;
+		interrupt-parent = <&tlmm>;
+		interrupts = <111 0>;
+		interrupt-names = "nfc_irq";
+		pinctrl-names = "nfc_active", "nfc_suspend";
+		pinctrl-0 = <&nfc_int_active &nfc_enable_active
+				&nfc_clk_req_active>;
+		pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend
+				&nfc_clk_req_suspend>;
+	};
+};
+
 &ufsphy_mem {
 	compatible = "qcom,ufs-phy-qmp-v4";
 
@@ -59,11 +83,64 @@
 	status = "ok";
 };
 
+&soc {
+	gpio_keys {
+		compatible = "gpio-keys";
+		label = "gpio-keys";
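+		/* Home (PM8150 GPIO1) and vol-up (GPIO6), active low, 15 ms debounce */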
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&key_home_default
+			     &key_vol_up_default>;
+
+		home {
+			label = "home";
+			gpios = <&pm8150_gpios 1 GPIO_ACTIVE_LOW>;
+			linux,input-type = <1>;
+			linux,code = <KEY_HOME>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+			linux,can-disable;
+		};
+
+		vol_up {
+			label = "volume_up";
+			gpios = <&pm8150_gpios 6 GPIO_ACTIVE_LOW>;
+			linux,input-type = <1>;
+			linux,code = <KEY_VOLUMEUP>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+			linux,can-disable;
+		};
+	};
+};
+
+&qupv3_se13_i2c {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	status = "ok";
+
+	st_fts@49 {
+		compatible = "st,fts";
+		reg = <0x49>;
+		interrupt-parent = <&tlmm>;
+		interrupts = <39 0x2008>;
+		vdd-supply = <&pm8150a_l1>;
+		avdd-supply = <&pm8150_l13>;
+		pinctrl-names = "pmx_ts_active", "pmx_ts_suspend";
+		pinctrl-0 = <&ts_active>;
+		pinctrl-1 = <&ts_int_suspend &ts_reset_suspend>;
+		st,irq-gpio = <&tlmm 39 0x2008>;
+		st,reset-gpio = <&tlmm 38 0x00>;
+		st,regulator_dvdd = "vdd";
+		st,regulator_avdd = "avdd";
+	};
+};
+
 &vendor {
 	bluetooth: bt_qca6390 {
 		compatible = "qca,qca6390";
 		pinctrl-names = "default";
-		pinctrl-0 = <&bt_en_active>;
+		pinctrl-0 = <&bt_en_sleep>;
 		qca,bt-reset-gpio = <&tlmm 21 0>; /* BT_EN */
 		qca,bt-vdd-aon-supply = <&pm8150_s6>;
 		qca,bt-vdd-dig-supply = <&pm8009_s2>;
@@ -87,6 +164,87 @@
 	qcom,platform-reset-gpio = <&tlmm 75 0>;
 };
 
+&dsi_sw43404_amoled_video {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <1023>;
+	qcom,mdss-brightness-max-level = <255>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_sw43404_amoled_fhd_plus_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <1023>;
+	qcom,mdss-brightness-max-level = <255>;
+	qcom,platform-te-gpio = <&tlmm 66 0>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_sharp_4k_dsc_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply_avdd>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_external";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,platform-te-gpio = <&tlmm 66 0>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+	qcom,platform-en-gpio = <&tlmm 60 0>;
+};
+
+&dsi_sharp_4k_dsc_video {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply_avdd>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_external";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+	qcom,platform-en-gpio = <&tlmm 60 0>;
+};
+
+&dsi_sharp_1080_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply_avdd>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_external";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,platform-te-gpio = <&tlmm 66 0>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_dual_nt35597_truly_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply_avdd>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_external";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,platform-te-gpio = <&tlmm 66 0>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_dual_nt35597_truly_video {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply_avdd>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_external";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_nt35695b_truly_fhd_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply_avdd>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_external";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,platform-te-gpio = <&tlmm 66 0>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_nt35695b_truly_fhd_video {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply_avdd>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_external";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
 &sde_dsi {
 	qcom,dsi-default-panel = <&dsi_sw43404_amoled_cmd>;
 };
@@ -173,14 +331,6 @@
 		qcom,pre-scaling = <1 3>;
 	};
 
-	xo_therm@4c {
-		reg = <ADC_XO_THERM_PU2>;
-		label = "xo_therm";
-		qcom,ratiometric;
-		qcom,hw-settle-time = <200>;
-		qcom,pre-scaling = <1 1>;
-	};
-
 	skin_therm@4d {
 		reg = <ADC_AMUX_THM1_PU2>;
 		label = "skin_therm";
@@ -248,12 +398,6 @@
 	#address-cells = <1>;
 	#size-cells = <0>;
 
-	xo_therm@4c {
-		reg = <ADC_XO_THERM_PU2>;
-		qcom,ratiometric;
-		qcom,hw-settle-time = <200>;
-	};
-
 	skin_therm@4d {
 		reg = <ADC_AMUX_THM1_PU2>;
 		qcom,ratiometric;
@@ -319,9 +463,9 @@
 		"IN2_HPHR", "HPHR_OUT",
 		"IN3_AUX", "AUX_OUT",
 		"TX SWR_ADC0", "ADC1_OUTPUT",
-		"TX SWR_ADC2", "ADC2_OUTPUT",
-		"TX SWR_ADC3", "ADC3_OUTPUT",
-		"TX SWR_ADC4", "ADC4_OUTPUT",
+		"TX SWR_ADC1", "ADC2_OUTPUT",
+		"TX SWR_ADC2", "ADC3_OUTPUT",
+		"TX SWR_ADC3", "ADC4_OUTPUT",
 		"TX SWR_DMIC0", "DMIC1_OUTPUT",
 		"TX SWR_DMIC1", "DMIC2_OUTPUT",
 		"TX SWR_DMIC2", "DMIC3_OUTPUT",
@@ -338,5 +482,128 @@
 		"RX_TX DEC2_INP", "TX DEC2 MUX",
 		"RX_TX DEC3_INP", "TX DEC3 MUX",
 		"SpkrLeft IN", "WSA_SPK1 OUT",
-		"SpkrRight IN", "WSA_SPK2 OUT";
+		"SpkrRight IN", "WSA_SPK2 OUT",
+		"VA DMIC0", "MIC BIAS3",
+		"VA DMIC1", "MIC BIAS3",
+		"VA DMIC2", "MIC BIAS1",
+		"VA DMIC3", "MIC BIAS1",
+		"VA DMIC4", "MIC BIAS4",
+		"VA DMIC5", "MIC BIAS4",
+		"VA SWR_ADC0", "ADC1_OUTPUT",
+		"VA SWR_ADC1", "ADC2_OUTPUT",
+		"VA SWR_ADC2", "ADC3_OUTPUT",
+		"VA SWR_ADC3", "ADC4_OUTPUT",
+		"VA SWR_MIC0", "DMIC1_OUTPUT",
+		"VA SWR_MIC1", "DMIC2_OUTPUT",
+		"VA SWR_MIC2", "DMIC3_OUTPUT",
+		"VA SWR_MIC3", "DMIC4_OUTPUT",
+		"VA SWR_MIC4", "DMIC5_OUTPUT",
+		"VA SWR_MIC5", "DMIC6_OUTPUT",
+		"VA SWR_MIC6", "DMIC7_OUTPUT",
+		"VA SWR_MIC7", "DMIC8_OUTPUT";
 };
+
+&thermal_zones {
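+	/*
+	 * Userspace-governed ADC thermal zones: no kernel polling, each
+	 * with a single passive trip at 125 C (1 C hysteresis).
+	 */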
+	wp-therm-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&pm8150b_adc_tm ADC_AMUX_THM1_PU2>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	skin-therm-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&pm8150_adc_tm ADC_AMUX_THM1_PU2>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	pa-therm1-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&pm8150_adc_tm ADC_AMUX_THM2_PU2>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	camera-flash-therm-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&pm8150l_adc_tm ADC_AMUX_THM1_PU2>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	skin-msm-therm-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&pm8150l_adc_tm ADC_AMUX_THM2_PU2>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	pa-therm2-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&pm8150l_adc_tm ADC_AMUX_THM3_PU2>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+};
+
+&sdhc_2 {
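+	/* SD card slot; card detect on TLMM GPIO 77, active low */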
+	vdd-supply = <&pm8150a_l9>;
+	qcom,vdd-voltage-level = <2950000 2960000>;
+	qcom,vdd-current-level = <200 800000>;
+
+	vdd-io-supply = <&pm8150a_l6>;
+	qcom,vdd-io-voltage-level = <1808000 2960000>;
+	qcom,vdd-io-current-level = <200 22000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &storage_cd>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &storage_cd>;
+
+	cd-gpios = <&tlmm 77 GPIO_ACTIVE_LOW>;
+
+	status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-coresight.dtsi b/arch/arm64/boot/dts/qcom/kona-coresight.dtsi
index f564a31..845a05e 100644
--- a/arch/arm64/boot/dts/qcom/kona-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-coresight.dtsi
@@ -1911,49 +1911,97 @@
 		clock-names = "apb_pclk";
 	};
 
-	cti0_ddr0: cti@6a02000 {
+	cti0_ddr0: cti@6e01000 {
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x000bb966>;
-		reg = <0x6a02000 0x1000>;
+		reg = <0x6e01000 0x1000>;
 		reg-names = "cti-base";
 
-		coresight-name = "coresight-cti-ddr_dl_0_cti0";
+		coresight-name = "coresight-cti-ddr_dl_0_cti_0";
 
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
 	};
 
-	cti1_ddr0: cti@6a03000 {
+	cti1_ddr0: cti@6e02000 {
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x000bb966>;
-		reg = <0x6a03000 0x1000>;
+		reg = <0x6e02000 0x1000>;
 		reg-names = "cti-base";
 
-		coresight-name = "coresight-cti-ddr_dl_0_cti1";
+		coresight-name = "coresight-cti-ddr_dl_0_cti_1";
 
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
 	};
 
-	cti0_ddr1: cti@6a10000 {
+	cti2_ddr0: cti@6e03000 {
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x000bb966>;
-		reg = <0x6a10000 0x1000>;
+		reg = <0x6e03000 0x1000>;
 		reg-names = "cti-base";
 
-		coresight-name = "coresight-cti-ddr_dl_1_cti0";
+		coresight-name = "coresight-cti-ddr_dl_0_cti_2";
 
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
 	};
 
-	cti1_ddr1: cti@6a11000 {
+	cti0_ddr1: cti@6e0c000 {
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x000bb966>;
-		reg = <0x6a11000 0x1000>;
+		reg = <0x6e0c000 0x1000>;
 		reg-names = "cti-base";
 
-		coresight-name = "coresight-cti-ddr_dl_1_cti1";
+		coresight-name = "coresight-cti-ddr_dl_1_cti_0";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti1_ddr1: cti@6e0d000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb966>;
+		reg = <0x6e0d000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-ddr_dl_1_cti_1";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti2_ddr1: cti@6e0e000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb966>;
+		reg = <0x6e0e000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-ddr_dl_1_cti_2";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_ddr_ch02: cti@6e11000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb966>;
+		reg = <0x6e11000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-ddr_ch02_dl_cti_0";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti_ddr_ch13: cti@6e21000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x000bb966>;
+		reg = <0x6e21000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-ddr_ch13_dl_cti_0";
 
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
diff --git a/arch/arm64/boot/dts/qcom/kona-gpu.dtsi b/arch/arm64/boot/dts/qcom/kona-gpu.dtsi
index bef41c1..82a7cfb 100644
--- a/arch/arm64/boot/dts/qcom/kona-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-gpu.dtsi
@@ -25,21 +25,6 @@
 	gpu_opp_table: gpu-opp-table {
 		compatible = "operating-points-v2";
 
-		opp-700000000 {
-			opp-hz = /bits/ 64 <700000000>;
-			opp-microvolt = <RPMH_REGULATOR_LEVEL_TURBO>;
-		};
-
-		opp-670000000 {
-			opp-hz = /bits/ 64 <670000000>;
-			opp-microvolt = <RPMH_REGULATOR_LEVEL_NOM_L1>;
-		};
-
-		opp-625000000 {
-			opp-hz = /bits/ 64 <625000000>;
-			opp-microvolt = <RPMH_REGULATOR_LEVEL_NOM>;
-		};
-
 		opp-480000000 {
 			opp-hz = /bits/ 64 <480000000>;
 			opp-microvolt = <RPMH_REGULATOR_LEVEL_SVS_L1>;
@@ -61,17 +46,18 @@
 		compatible = "qcom,kgsl-3d0", "qcom,kgsl-3d";
 		status = "ok";
 		reg = <0x3d00000 0x40000>, <0x3d61000 0x800>,
-			<0x3de0000 0x10000>;
-		reg-names = "kgsl_3d0_reg_memory", "cx_dbgc", "rscc";
-		interrupts = <0 300 IRQ_TYPE_NONE>;
+			<0x3de0000 0x10000>, <0x3d8b000 0x2000>;
+		reg-names = "kgsl_3d0_reg_memory", "cx_dbgc", "rscc",
+				"isense_cntl";
+		interrupts = <0 300 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-names = "kgsl_3d0_irq";
 		qcom,id = <0>;
 
 		qcom,chipid = <0x06050000>;
 
-		qcom,initial-pwrlevel = <5>;
+		qcom,initial-pwrlevel = <2>;
 
-		qcom,idle-timeout = <1000000>; /* msecs */
+		qcom,idle-timeout = <80>; /* msecs */
 
 		qcom,no-nap;
 
@@ -105,7 +91,7 @@
 
 		/* Bus Scale Settings */
 		qcom,gpubw-dev = <&gpubw>;
-		//qcom,bus-control;
+		qcom,bus-control;
 		qcom,msm-bus,name = "grp3d";
 		qcom,bus-width = <32>;
 		qcom,msm-bus,num-cases = <13>;
@@ -199,54 +185,30 @@
 
 			qcom,gpu-pwrlevel@0 {
 				reg = <0>;
-				qcom,gpu-freq = <700000000>;
-				qcom,bus-freq = <12>;
-				qcom,bus-min = <10>;
-				qcom,bus-max = <12>;
-			};
-
-			qcom,gpu-pwrlevel@1 {
-				reg = <1>;
-				qcom,gpu-freq = <670000000>;
-				qcom,bus-freq = <11>;
-				qcom,bus-min = <9>;
-				qcom,bus-max = <11>;
-			};
-
-			qcom,gpu-pwrlevel@2 {
-				reg = <2>;
-				qcom,gpu-freq = <625000000>;
-				qcom,bus-freq = <10>;
-				qcom,bus-min = <8>;
-				qcom,bus-max = <10>;
-			};
-
-			qcom,gpu-pwrlevel@3 {
-				reg = <3>;
 				qcom,gpu-freq = <480000000>;
 				qcom,bus-freq = <8>;
 				qcom,bus-min = <7>;
 				qcom,bus-max = <9>;
 			};
 
-			qcom,gpu-pwrlevel@4 {
-				reg = <4>;
+			qcom,gpu-pwrlevel@1 {
+				reg = <1>;
 				qcom,gpu-freq = <381000000>;
 				qcom,bus-freq = <5>;
 				qcom,bus-min = <5>;
 				qcom,bus-max = <7>;
 			};
 
-			qcom,gpu-pwrlevel@5 {
-				reg = <5>;
+			qcom,gpu-pwrlevel@2 {
+				reg = <2>;
 				qcom,gpu-freq = <290000000>;
 				qcom,bus-freq = <4>;
 				qcom,bus-min = <3>;
 				qcom,bus-max = <5>;
 			};
 
-			qcom,gpu-pwrlevel@6 {
-				reg = <6>;
+			qcom,gpu-pwrlevel@3 {
+				reg = <3>;
 				qcom,gpu-freq = <0>;
 				qcom,bus-freq = <0>;
 				qcom,bus-min = <0>;
@@ -300,7 +262,8 @@
 			"kgsl_gmu_pdc_cfg",
 			"kgsl_gmu_pdc_seq";
 
-		interrupts = <0 304 IRQ_TYPE_NONE>, <0 305 IRQ_TYPE_NONE>;
+		interrupts = <0 304 IRQ_TYPE_LEVEL_HIGH>,
+						<0 305 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-names = "kgsl_hfi_irq", "kgsl_gmu_irq";
 
 		qcom,msm-bus,name = "cnoc";
diff --git a/arch/arm64/boot/dts/qcom/kona-lpi.dtsi b/arch/arm64/boot/dts/qcom/kona-lpi.dtsi
index 687ffe0..9b03761 100644
--- a/arch/arm64/boot/dts/qcom/kona-lpi.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-lpi.dtsi
@@ -7,6 +7,7 @@
 	lpi_tlmm: lpi_pinctrl@33c0000 {
 		compatible = "qcom,lpi-pinctrl";
 		reg = <0x33c0000 0x0>;
+		qcom,slew-reg = <0x355a000 0x0>;
 		qcom,num-gpios = <14>;
 		gpio-controller;
 		#gpio-cells = <2>;
@@ -17,6 +18,13 @@
 				      <0x00008000>, <0x00009000>,
 				      <0x0000A000>, <0x0000B000>,
 				      <0x0000C000>, <0x0000D000>;
+		qcom,lpi-slew-offset-tbl = <0x00000000>, <0x00000002>,
+					   <0x00000004>, <0x00000008>,
+					   <0x0000000A>, <0x0000000C>,
+					   <0x00000000>, <0x00000000>,
+					   <0x00000000>, <0x00000000>,
+					   <0x00000010>, <0x00000012>,
+					   <0x00000000>, <0x00000000>;
 
 		quat_mi2s_sck {
 			quat_mi2s_sck_sleep: quat_mi2s_sck_sleep {
@@ -1288,7 +1296,8 @@
 				config {
 					pins = "gpio10";
 					drive-strength = <2>;
-					bias-bus-hold;
+					input-enable;
+					bias-pull-down;
 				};
 			};
 
@@ -1301,7 +1310,8 @@
 				config {
 					pins = "gpio10";
 					drive-strength = <2>;
-					bias-bus-hold;
+					slew-rate = <1>;
+					bias-disable;
 				};
 			};
 		};
@@ -1315,8 +1325,9 @@
 
 				config {
 					pins = "gpio11";
-					drive-strength = <4>;
-					bias-bus-hold;
+					drive-strength = <2>;
+					input-enable;
+					bias-pull-down;
 				};
 			};
 
@@ -1328,7 +1339,8 @@
 
 				config {
 					pins = "gpio11";
-					drive-strength = <4>;
+					drive-strength = <2>;
+					slew-rate = <1>;
 					bias-bus-hold;
 				};
 			};
@@ -1338,12 +1350,13 @@
 			mux {
 				pins = "gpio0";
 				function = "func1";
+				input-enable;
+				bias-pull-down;
 			};
 
 			config {
 				pins = "gpio0";
 				drive-strength = <2>;
-				bias-bus-hold;
 			};
 		};
 
@@ -1355,8 +1368,9 @@
 
 			config {
 				pins = "gpio0";
-				drive-strength = <8>;
-				bias-bus-hold;
+				drive-strength = <2>;
+				slew-rate = <1>;
+				bias-disable;
 			};
 		};
 
@@ -1369,7 +1383,8 @@
 			config {
 				pins = "gpio1";
 				drive-strength = <2>;
-				bias-bus-hold;
+				input-enable;
+				bias-pull-down;
 			};
 		};
 
@@ -1381,7 +1396,8 @@
 
 			config {
 				pins = "gpio1";
-				drive-strength = <8>;
+				drive-strength = <2>;
+				slew-rate = <1>;
 				bias-bus-hold;
 			};
 		};
@@ -1395,7 +1411,8 @@
 			config {
 				pins = "gpio2";
 				drive-strength = <2>;
-				bias-bus-hold;
+				input-enable;
+				bias-pull-down;
 			};
 		};
 
@@ -1407,7 +1424,8 @@
 
 			config {
 				pins = "gpio2";
-				drive-strength = <8>;
+				drive-strength = <2>;
+				slew-rate = <1>;
 				bias-bus-hold;
 			};
 		};
@@ -1421,7 +1439,8 @@
 			config {
 				pins = "gpio3";
 				drive-strength = <2>;
-				bias-bus-hold;
+				input-enable;
+				bias-pull-down;
 			};
 		};
 
@@ -1433,8 +1452,9 @@
 
 			config {
 				pins = "gpio3";
-				drive-strength = <8>;
-				bias-bus-hold;
+				drive-strength = <2>;
+				slew-rate = <1>;
+				bias-disable;
 			};
 		};
 
@@ -1447,7 +1467,8 @@
 			config {
 				pins = "gpio4";
 				drive-strength = <2>;
-				bias-bus-hold;
+				input-enable;
+				bias-pull-down;
 			};
 		};
 
@@ -1459,7 +1480,8 @@
 
 			config {
 				pins = "gpio4";
-				drive-strength = <8>;
+				drive-strength = <2>;
+				slew-rate = <1>;
 				bias-bus-hold;
 			};
 		};
@@ -1473,7 +1495,8 @@
 			config {
 				pins = "gpio5";
 				drive-strength = <2>;
-				bias-bus-hold;
+				input-enable;
+				bias-pull-down;
 			};
 		};
 
@@ -1485,7 +1508,8 @@
 
 			config {
 				pins = "gpio5";
-				drive-strength = <8>;
+				drive-strength = <2>;
+				slew-rate = <1>;
 				bias-bus-hold;
 			};
 		};
diff --git a/arch/arm64/boot/dts/qcom/kona-mhi.dtsi b/arch/arm64/boot/dts/qcom/kona-mhi.dtsi
index 009fbce..a70b183 100644
--- a/arch/arm64/boot/dts/qcom/kona-mhi.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-mhi.dtsi
@@ -19,7 +19,7 @@
 		esoc-0 = <&mdm0>;
 
 		/* mhi bus specific settings */
-		mhi,max-channels = <106>;
+		mhi,max-channels = <110>;
 		mhi,timeout = <2000>;
 
 		mhi_channels: mhi_channels {
@@ -253,8 +253,8 @@
 			mhi_chan@25 {
 				reg = <25>;
 				label = "BL";
-				mhi,num-elements = <64>;
-				mhi,event-ring = <2>;
+				mhi,num-elements = <32>;
+				mhi,event-ring = <1>;
 				mhi,chan-dir = <2>;
 				mhi,data-type = <0>;
 				mhi,doorbell-mode = <2>;
@@ -330,7 +330,7 @@
 			mhi_chan@52 {
 				reg = <52>;
 				label = "SLPI_0";
-				mhi,event-ring = <4>;
+				mhi,event-ring = <5>;
 				mhi,chan-dir = <0>;
 				mhi,data-type = <3>;
 				mhi,ee = <0x4>;
@@ -340,7 +340,7 @@
 			mhi_chan@53 {
 				reg = <53>;
 				label = "SLPI_1";
-				mhi,event-ring = <4>;
+				mhi,event-ring = <5>;
 				mhi,chan-dir = <0>;
 				mhi,data-type = <3>;
 				mhi,ee = <0x4>;
@@ -350,7 +350,7 @@
 			mhi_chan@70 {
 				reg = <70>;
 				label = "ADSP_2";
-				mhi,event-ring = <5>;
+				mhi,event-ring = <4>;
 				mhi,chan-dir = <0>;
 				mhi,data-type = <3>;
 				mhi,ee = <0x4>;
@@ -360,7 +360,7 @@
 			mhi_chan@71 {
 				reg = <71>;
 				label = "ADSP_3";
-				mhi,event-ring = <5>;
+				mhi,event-ring = <4>;
 				mhi,chan-dir = <0>;
 				mhi,data-type = <3>;
 				mhi,ee = <0x4>;
@@ -387,11 +387,22 @@
 				mhi,offload-chan;
 			};
 
+			mhi_chan@80 {
+				reg = <80>;
+				label = "AUDIO_VOICE_0";
+				mhi,event-ring = <0>;
+				mhi,chan-dir = <0>;
+				mhi,ee = <0x4>;
+				mhi,data-type = <3>;
+				mhi,offload-chan;
+				status = "ok";
+			};
+
 			mhi_chan@100 {
 				reg = <100>;
 				label = "IP_HW0";
 				mhi,num-elements = <512>;
-				mhi,event-ring = <4>;
+				mhi,event-ring = <6>;
 				mhi,chan-dir = <1>;
 				mhi,data-type = <1>;
 				mhi,doorbell-mode = <3>;
@@ -403,12 +414,68 @@
 				reg = <101>;
 				label = "IP_HW0";
 				mhi,num-elements = <512>;
-				mhi,event-ring = <5>;
+				mhi,event-ring = <7>;
 				mhi,chan-dir = <2>;
-				mhi,data-type = <1>;
+				mhi,data-type = <4>;
 				mhi,doorbell-mode = <3>;
 				mhi,ee = <0x4>;
 			};
+
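+			/*
+			 * mhi,offload-chan rings are client managed; channels
+			 * with mhi,lpm-notify also get link power state
+			 * notifications.
+			 */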
+			mhi_chan@102 {
+				reg = <102>;
+				label = "IP_HW_ADPL";
+				mhi,event-ring = <8>;
+				mhi,chan-dir = <2>;
+				mhi,data-type = <3>;
+				mhi,ee = <0x4>;
+				mhi,offload-chan;
+				mhi,lpm-notify;
+			};
+
+			mhi_chan@103 {
+				reg = <103>;
+				label = "IP_HW_QDSS";
+				mhi,num-elements = <128>;
+				mhi,event-ring = <9>;
+				mhi,chan-dir = <2>;
+				mhi,data-type = <0>;
+				mhi,doorbell-mode = <2>;
+				mhi,ee = <0x4>;
+			};
+
+			mhi_chan@104 {
+				reg = <104>;
+				label = "IP_HW0_RSC";
+				mhi,num-elements = <512>;
+				mhi,local-elements = <3078>;
+				mhi,event-ring = <7>;
+				mhi,chan-dir = <2>;
+				mhi,data-type = <5>;
+				mhi,doorbell-mode = <3>;
+				mhi,ee = <0x4>;
+				mhi,chan-type = <3>;
+			};
+
+			mhi_chan@107 {
+				reg = <107>;
+				label = "IP_HW_MHIP_1";
+				mhi,event-ring = <10>;
+				mhi,chan-dir = <1>;
+				mhi,data-type = <3>;
+				mhi,ee = <0x4>;
+				mhi,offload-chan;
+			};
+
+			mhi_chan@108 {
+				reg = <108>;
+				label = "IP_HW_MHIP_1";
+				mhi,event-ring = <11>;
+				mhi,chan-dir = <2>;
+				mhi,data-type = <3>;
+				mhi,ee = <0x4>;
+				mhi,offload-chan;
+				mhi,lpm-notify;
+			};
 		};
 
 		mhi_events: mhi_events {
@@ -450,6 +517,26 @@
 			};
 
 			mhi_event@4 {
+				mhi,num-elements = <512>;
+				mhi,intmod = <5>;
+				mhi,msi = <0>;
+				mhi,priority = <1>;
+				mhi,brstmode = <3>;
+				mhi,client-manage;
+				mhi,offload;
+			};
+
+			mhi_event@5 {
+				mhi,num-elements = <512>;
+				mhi,intmod = <5>;
+				mhi,msi = <0>;
+				mhi,priority = <1>;
+				mhi,brstmode = <3>;
+				mhi,client-manage;
+				mhi,offload;
+			};
+
+			mhi_event@6 {
 				mhi,num-elements = <1024>;
 				mhi,intmod = <5>;
 				mhi,msi = <5>;
@@ -457,11 +544,9 @@
 				mhi,priority = <1>;
 				mhi,brstmode = <3>;
 				mhi,hw-ev;
-				mhi,client-manage;
-				mhi,offload;
 			};
 
-			mhi_event@5 {
+			mhi_event@7 {
 				mhi,num-elements = <1024>;
 				mhi,intmod = <5>;
 				mhi,msi = <6>;
@@ -470,6 +555,51 @@
 				mhi,brstmode = <3>;
 				mhi,hw-ev;
 				mhi,client-manage;
+			};
+
+			mhi_event@8 {
+				mhi,num-elements = <0>;
+				mhi,intmod = <0>;
+				mhi,msi = <0>;
+				mhi,chan = <102>;
+				mhi,priority = <1>;
+				mhi,brstmode = <3>;
+				mhi,hw-ev;
+				mhi,client-manage;
+				mhi,offload;
+			};
+
+			mhi_event@9 {
+				mhi,num-elements = <1024>;
+				mhi,intmod = <5>;
+				mhi,msi = <7>;
+				mhi,chan = <103>;
+				mhi,priority = <1>;
+				mhi,brstmode = <2>;
+				mhi,hw-ev;
+			};
+
+			mhi_event@10 {
+				mhi,num-elements = <0>;
+				mhi,intmod = <0>;
+				mhi,msi = <0>;
+				mhi,chan = <107>;
+				mhi,priority = <1>;
+				mhi,brstmode = <3>;
+				mhi,hw-ev;
+				mhi,client-manage;
+				mhi,offload;
+			};
+
+			mhi_event@11 {
+				mhi,num-elements = <0>;
+				mhi,intmod = <0>;
+				mhi,msi = <0>;
+				mhi,chan = <108>;
+				mhi,priority = <1>;
+				mhi,brstmode = <3>;
+				mhi,hw-ev;
+				mhi,client-manage;
 				mhi,offload;
 			};
 		};
@@ -483,13 +613,14 @@
 				mhi,chan = "IP_HW0";
 				mhi,interface-name = "rmnet_mhi";
 				mhi,mru = <0x4000>;
+				mhi,chain-skb;
 			};
 
-			mhi_netdev_1: mhi_rmnet@1 {
+			mhi_rmnet@1 {
 				reg = <0x1>;
-				mhi,chan = "IP_HW_ADPL";
-				mhi,interface-name = "rmnet_mhi";
-				mhi,mru = <0x4000>;
+				mhi,chan = "IP_HW0_RSC";
+				mhi,mru = <0x8000>;
+				mhi,rsc-parent = <&mhi_netdev_0>;
 			};
 
 			mhi_qrtr {
@@ -500,13 +631,13 @@
 			mhi_subsys_adsp_0: mhi_dev@2 {
 				reg = <0x2>;
 				mhi,chan = "ADSP_0";
-				mhi,num-devices = <4>;
+				mhi,max-devices = <4>;
 			};
 
 			mhi_subsys_slpi_0: mhi_dev@3 {
 				reg = <0x3>;
 				mhi,chan = "SLPI_0";
-				mhi,num-devices = <4>;
+				mhi,max-devices = <4>;
 			};
 		};
 	};
diff --git a/arch/arm64/boot/dts/qcom/kona-mtp.dtsi b/arch/arm64/boot/dts/qcom/kona-mtp.dtsi
index f4270d1..84bf346 100644
--- a/arch/arm64/boot/dts/qcom/kona-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-mtp.dtsi
@@ -3,11 +3,12 @@
  *  Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
-
+#include <dt-bindings/gpio/gpio.h>
 #include "kona-pmic-overlay.dtsi"
 #include "kona-sde-display.dtsi"
 #include "kona-camera-sensor-mtp.dtsi"
 #include "kona-audio-overlay.dtsi"
+#include "kona-thermal-overlay.dtsi"
 
 &qupv3_se12_2uart {
 	status = "ok";
@@ -30,6 +31,29 @@
 			&tert_mi2s_sd0_sleep>;
 };
 
+&qupv3_se1_i2c {
+	status = "ok";
+	qcom,clk-freq-out = <1000000>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+	nq@28 {
+		compatible = "qcom,nq-nci";
+		reg = <0x28>;
+		qcom,nq-irq = <&tlmm 111 0x00>;
+		qcom,nq-ven = <&tlmm 6 0x00>;
+		qcom,nq-firm = <&tlmm 110 0x00>;
+		qcom,nq-clkreq = <&tlmm 7 0x00>;
+		interrupt-parent = <&tlmm>;
+		interrupts = <111 0>;
+		interrupt-names = "nfc_irq";
+		pinctrl-names = "nfc_active", "nfc_suspend";
+		pinctrl-0 = <&nfc_int_active &nfc_enable_active
+				&nfc_clk_req_active>;
+		pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend
+				&nfc_clk_req_suspend>;
+	};
+};
+
 &ufsphy_mem {
 	compatible = "qcom,ufs-phy-qmp-v4";
 
@@ -59,11 +83,56 @@
 	status = "ok";
 };
 
+&soc {
+	gpio_keys {
+		compatible = "gpio-keys";
+		label = "gpio-keys";
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&key_vol_up_default>;
+
+		vol_up {
+			label = "volume_up";
+			gpios = <&pm8150_gpios 6 GPIO_ACTIVE_LOW>;
+			linux,input-type = <1>;
+			linux,code = <KEY_VOLUMEUP>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+			linux,can-disable;
+		};
+	};
+};
+
+&qupv3_se13_i2c {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	status = "ok";
+
+	st_fts@49 {
+		compatible = "st,fts";
+		reg = <0x49>;
+		interrupt-parent = <&tlmm>;
+		interrupts = <39 0x2008>;
+		vdd-supply = <&pm8150a_l1>;
+		avdd-supply = <&pm8150_l13>;
+		pinctrl-names = "pmx_ts_active", "pmx_ts_suspend";
+		pinctrl-0 = <&ts_active>;
+		pinctrl-1 = <&ts_int_suspend &ts_reset_suspend>;
+		st,irq-gpio = <&tlmm 39 0x2008>;
+		st,reset-gpio = <&tlmm 38 0x00>;
+		st,regulator_dvdd = "vdd";
+		st,regulator_avdd = "avdd";
+		st,x-flip = <1>;
+		st,y-flip = <1>;
+	};
+};
+
 &vendor {
 	bluetooth: bt_qca6390 {
 		compatible = "qca,qca6390";
 		pinctrl-names = "default";
-		pinctrl-0 = <&bt_en_active>;
+		pinctrl-0 = <&bt_en_sleep>;
 		qca,bt-reset-gpio = <&tlmm 21 0>; /* BT_EN */
 		qca,bt-vdd-aon-supply = <&pm8150_s6>;
 		qca,bt-vdd-dig-supply = <&pm8009_s2>;
@@ -82,6 +151,14 @@
 	};
 };
 
+&vreg_hap_boost {
+	status = "ok";
+};
+
+&pm8150b_haptics {
+	vdd-supply = <&vreg_hap_boost>;
+};
+
 &pm8150b_vadc {
 	#address-cells = <1>;
 	#size-cells = <0>;
@@ -185,14 +262,6 @@
 		qcom,pre-scaling = <1 3>;
 	};
 
-	xo_therm@4c {
-		reg = <ADC_XO_THERM_PU2>;
-		label = "xo_therm";
-		qcom,ratiometric;
-		qcom,hw-settle-time = <200>;
-		qcom,pre-scaling = <1 1>;
-	};
-
 	skin_therm@4d {
 		reg = <ADC_AMUX_THM1_PU2>;
 		label = "skin_therm";
@@ -260,12 +329,6 @@
 	#address-cells = <1>;
 	#size-cells = <0>;
 
-	xo_therm@4c {
-		reg = <ADC_XO_THERM_PU2>;
-		qcom,ratiometric;
-		qcom,hw-settle-time = <200>;
-	};
-
 	skin_therm@4d {
 		reg = <ADC_AMUX_THM1_PU2>;
 		qcom,ratiometric;
@@ -312,6 +375,130 @@
 	qcom,platform-reset-gpio = <&tlmm 75 0>;
 };
 
+&dsi_sw43404_amoled_video {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <1023>;
+	qcom,mdss-brightness-max-level = <255>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_sw43404_amoled_fhd_plus_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <1023>;
+	qcom,mdss-brightness-max-level = <255>;
+	qcom,platform-te-gpio = <&tlmm 66 0>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
 &sde_dsi {
 	qcom,dsi-default-panel = <&dsi_sw43404_amoled_cmd>;
 };
+
+&thermal_zones {
+	wp-therm-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&pm8150b_adc_tm ADC_AMUX_THM1_PU2>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	skin-therm-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&pm8150_adc_tm ADC_AMUX_THM1_PU2>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	pa-therm1-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&pm8150_adc_tm ADC_AMUX_THM2_PU2>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	camera-flash-therm-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&pm8150l_adc_tm ADC_AMUX_THM1_PU2>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	skin-msm-therm-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&pm8150l_adc_tm ADC_AMUX_THM2_PU2>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	pa-therm2-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&pm8150l_adc_tm ADC_AMUX_THM3_PU2>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+};
+
+&sdhc_2 {
+	vdd-supply = <&pm8150a_l9>;
+	qcom,vdd-voltage-level = <2950000 2960000>;
+	qcom,vdd-current-level = <200 800000>;
+
+	vdd-io-supply = <&pm8150a_l6>;
+	qcom,vdd-io-voltage-level = <1808000 2960000>;
+	qcom,vdd-io-current-level = <200 22000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &storage_cd>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &storage_cd>;
+
+	cd-gpios = <&tlmm 77 GPIO_ACTIVE_LOW>;
+
+	status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-npu.dtsi b/arch/arm64/boot/dts/qcom/kona-npu.dtsi
index 685afca..bf79abb 100644
--- a/arch/arm64/boot/dts/qcom/kona-npu.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-npu.dtsi
@@ -41,17 +41,13 @@
 				<&clock_npucc NPU_CC_DPM_TEMP_CLK>,
 				<&clock_npucc NPU_CC_CAL_HM0_DPM_IP_CLK>,
 				<&clock_npucc NPU_CC_CAL_HM1_DPM_IP_CLK>,
-				<&clock_npucc NPU_CC_DSP_AHBS_CLK>,
-				<&clock_npucc NPU_CC_DSP_AHBM_CLK>,
-				<&clock_npucc NPU_CC_DSP_AXI_CLK>,
-				<&clock_npucc NPU_CC_DSP_BWMON_CLK>,
-				<&clock_npucc NPU_CC_DSP_BWMON_AHB_CLK>,
 				<&clock_npucc NPU_CC_ATB_CLK>,
 				<&clock_npucc NPU_CC_S2P_CLK>,
 				<&clock_npucc NPU_CC_BWMON_CLK>,
 				<&clock_npucc NPU_CC_CAL_HM0_PERF_CNT_CLK>,
 				<&clock_npucc NPU_CC_CAL_HM1_PERF_CNT_CLK>,
-				<&clock_npucc NPU_CC_BTO_CORE_CLK>;
+				<&clock_npucc NPU_CC_BTO_CORE_CLK>,
+				<&clock_npucc NPU_DSP_CORE_CLK_SRC>;
 		clock-names = "xo_clk",
 				"npu_core_clk",
 				"cal_hm0_clk",
@@ -74,17 +70,13 @@
 				"dpm_temp_clk",
 				"cal_hm0_dpm_ip_clk",
 				"cal_hm1_dpm_ip_clk",
-				"dsp_ahbs_clk",
-				"dsp_ahbm_clk",
-				"dsp_axi_clk",
-				"dsp_bwmon_clk",
-				"dsp_bwmon_ahb_clk",
 				"atb_clk",
 				"s2p_clk",
 				"bwmon_clk",
 				"cal_hm0_perf_cnt_clk",
 				"cal_hm1_perf_cnt_clk",
-				"bto_core_clk";
+				"bto_core_clk",
+				"dsp_core_clk_src";
 
 		vdd-supply = <&npu_core_gdsc>;
 		vdd_cx-supply = <&VDD_CX_LEVEL>;
@@ -98,44 +90,6 @@
 			initial-pwrlevel = <4>;
 			qcom,npu-pwrlevel@0 {
 				reg = <0>;
-				vreg = <0>;
-				clk-freq = <19200000
-					60000000
-					200000000
-					200000000
-					200000000
-					200000000
-					120000000
-					20000000
-					200000000
-					60000000
-					19200000
-					50000000
-					50000000
-					60000000
-					60000000
-					60000000
-					19200000
-					60000000
-					19200000
-					50000000
-					200000000
-					200000000
-					60000000
-					60000000
-					120000000
-					19200000
-					60000000
-					30000000
-					50000000
-					19200000
-					200000000
-					200000000
-					19200000>;
-			};
-
-			qcom,npu-pwrlevel@1 {
-				reg = <1>;
 				vreg = <1>;
 				clk-freq = <19200000
 					100000000
@@ -159,21 +113,17 @@
 					50000000
 					200000000
 					200000000
-					100000000
-					100000000
-					200000000
-					19200000
-					100000000
 					60000000
 					50000000
 					19200000
 					300000000
 					300000000
-					19200000>;
+					19200000
+					300000000>;
 			};
 
-			qcom,npu-pwrlevel@2 {
-				reg = <2>;
+			qcom,npu-pwrlevel@1 {
+				reg = <1>;
 				vreg = <2>;
 				clk-freq = <19200000
 					200000000
@@ -197,21 +147,17 @@
 					50000000
 					466000000
 					466000000
-					200000000
-					200000000
-					267000000
-					19200000
-					200000000
 					120000000
 					50000000
 					19200000
 					466000000
 					466000000
-					19200000>;
+					19200000
+					400000000>;
 			};
 
-			qcom,npu-pwrlevel@3 {
-				reg = <3>;
+			qcom,npu-pwrlevel@2 {
+				reg = <2>;
 				vreg = <3>;
 				clk-freq = <19200000
 					333000000
@@ -235,21 +181,17 @@
 					50000000
 					533000000
 					533000000
-					333000000
-					333000000
-					403000000
-					19200000
-					333000000
 					240000000
 					50000000
 					19200000
 					533000000
 					533000000
-					19200000>;
+					19200000
+					500000000>;
 			};
 
-			qcom,npu-pwrlevel@4 {
-				reg = <4>;
+			qcom,npu-pwrlevel@3 {
+				reg = <3>;
 				vreg = <4>;
 				clk-freq = <19200000
 					428000000
@@ -273,21 +215,17 @@
 					100000000
 					850000000
 					850000000
-					428000000
-					428000000
-					533000000
-					19200000
-					428000000
 					240000000
 					100000000
 					19200000
 					850000000
 					850000000
-					19200000>;
+					19200000
+					660000000>;
 			};
 
-			qcom,npu-pwrlevel@5 {
-				reg = <5>;
+			qcom,npu-pwrlevel@4 {
+				reg = <4>;
 				vreg = <6>;
 				clk-freq = <19200000
 					500000000
@@ -311,17 +249,13 @@
 					100000000
 					1000000000
 					1000000000
-					500000000
-					500000000
-					700000000
-					19200000
-					500000000
 					30000000
 					100000000
 					19200000
 					1000000000
 					1000000000
-					19200000>;
+					19200000
+					800000000>;
 			};
 		};
 	};
diff --git a/arch/arm64/boot/dts/qcom/kona-pcie.dtsi b/arch/arm64/boot/dts/qcom/kona-pcie.dtsi
index a13400a..694c5fc 100644
--- a/arch/arm64/boot/dts/qcom/kona-pcie.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-pcie.dtsi
@@ -67,7 +67,7 @@
 			<&clock_gcc GCC_PCIE_0_CFG_AHB_CLK>,
 			<&clock_gcc GCC_PCIE_0_MSTR_AXI_CLK>,
 			<&clock_gcc GCC_PCIE_0_SLV_AXI_CLK>,
-			<&clock_gcc GCC_PCIE_MDM_CLKREF_EN>,
+			<&clock_gcc GCC_PCIE_WIFI_CLKREF_EN>,
 			<&clock_gcc GCC_PCIE_0_SLV_Q2A_AXI_CLK>,
 			<&clock_gcc GCC_AGGRE_NOC_PCIE_TBU_CLK>,
 			<&clock_gcc GCC_PCIE0_PHY_REFGEN_CLK>,
@@ -307,7 +307,7 @@
 			<&clock_gcc GCC_PCIE_1_CFG_AHB_CLK>,
 			<&clock_gcc GCC_PCIE_1_MSTR_AXI_CLK>,
 			<&clock_gcc GCC_PCIE_1_SLV_AXI_CLK>,
-			<&clock_gcc GCC_PCIE_MDM_CLKREF_EN>,
+			<&clock_gcc GCC_PCIE_WIGIG_CLKREF_EN>,
 			<&clock_gcc GCC_PCIE_1_SLV_Q2A_AXI_CLK>,
 			<&clock_gcc GCC_AGGRE_NOC_PCIE_TBU_CLK>,
 			<&clock_gcc GCC_PCIE1_PHY_REFGEN_CLK>,
diff --git a/arch/arm64/boot/dts/qcom/kona-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/kona-pinctrl.dtsi
index 6a7413d..bb5cbca 100644
--- a/arch/arm64/boot/dts/qcom/kona-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-pinctrl.dtsi
@@ -192,6 +192,51 @@
 			};
 		};
 
+		pmx_ts_active {
+			ts_active: ts_active {
+				mux {
+					pins = "gpio38", "gpio39";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio38", "gpio39";
+					drive-strength = <8>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		pmx_ts_int_suspend {
+			ts_int_suspend: ts_int_suspend {
+				mux {
+					pins = "gpio39";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio39";
+					drive-strength = <2>;
+					bias-pull-down;
+				};
+			};
+		};
+
+		pmx_ts_reset_suspend {
+			ts_reset_suspend: ts_reset_suspend {
+				mux {
+					pins = "gpio38";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio38";
+					drive-strength = <2>;
+					bias-pull-down;
+				};
+			};
+		};
+
 		ufs_dev_reset_assert: ufs_dev_reset_assert {
 			config {
 				pins = "ufs_reset";
@@ -667,12 +712,12 @@
 		pmx_sde: pmx_sde {
 			sde_dsi_active: sde_dsi_active {
 				mux {
-					pins = "gpio75";
+					pins = "gpio75", "gpio60";
 					function = "gpio";
 				};
 
 				config {
-					pins = "gpio75";
+					pins = "gpio75", "gpio60";
 					drive-strength = <8>;   /* 8 mA */
 					bias-disable = <0>;   /* no pull */
 				};
@@ -680,12 +725,12 @@
 
 			sde_dsi_suspend: sde_dsi_suspend {
 				mux {
-					pins = "gpio75";
+					pins = "gpio75", "gpio60";
 					function = "gpio";
 				};
 
 				config {
-					pins = "gpio75";
+					pins = "gpio75", "gpio60";
 					drive-strength = <2>;   /* 2 mA */
 					bias-pull-down;         /* PULL DOWN */
 				};
@@ -693,12 +738,12 @@
 
 			sde_dsi1_active: sde_dsi1_active {
 				mux {
-					pins = "gpio128";
+					pins = "gpio128", "gpio60";
 					function = "gpio";
 				};
 
 				config {
-					pins = "gpio128";
+					pins = "gpio128", "gpio60";
 					drive-strength = <8>;   /* 8 mA */
 					bias-disable = <0>;   /* no pull */
 				};
@@ -706,12 +751,12 @@
 
 			sde_dsi1_suspend: sde_dsi1_suspend {
 				mux {
-					pins = "gpio128";
+					pins = "gpio128", "gpio60";
 					function = "gpio";
 				};
 
 				config {
-					pins = "gpio128";
+					pins = "gpio128", "gpio60";
 					drive-strength = <2>;   /* 2 mA */
 					bias-pull-down;         /* PULL DOWN */
 				};
@@ -2479,7 +2524,7 @@
 			};
 		};
 
-		bt_en_active: bt_en_active {
+		bt_en_sleep: bt_en_sleep {
 			mux {
 			pins = "gpio21";
 			function = "gpio";
@@ -2488,7 +2533,8 @@
 			config {
 			pins = "gpio21";
 			drive-strength = <2>;
-			bias-pull-up;
+			output-low;
+			bias-pull-down;
 			};
 		};
 
@@ -2551,6 +2597,98 @@
 			};
 		};
 
+		nfc {
+			nfc_int_active: nfc_int_active {
+				/* active state */
+				mux {
+					/* GPIO 111 NFC Read Interrupt */
+					pins = "gpio111";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio111";
+					drive-strength = <2>; /* 2 MA */
+					bias-pull-up;
+				};
+			};
+
+			nfc_int_suspend: nfc_int_suspend {
+				/* sleep state */
+				mux {
+					/* GPIO 111 NFC Read Interrupt */
+					pins = "gpio111";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio111";
+					drive-strength = <2>; /* 2 MA */
+					bias-pull-up;
+				};
+			};
+
+			nfc_enable_active: nfc_enable_active {
+				/* active state */
+				mux {
+					/* 6: Enable 110: Firmware */
+					pins = "gpio6", "gpio110";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio6", "gpio110";
+					drive-strength = <2>; /* 2 MA */
+					bias-pull-up;
+				};
+			};
+
+			nfc_enable_suspend: nfc_enable_suspend {
+				/* sleep state */
+				mux {
+					/* 6: Enable 110: Firmware */
+					pins = "gpio6", "gpio110";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio6", "gpio110";
+					drive-strength = <2>; /* 2 MA */
+					bias-disable;
+				};
+			};
+
+			nfc_clk_req_active: nfc_clk_req_active {
+				/* active state */
+				mux {
+					/* GPIO 7: NFC CLOCK REQUEST */
+					pins = "gpio7";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio7";
+					drive-strength = <2>; /* 2 MA */
+					bias-pull-up;
+				};
+			};
+
+			nfc_clk_req_suspend: nfc_clk_req_suspend {
+				/* sleep state */
+				mux {
+					/* GPIO 7: NFC CLOCK REQUEST */
+					pins = "gpio7";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio7";
+					drive-strength = <2>; /* 2 MA */
+					bias-disable;
+				};
+			};
+		};
+
 		/* SE 2 pin mappings */
 		qupv3_se2_i2c_pins: qupv3_se2_i2c_pins {
 			qupv3_se2_i2c_active: qupv3_se2_i2c_active {
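Note on the NFC pinctrl hunk above: each signal gets a paired active/suspend state, and a client node selects between them by state name. A minimal sketch of the consumer side, assuming a hypothetical I2C client (the state names and pin labels match the nfc_* nodes added above; the node name and address are illustrative):

	nfc@28 {
		/* state names map 1:1 onto pinctrl-0/pinctrl-1 below */
		pinctrl-names = "nfc_active", "nfc_suspend";
		pinctrl-0 = <&nfc_int_active &nfc_enable_active
				&nfc_clk_req_active>;
		pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend
				&nfc_clk_req_suspend>;
	};
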
diff --git a/arch/arm64/boot/dts/qcom/kona-pmic-overlay.dtsi b/arch/arm64/boot/dts/qcom/kona-pmic-overlay.dtsi
index 664620f..df613e1 100644
--- a/arch/arm64/boot/dts/qcom/kona-pmic-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-pmic-overlay.dtsi
@@ -36,6 +36,56 @@
 	};
 };
 
+&pm8150_gpios {
+	key_home {
+		key_home_default: key_home_default {
+			pins = "gpio1";
+			function = "normal";
+			input-enable;
+			bias-pull-up;
+			power-source = <0>;
+		};
+	};
+
+	key_vol_up {
+		key_vol_up_default: key_vol_up_default {
+			pins = "gpio6";
+			function = "normal";
+			input-enable;
+			bias-pull-up;
+			power-source = <1>;
+		};
+	};
+};
+
+&pm8150b_gpios {
+	qnovo_fet_ctrl {
+		qnovo_fet_ctrl_state1: qnovo_fet_ctrl_state1 {
+			pins = "gpio8";
+			function = "normal";
+			input-enable;
+			output-disable;
+			bias-disable;
+			power-source = <0>;
+		};
+
+		qnovo_fet_ctrl_state2: qnovo_fet_ctrl_state2 {
+			pins = "gpio8";
+			function = "normal";
+			input-enable;
+			output-disable;
+			bias-pull-down;
+			power-source = <0>;
+		};
+	};
+};
+
+&pm8150b_qnovo {
+	pinctrl-names = "q_state1", "q_state2";
+	pinctrl-0 = <&qnovo_fet_ctrl_state1>;
+	pinctrl-1 = <&qnovo_fet_ctrl_state2>;
+};
+
 &pm8150b_charger {
 	smb5_vconn: qcom,smb5-vconn {
 		regulator-name = "smb5-vconn";
@@ -52,6 +102,20 @@
 	vconn-supply = <&smb5_vconn>;
 };
 
+&pm8150b_gpios {
+	haptics_boost {
+		haptics_boost_default: haptics_boost_default {
+			pins = "gpio5";
+			function = "normal";
+			output-enable;
+			input-disable;
+			bias-disable;
+			qcom,drive-strength = <3>; /* high */
+			power-source = <1>; /* 1.8 V */
+		};
+	};
+};
+
 &soc {
 	vreg_tof: regulator-dbb1 {
 		compatible = "regulator-fixed";
@@ -62,4 +126,19 @@
 		startup-delay-us = <1000>;
 		enable-active-high;
 	};
+
+	vreg_hap_boost: regulator-haptics-boost {
+		compatible = "regulator-fixed";
+		regulator-name = "vdd_hap_boost";
+		gpio = <&pm8150b_gpios 5 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&haptics_boost_default>;
+		startup-delay-us = <1000>;
+		enable-active-high;
+		status = "disabled";
+	};
+};
+
+&usb0 {
+	extcon = <&pm8150b_pdphy>;
 };
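Note on the vreg_hap_boost hunk above: the GPIO-switched fixed regulator is declared with status = "disabled" in the shared PMIC overlay so that only boards which actually route PM8150B GPIO5 to the haptics boost opt in. A sketch of the board-side wiring, mirroring what kona-qrd.dtsi does further down in this patch (assumes the consumer takes a vdd-supply, as pm8150b_haptics does):

	&vreg_hap_boost {
		status = "ok";
	};

	&pm8150b_haptics {
		/* haptics draws from the GPIO-enabled boost rail */
		vdd-supply = <&vreg_hap_boost>;
	};
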
diff --git a/arch/arm64/boot/dts/qcom/kona-qrd.dtsi b/arch/arm64/boot/dts/qcom/kona-qrd.dtsi
index 7072469..67a3f6d7 100644
--- a/arch/arm64/boot/dts/qcom/kona-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-qrd.dtsi
@@ -3,9 +3,12 @@
  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
+#include <dt-bindings/gpio/gpio.h>
 #include "kona-pmic-overlay.dtsi"
 #include "kona-sde-display.dtsi"
+#include "kona-camera-sensor-qrd.dtsi"
 #include "kona-audio-overlay.dtsi"
+#include "kona-thermal-overlay.dtsi"
 
 &vendor {
 	kona_qrd_batterydata: qcom,battery-data {
@@ -22,6 +25,10 @@
 	status = "ok";
 };
 
+&qupv3_se6_4uart {
+	status = "ok";
+};
+
 &dai_mi2s2 {
 	qcom,msm-mi2s-tx-lines = <1>;
 	pinctrl-names = "default", "sleep";
@@ -36,6 +43,8 @@
 	qcom,audio-routing =
 		"AMIC2", "MIC BIAS2",
 		"MIC BIAS2", "Analog Mic2",
+		"TX DMIC0", "MIC BIAS3",
+		"MIC BIAS3", "Digital Mic0",
 		"TX DMIC1", "MIC BIAS3",
 		"MIC BIAS3", "Digital Mic1",
 		"TX DMIC2", "MIC BIAS1",
@@ -48,7 +57,17 @@
 		"IN2_HPHR", "HPHR_OUT",
 		"IN3_AUX", "AUX_OUT",
 		"TX SWR_ADC0", "ADC1_OUTPUT",
-		"TX SWR_ADC2", "ADC2_OUTPUT",
+		"TX SWR_ADC1", "ADC2_OUTPUT",
+		"TX SWR_ADC2", "ADC3_OUTPUT",
+		"TX SWR_ADC3", "ADC4_OUTPUT",
+		"TX SWR_DMIC0", "DMIC1_OUTPUT",
+		"TX SWR_DMIC1", "DMIC2_OUTPUT",
+		"TX SWR_DMIC2", "DMIC3_OUTPUT",
+		"TX SWR_DMIC3", "DMIC4_OUTPUT",
+		"TX SWR_DMIC4", "DMIC5_OUTPUT",
+		"TX SWR_DMIC5", "DMIC6_OUTPUT",
+		"TX SWR_DMIC6", "DMIC7_OUTPUT",
+		"TX SWR_DMIC7", "DMIC8_OUTPUT",
 		"WSA SRC0_INP", "SRC0",
 		"WSA_TX DEC0_INP", "TX DEC0 MUX",
 		"WSA_TX DEC1_INP", "TX DEC1 MUX",
@@ -56,10 +75,63 @@
 		"RX_TX DEC1_INP", "TX DEC1 MUX",
 		"RX_TX DEC2_INP", "TX DEC2 MUX",
 		"RX_TX DEC3_INP", "TX DEC3 MUX",
-		"SpkrRight IN", "WSA_SPK2 OUT";
+		"SpkrRight IN", "WSA_SPK2 OUT",
+		"VA DMIC1", "MIC BIAS3",
+		"VA DMIC2", "MIC BIAS1",
+		"VA DMIC3", "MIC BIAS1",
+		"VA DMIC5", "MIC BIAS4",
+		"VA SWR_ADC1", "ADC2_OUTPUT";
 	qcom,wsa-max-devs = <1>;
 	qcom,wsa-devs = <&wsa881x_0212>, <&wsa881x_0214>;
 	qcom,wsa-aux-dev-prefix = "SpkrRight", "SpkrRight";
+
+	qcom,msm-mbhc-usbc-audio-supported = <1>;
+};
+
+&qupv3_se1_i2c {
+	status = "ok";
+	qcom,clk-freq-out = <1000000>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+	nq@28 {
+		compatible = "qcom,nq-nci";
+		reg = <0x28>;
+		qcom,nq-irq = <&tlmm 111 0x00>;
+		qcom,nq-ven = <&tlmm 6 0x00>;
+		qcom,nq-firm = <&tlmm 110 0x00>;
+		qcom,nq-clkreq = <&tlmm 7 0x00>;
+		interrupt-parent = <&tlmm>;
+		interrupts = <111 0>;
+		interrupt-names = "nfc_irq";
+		pinctrl-names = "nfc_active", "nfc_suspend";
+		pinctrl-0 = <&nfc_int_active &nfc_enable_active
+				&nfc_clk_req_active>;
+		pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend
+				&nfc_clk_req_suspend>;
+	};
+};
+
+&qupv3_se13_i2c {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	status = "ok";
+
+	st_fts@49 {
+		compatible = "st,fts";
+		reg = <0x49>;
+		interrupt-parent = <&tlmm>;
+		interrupts = <39 0x2008>;
+		vdd-supply = <&pm8150a_l1>;
+		avdd-supply = <&pm8150_l13>;
+		pinctrl-names = "pmx_ts_active", "pmx_ts_suspend";
+		pinctrl-0 = <&ts_active>;
+		pinctrl-1 = <&ts_int_suspend &ts_reset_suspend>;
+		st,irq-gpio = <&tlmm 39 0x2008>;
+		st,reset-gpio = <&tlmm 38 0x00>;
+		st,regulator_dvdd = "vdd";
+		st,regulator_avdd = "avdd";
+	};
 };
 
 &ufsphy_mem {
@@ -91,6 +163,72 @@
 	status = "ok";
 };
 
+&soc {
+	gpio_keys {
+		compatible = "gpio-keys";
+		label = "gpio-keys";
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&key_vol_up_default>;
+
+		vol_up {
+			label = "volume_up";
+			gpios = <&pm8150_gpios 6 GPIO_ACTIVE_LOW>;
+			linux,input-type = <1>;
+			linux,code = <KEY_VOLUMEUP>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+			linux,can-disable;
+		};
+	};
+};
+
+&vreg_hap_boost {
+	status = "ok";
+};
+
+&pm8150b_haptics {
+	qcom,vmax-mv = <1697>;
+	qcom,play-rate-us = <5882>;
+	vdd-supply = <&vreg_hap_boost>;
+
+	wf_0 {
+		/* CLICK */
+		qcom,wf-play-rate-us = <5882>;
+		qcom,wf-vmax-mv = <1697>;
+	};
+
+	wf_1 {
+		/* DOUBLE CLICK */
+		qcom,wf-play-rate-us = <5882>;
+		qcom,wf-vmax-mv = <1697>;
+	};
+
+	wf_2 {
+		/* TICK */
+		qcom,wf-play-rate-us = <5882>;
+		qcom,wf-vmax-mv = <1697>;
+	};
+
+	wf_3 {
+		/* THUD */
+		qcom,wf-play-rate-us = <5882>;
+		qcom,wf-vmax-mv = <1697>;
+	};
+
+	wf_4 {
+		/* POP */
+		qcom,wf-play-rate-us = <5882>;
+		qcom,wf-vmax-mv = <1697>;
+	};
+
+	wf_5 {
+		/* HEAVY CLICK */
+		qcom,wf-play-rate-us = <5882>;
+		qcom,wf-vmax-mv = <1697>;
+	};
+};
+
 &pm8150b_vadc {
 	#address-cells = <1>;
 	#size-cells = <0>;
@@ -193,14 +331,6 @@
 		qcom,pre-scaling = <1 3>;
 	};
 
-	xo_therm@4c {
-		reg = <ADC_XO_THERM_PU2>;
-		label = "xo_therm";
-		qcom,ratiometric;
-		qcom,hw-settle-time = <200>;
-		qcom,pre-scaling = <1 1>;
-	};
-
 	skin_therm@4d {
 		reg = <ADC_AMUX_THM1_PU2>;
 		label = "skin_therm";
@@ -268,12 +398,6 @@
 	#address-cells = <1>;
 	#size-cells = <0>;
 
-	xo_therm@4c {
-		reg = <ADC_XO_THERM_PU2>;
-		qcom,ratiometric;
-		qcom,hw-settle-time = <200>;
-	};
-
 	skin_therm@4d {
 		reg = <ADC_AMUX_THM1_PU2>;
 		qcom,ratiometric;
@@ -323,3 +447,125 @@
 &sde_dsi {
 	qcom,dsi-default-panel = <&dsi_sw43404_amoled_cmd>;
 };
+
+&thermal_zones {
+	wp-therm-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&pm8150b_adc_tm ADC_AMUX_THM1_PU2>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	skin-therm-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&pm8150_adc_tm ADC_AMUX_THM1_PU2>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	pa-therm1-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&pm8150_adc_tm ADC_AMUX_THM2_PU2>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	camera-flash-therm-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&pm8150l_adc_tm ADC_AMUX_THM1_PU2>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	skin-msm-therm-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&pm8150l_adc_tm ADC_AMUX_THM2_PU2>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	pa-therm2-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&pm8150l_adc_tm ADC_AMUX_THM3_PU2>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+};
+
+&sdhc_2 {
+	vdd-supply = <&pm8150a_l9>;
+	qcom,vdd-voltage-level = <2950000 2960000>;
+	qcom,vdd-current-level = <200 800000>;
+
+	vdd-io-supply = <&pm8150a_l6>;
+	qcom,vdd-io-voltage-level = <1808000 2960000>;
+	qcom,vdd-io-current-level = <200 22000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &sdc2_data_on &storage_cd>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &storage_cd>;
+
+	cd-gpios = <&tlmm 77 GPIO_ACTIVE_LOW>;
+
+	status = "ok";
+};
+
+&vendor {
+	bluetooth: bt_qca6390 {
+		compatible = "qca,qca6390";
+		pinctrl-names = "default";
+		pinctrl-0 = <&bt_en_sleep>;
+		qca,bt-reset-gpio = <&tlmm 21 0>; /* BT_EN */
+		qca,bt-vdd-aon-supply = <&pm8150_s6>;
+		qca,bt-vdd-dig-supply = <&pm8009_s2>;
+		qca,bt-vdd-rfa1-supply = <&pm8150_s5>;
+		qca,bt-vdd-rfa2-supply = <&pm8150a_s8>;
+
+		qca,bt-vdd-aon-voltage-level = <950000 950000>;
+		qca,bt-vdd-dig-voltage-level = <950000 952000>;
+		qca,bt-vdd-rfa1-voltage-level = <1900000 1900000>;
+		qca,bt-vdd-rfa2-voltage-level = <1350000 1350000>;
+	};
+};
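Note on the gpio_keys hunk above: only volume-up is wired here, although the PMIC overlay already defines key_home_default on PM8150 GPIO1. Wiring it would be a one-node addition of the same shape, plus &key_home_default appended to the parent node's pinctrl-0 (illustrative only, not part of this patch; KEY_HOME is the standard input keycode):

	home {
		label = "home";
		gpios = <&pm8150_gpios 1 GPIO_ACTIVE_LOW>;
		linux,input-type = <1>;	/* EV_KEY */
		linux,code = <KEY_HOME>;
		debounce-interval = <15>;
	};
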
diff --git a/arch/arm64/boot/dts/qcom/kona-regulators.dtsi b/arch/arm64/boot/dts/qcom/kona-regulators.dtsi
index c7bb1af..81c7876 100644
--- a/arch/arm64/boot/dts/qcom/kona-regulators.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-regulators.dtsi
@@ -429,6 +429,7 @@
 				= <RPMH_REGULATOR_LEVEL_MAX>;
 			qcom,init-voltage-level
 				= <RPMH_REGULATOR_LEVEL_LOW_SVS>;
+			regulator-always-on;
 		};
 
 		VDD_MMCX_LEVEL_AO: S4C_LEVEL_AO:
diff --git a/arch/arm64/boot/dts/qcom/kona-sde-display.dtsi b/arch/arm64/boot/dts/qcom/kona-sde-display.dtsi
index abdbab9..3062d52 100644
--- a/arch/arm64/boot/dts/qcom/kona-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-sde-display.dtsi
@@ -4,8 +4,33 @@
  */
 
 #include "dsi-panel-sw43404-amoled-dsc-wqhd-cmd.dtsi"
+#include "dsi-panel-sw43404-amoled-dsc-wqhd-video.dtsi"
+#include "dsi-panel-sw43404-amoled-dsc-fhd-plus-cmd.dtsi"
+#include "dsi-panel-sharp-dsc-4k-cmd.dtsi"
+#include "dsi-panel-sharp-dsc-4k-video.dtsi"
+#include "dsi-panel-sharp-1080p-cmd.dtsi"
+#include "dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi"
+#include "dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi"
+#include "dsi-panel-nt35695b-truly-fhd-cmd.dtsi"
+#include "dsi-panel-nt35695b-truly-fhd-video.dtsi"
 #include <dt-bindings/clock/mdss-7nm-pll-clk.h>
 
+&tlmm {
+	display_panel_avdd_default: display_panel_avdd_default {
+		mux {
+			pins = "gpio61";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio61";
+			drive-strength = <8>;
+			bias-disable = <0>;
+			output-high;
+		};
+	};
+};
+
 &soc {
 	ext_disp: qcom,msm-ext-disp {
 		compatible = "qcom,msm-ext-disp";
@@ -40,6 +65,43 @@
 		};
 	};
 
+	dsi_panel_pwr_supply_avdd: dsi_panel_pwr_supply_avdd {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,panel-supply-entry@0 {
+			reg = <0>;
+			qcom,supply-name = "vddio";
+			qcom,supply-min-voltage = <1800000>;
+			qcom,supply-max-voltage = <1800000>;
+			qcom,supply-enable-load = <62000>;
+			qcom,supply-disable-load = <80>;
+			qcom,supply-post-on-sleep = <20>;
+		};
+
+		qcom,panel-supply-entry@1 {
+			reg = <1>;
+			qcom,supply-name = "avdd";
+			qcom,supply-min-voltage = <4600000>;
+			qcom,supply-max-voltage = <6000000>;
+			qcom,supply-enable-load = <100000>;
+			qcom,supply-disable-load = <100>;
+		};
+	};
+
+	display_panel_avdd: display_gpio_regulator@1 {
+		compatible = "regulator-fixed";
+		regulator-name = "display_panel_avdd";
+		regulator-min-microvolt = <5500000>;
+		regulator-max-microvolt = <5500000>;
+		regulator-enable-ramp-delay = <233>;
+		gpio = <&tlmm 61 0>;
+		enable-active-high;
+		regulator-boost-on;
+		pinctrl-names = "default";
+		pinctrl-0 = <&display_panel_avdd_default>;
+	};
+
 	sde_dsi: qcom,dsi-display-primary {
 		compatible = "qcom,dsi-display";
 		label = "primary";
@@ -63,6 +125,7 @@
 
 		vddio-supply = <&pm8150_l14>;
 		vdd-supply = <&pm8150a_l11>;
+		avdd-supply = <&display_panel_avdd>;
 
 		qcom,mdp = <&mdss_mdp>;
 		qcom,dsi-default-panel = <&dsi_sw43404_amoled_cmd>;
@@ -88,9 +151,10 @@
 };
 
 &mdss_mdp {
-	connectors = <&sde_wb &sde_dsi>;
+	connectors = <&sde_dp &sde_wb &sde_dsi>;
 };
 
+/* PHY TIMINGS REVISION W */
 &dsi_sw43404_amoled_cmd {
 	qcom,mdss-dsi-display-timings {
 		timing@0 {
@@ -101,3 +165,103 @@
 		};
 	};
 };
+
+&dsi_sw43404_amoled_video {
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 14 05 05 1f 1e 05
+				05 03 02 04 00 12 15];
+			qcom,display-topology = <2 2 1>;
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
+
+&dsi_sw43404_amoled_fhd_plus_cmd {
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 12 04 04 1e 1e 04
+				05 02 03 04 00 11 14];
+			qcom,display-topology = <2 2 1>;
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
+
+&dsi_sharp_4k_dsc_cmd {
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 1e 08 07 24 22 08
+				08 05 02 04 00 19 18];
+			qcom,display-topology = <2 2 2>;
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
+
+&dsi_sharp_4k_dsc_video {
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 1e 08 07 24 22 08
+				08 05 02 04 00 19 18];
+			qcom,display-topology = <2 2 2>;
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
+
+&dsi_sharp_1080_cmd {
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 1E 08 08 24 22 08
+				08 05 02 04 00 19 18];
+			qcom,display-topology = <1 0 1>;
+			qcom,default-topology-index = <0>;
+			qcom,mdss-dsi-panel-clockrate = <900000000>;
+		};
+	};
+};
+
+&dsi_dual_nt35597_truly_cmd {
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 1c 08 07 23 22 07
+				07 05 02 04 00 18 17];
+			qcom,display-topology = <2 2 2>;
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
+
+&dsi_dual_nt35597_truly_video {
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 1c 08 07 23 22 07
+				07 05 02 04 00 18 17];
+			qcom,display-topology = <2 2 2>;
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
+
+&dsi_nt35695b_truly_fhd_cmd {
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 1e 08 07 24 22
+				08 08 05 02 04 00 19 17];
+			qcom,display-topology = <1 0 1>;
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
+
+&dsi_nt35695b_truly_fhd_video {
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 1e 08 07 24 22
+				08 08 05 02 04 00 19 17];
+			qcom,display-topology = <1 0 1>;
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-sde.dtsi b/arch/arm64/boot/dts/qcom/kona-sde.dtsi
index 9a5e0f2..3ee58b8 100644
--- a/arch/arm64/boot/dts/qcom/kona-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-sde.dtsi
@@ -74,7 +74,7 @@
 		qcom,sde-wb-size = <0x2c8>;
 		qcom,sde-wb-xin-id = <6>;
 		qcom,sde-wb-id = <2>;
-		qcom,sde-wb-clk-ctrl = <0x3b8 24>;
+		qcom,sde-wb-clk-ctrl = <0x2bc 16>;
 
 		qcom,sde-intf-off = <0x6b000 0x6b800
 					0x6c000 0x6c800>;
@@ -152,6 +152,7 @@
 		qcom,sde-pipe-order-version = <0x1>;
 		qcom,sde-has-dim-layer;
 		qcom,sde-has-dest-scaler;
+		qcom,sde-has-idle-pc;
 		qcom,sde-max-dest-scaler-input-linewidth = <2048>;
 		qcom,sde-max-dest-scaler-output-linewidth = <2560>;
 		qcom,sde-max-bw-low-kbps = <12600000>;
@@ -176,6 +177,8 @@
 
 		qcom,sde-vbif-qos-rt-remap = <3 3 4 4 5 5 6 6>;
 		qcom,sde-vbif-qos-nrt-remap = <3 3 3 3 3 3 3 3>;
+		qcom,sde-vbif-qos-cwb-remap = <3 3 4 4 5 5 6 3>;
+		qcom,sde-vbif-qos-lutdma-remap = <3 3 3 3 4 4 4 4>;
 
 		/* macrotile & macrotile-qseed has the same configs */
 		qcom,sde-danger-lut = <0x0000000f 0x0000ffff
@@ -204,6 +207,8 @@
 		qcom,sde-reg-dma-off = <0>;
 		qcom,sde-reg-dma-version = <0x00010002>;
 		qcom,sde-reg-dma-trigger-off = <0x119c>;
+		qcom,sde-reg-dma-xin-id = <7>;
+		qcom,sde-reg-dma-clk-ctrl = <0x2bc 20>;
 
 		qcom,sde-secure-sid-mask = <0x4200801>;
 
@@ -275,13 +280,23 @@
 
 		/* data and reg bus scale settings */
 		qcom,sde-data-bus {
-			qcom,msm-bus,name = "mdss_sde";
+			qcom,msm-bus,name = "mdss_sde_mnoc_llcc";
 			qcom,msm-bus,num-cases = <3>;
 			qcom,msm-bus,num-paths = <2>;
 			qcom,msm-bus,vectors-KBps =
-				<22 512 0 0>, <23 512 0 0>,
-				<22 512 0 6400000>, <23 512 0 6400000>,
-				<22 512 0 6400000>, <23 512 0 6400000>;
+				<22 770 0 0>, <23 770 0 0>,
+				<22 770 0 6400000>, <23 770 0 6400000>,
+				<22 770 0 6400000>, <23 770 0 6400000>;
+		};
+
+		qcom,sde-ebi-bus {
+			qcom,msm-bus,name = "mdss_sde_ebi";
+			qcom,msm-bus,num-cases = <3>;
+			qcom,msm-bus,num-paths = <1>;
+			qcom,msm-bus,vectors-KBps =
+				<129 512 0 0>,
+				<129 512 0 6400000>,
+				<129 512 0 6400000>;
 		};
 
 		qcom,sde-reg-bus {
diff --git a/arch/arm64/boot/dts/qcom/kona-thermal-overlay.dtsi b/arch/arm64/boot/dts/qcom/kona-thermal-overlay.dtsi
new file mode 100644
index 0000000..61be3ca
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/kona-thermal-overlay.dtsi
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <dt-bindings/thermal/thermal.h>
+
+&mdss_mdp {
+	#cooling-cells = <2>;
+};
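Note: #cooling-cells = <2> advertises mdss_mdp as a cooling device whose phandle specifier carries a minimum and maximum cooling state. A thermal zone can then bind the display in a cooling map; a minimal sketch (the map and trip names here are hypothetical, not from this patch):

	cooling-maps {
		display_cdev {
			trip = <&skin_trip>;
			/* THERMAL_NO_LIMIT leaves both state bounds
			 * to the governor */
			cooling-device = <&mdss_mdp THERMAL_NO_LIMIT
						THERMAL_NO_LIMIT>;
		};
	};
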
diff --git a/arch/arm64/boot/dts/qcom/kona-thermal.dtsi b/arch/arm64/boot/dts/qcom/kona-thermal.dtsi
index af60af1..64fba05 100644
--- a/arch/arm64/boot/dts/qcom/kona-thermal.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-thermal.dtsi
@@ -4,7 +4,51 @@
  */
 
 #include <dt-bindings/thermal/thermal.h>
-#include "kona-pmic-overlay.dtsi"
+
+&cpufreq_hw {
+	qcom,cpu-isolation {
+		compatible = "qcom,cpu-isolate";
+		cpu0_isolate: cpu0-isolate {
+			qcom,cpu = <&CPU0>;
+			#cooling-cells = <2>;
+		};
+
+		cpu1_isolate: cpu1-isolate {
+			qcom,cpu = <&CPU1>;
+			#cooling-cells = <2>;
+		};
+
+		cpu2_isolate: cpu2-isolate {
+			qcom,cpu = <&CPU2>;
+			#cooling-cells = <2>;
+		};
+
+		cpu3_isolate: cpu3-isolate {
+			qcom,cpu = <&CPU3>;
+			#cooling-cells = <2>;
+		};
+
+		cpu4_isolate: cpu4-isolate {
+			qcom,cpu = <&CPU4>;
+			#cooling-cells = <2>;
+		};
+
+		cpu5_isolate: cpu5-isolate {
+			qcom,cpu = <&CPU5>;
+			#cooling-cells = <2>;
+		};
+
+		cpu6_isolate: cpu6-isolate {
+			qcom,cpu = <&CPU6>;
+			#cooling-cells = <2>;
+		};
+
+		cpu7_isolate: cpu7-isolate {
+			qcom,cpu = <&CPU7>;
+			#cooling-cells = <2>;
+		};
+	};
+};
 
 &thermal_zones {
 	aoss0-usr {
@@ -18,6 +62,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -32,6 +81,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -46,6 +100,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -60,6 +119,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -74,6 +138,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -88,6 +157,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -102,6 +176,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -116,6 +195,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -130,6 +214,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -144,6 +233,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -158,6 +252,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -172,6 +271,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -186,6 +290,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -200,6 +309,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -214,6 +328,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -228,6 +347,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -242,6 +366,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -256,6 +385,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -270,6 +404,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -284,6 +423,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -298,6 +442,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -312,6 +461,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -326,6 +480,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -340,6 +499,11 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
+			active-config1 {
+				temperature = <115000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
 		};
 	};
 
@@ -354,104 +518,340 @@
 				hysteresis = <1000>;
 				type = "passive";
 			};
-		};
-	};
-
-	wp-therm-usr {
-		polling-delay-passive = <0>;
-		polling-delay = <0>;
-		thermal-governor = "user_space";
-		thermal-sensors = <&pm8150b_adc_tm ADC_AMUX_THM1_PU2>;
-		trips {
-			active-config0 {
-				temperature = <125000>;
+			active-config1 {
+				temperature = <115000>;
 				hysteresis = <1000>;
 				type = "passive";
 			};
 		};
 	};
 
-	xo-therm-usr {
+	gpuss-max-step {
+		polling-delay-passive = <10>;
+		polling-delay = <100>;
+		thermal-governor = "step_wise";
+		trips {
+			gpu_trip0: gpu-trip0 {
+				temperature = <95000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+		};
+
+		cooling-maps {
+			gpu_cdev {
+				trip = <&gpu_trip0>;
+				cooling-device = <&msm_gpu THERMAL_NO_LIMIT
+							THERMAL_NO_LIMIT>;
+			};
+		};
+	};
+
+	apc-0-max-step {
 		polling-delay-passive = <0>;
 		polling-delay = <0>;
-		thermal-governor = "user_space";
-		thermal-sensors = <&pm8150_adc_tm ADC_XO_THERM_PU2>;
+		thermal-governor = "step_wise";
 		trips {
-			active-config0 {
-				temperature = <125000>;
-				hysteresis = <1000>;
+			silver-trip {
+				temperature = <120000>;
+				hysteresis = <0>;
 				type = "passive";
 			};
 		};
 	};
 
-	skin-therm-usr {
+	apc-1-max-step {
 		polling-delay-passive = <0>;
 		polling-delay = <0>;
-		thermal-governor = "user_space";
-		thermal-sensors = <&pm8150_adc_tm ADC_AMUX_THM1_PU2>;
+		thermal-governor = "step_wise";
 		trips {
-			active-config0 {
-				temperature = <125000>;
-				hysteresis = <1000>;
+			gold-trip {
+				temperature = <120000>;
+				hysteresis = <0>;
 				type = "passive";
 			};
 		};
 	};
 
-	pa-therm1-usr {
-		polling-delay-passive = <0>;
+	pop-mem-step {
+		polling-delay-passive = <10>;
 		polling-delay = <0>;
-		thermal-governor = "user_space";
-		thermal-sensors = <&pm8150_adc_tm ADC_AMUX_THM2_PU2>;
+		thermal-sensors = <&tsens1 3>;
+		thermal-governor = "step_wise";
 		trips {
-			active-config0 {
-				temperature = <125000>;
-				hysteresis = <1000>;
+			pop_trip: pop-trip {
+				temperature = <95000>;
+				hysteresis = <0>;
 				type = "passive";
 			};
 		};
+
+		cooling-maps {
+			pop_cdev4 {
+				trip = <&pop_trip>;
+				cooling-device =
+					<&CPU4 THERMAL_NO_LIMIT
+						THERMAL_NO_LIMIT>;
+			};
+
+			pop_cdev7 {
+				trip = <&pop_trip>;
+				cooling-device =
+					<&CPU7 THERMAL_NO_LIMIT
+						THERMAL_NO_LIMIT>;
+			};
+		};
 	};
 
-	camera-flash-therm-usr {
+	cpu-0-0-step {
 		polling-delay-passive = <0>;
 		polling-delay = <0>;
-		thermal-governor = "user_space";
-		thermal-sensors = <&pm8150l_adc_tm ADC_AMUX_THM1_PU2>;
+		thermal-governor = "step_wise";
+		thermal-sensors = <&tsens0 1>;
 		trips {
-			active-config0 {
-				temperature = <125000>;
-				hysteresis = <1000>;
+			cpu00_config: cpu00-config {
+				temperature = <110000>;
+				hysteresis = <10000>;
 				type = "passive";
 			};
 		};
+
+		cooling-maps {
+			cpu00_cdev {
+				trip = <&cpu00_config>;
+				cooling-device = <&cpu0_isolate 1 1>;
+			};
+		};
 	};
 
-	skin-msm-therm-usr {
+	cpu-0-1-step {
 		polling-delay-passive = <0>;
 		polling-delay = <0>;
-		thermal-governor = "user_space";
-		thermal-sensors = <&pm8150l_adc_tm ADC_AMUX_THM2_PU2>;
+		thermal-governor = "step_wise";
+		thermal-sensors = <&tsens0 2>;
 		trips {
-			active-config0 {
-				temperature = <125000>;
-				hysteresis = <1000>;
+			cpu01_config: cpu01-config {
+				temperature = <110000>;
+				hysteresis = <10000>;
 				type = "passive";
 			};
 		};
+
+		cooling-maps {
+			cpu01_cdev {
+				trip = <&cpu01_config>;
+				cooling-device = <&cpu1_isolate 1 1>;
+			};
+		};
 	};
 
-	pa-therm2-usr {
+	cpu-0-2-step {
 		polling-delay-passive = <0>;
 		polling-delay = <0>;
-		thermal-governor = "user_space";
-		thermal-sensors = <&pm8150l_adc_tm ADC_AMUX_THM3_PU2>;
+		thermal-governor = "step_wise";
+		thermal-sensors = <&tsens0 3>;
 		trips {
-			active-config0 {
-				temperature = <125000>;
-				hysteresis = <1000>;
+			cpu02_config: cpu02-config {
+				temperature = <110000>;
+				hysteresis = <10000>;
 				type = "passive";
 			};
 		};
+
+		cooling-maps {
+			cpu02_cdev {
+				trip = <&cpu02_config>;
+				cooling-device = <&cpu2_isolate 1 1>;
+			};
+		};
+	};
+
+	cpu-0-3-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 4>;
+		thermal-governor = "step_wise";
+		trips {
+			cpu03_config: cpu03-config {
+				temperature = <110000>;
+				hysteresis = <10000>;
+				type = "passive";
+			};
+		};
+
+		cooling-maps {
+			cpu03_cdev {
+				trip = <&cpu03_config>;
+				cooling-device = <&cpu3_isolate 1 1>;
+			};
+		};
+	};
+
+	cpu-1-0-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 7>;
+		thermal-governor = "step_wise";
+		trips {
+			cpu10_config: cpu10-config {
+				temperature = <110000>;
+				hysteresis = <10000>;
+				type = "passive";
+			};
+		};
+
+		cooling-maps {
+			cpu10_cdev {
+				trip = <&cpu10_config>;
+				cooling-device = <&cpu4_isolate 1 1>;
+			};
+		};
+	};
+
+	cpu-1-1-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 8>;
+		thermal-governor = "step_wise";
+		trips {
+			cpu11_config: cpu11-config {
+				temperature = <110000>;
+				hysteresis = <10000>;
+				type = "passive";
+			};
+		};
+
+		cooling-maps {
+			cpu11_cdev {
+				trip = <&cpu11_config>;
+				cooling-device = <&cpu5_isolate 1 1>;
+			};
+		};
+	};
+
+	cpu-1-2-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 9>;
+		thermal-governor = "step_wise";
+		trips {
+			cpu12_config: cpu12-config {
+				temperature = <110000>;
+				hysteresis = <10000>;
+				type = "passive";
+			};
+		};
+
+		cooling-maps {
+			cpu12_cdev {
+				trip = <&cpu12_config>;
+				cooling-device = <&cpu6_isolate 1 1>;
+			};
+		};
+	};
+
+	cpu-1-3-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 10>;
+		thermal-governor = "step_wise";
+		trips {
+			cpu13_config: cpu13-config {
+				temperature = <110000>;
+				hysteresis = <10000>;
+				type = "passive";
+			};
+		};
+
+		cooling-maps {
+			cpu13_cdev {
+				trip = <&cpu13_config>;
+				cooling-device = <&cpu7_isolate 1 1>;
+			};
+		};
+	};
+
+	cpu-1-4-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 11>;
+		thermal-governor = "step_wise";
+		trips {
+			cpu14_config: cpu14-config {
+				temperature = <110000>;
+				hysteresis = <10000>;
+				type = "passive";
+			};
+		};
+
+		cooling-maps {
+			cpu14_cdev {
+				trip = <&cpu14_config>;
+				cooling-device = <&cpu4_isolate 1 1>;
+			};
+		};
+	};
+
+	cpu-1-5-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 12>;
+		thermal-governor = "step_wise";
+		trips {
+			cpu15_config: cpu15-config {
+				temperature = <110000>;
+				hysteresis = <10000>;
+				type = "passive";
+			};
+		};
+
+		cooling-maps {
+			cpu15_cdev {
+				trip = <&cpu15_config>;
+				cooling-device = <&cpu5_isolate 1 1>;
+			};
+		};
+	};
+
+	cpu-1-6-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 13>;
+		thermal-governor = "step_wise";
+		trips {
+			cpu16_config: cpu16-config {
+				temperature = <110000>;
+				hysteresis = <10000>;
+				type = "passive";
+			};
+		};
+
+		cooling-maps {
+			cpu16_cdev {
+				trip = <&cpu16_config>;
+				cooling-device = <&cpu6_isolate 1 1>;
+			};
+		};
+	};
+
+	cpu-1-7-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 14>;
+		thermal-governor = "step_wise";
+		trips {
+			cpu17_config: cpu17-config {
+				temperature = <110000>;
+				hysteresis = <10000>;
+				type = "passive";
+			};
+		};
+
+		cooling-maps {
+			cpu17_cdev {
+				trip = <&cpu17_config>;
+				cooling-device = <&cpu7_isolate 1 1>;
+			};
+		};
 	};
 };
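Note on the per-core step zones above: each zone pairs a tsens channel with a cpuN_isolate cooling device, and the <&cpuN_isolate 1 1> specifier clamps both the minimum and maximum cooling state to 1, i.e. the core is fully isolated once its 110 C trip is crossed and restored after the 10 C hysteresis. The pop-mem-step zone instead throttles through cpufreq cooling, which depends on the #cooling-cells = <2> added to the CPU nodes in kona.dtsi below; an annotated restatement of its map shows the generic two-cell shape:

	cooling-maps {
		pop_cdev7 {
			trip = <&pop_trip>;
			/* two-cell cpufreq specifier: <min-state max-state>;
			 * THERMAL_NO_LIMIT keeps the full OPP range usable */
			cooling-device = <&CPU7 THERMAL_NO_LIMIT
						THERMAL_NO_LIMIT>;
		};
	};
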
diff --git a/arch/arm64/boot/dts/qcom/kona-usb.dtsi b/arch/arm64/boot/dts/qcom/kona-usb.dtsi
index df962e9..fca70c3 100644
--- a/arch/arm64/boot/dts/qcom/kona-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-usb.dtsi
@@ -19,10 +19,10 @@
 		#size-cells = <1>;
 		ranges;
 
-		interrupts = <GIC_SPI 494 IRQ_TYPE_LEVEL_HIGH>,
-			     <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
-			     <GIC_SPI 497 IRQ_TYPE_LEVEL_HIGH>,
-			     <GIC_SPI 495 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts-extended = <&pdc 14 IRQ_TYPE_EDGE_BOTH>,
+			     <&intc GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
+			     <&pdc 17 IRQ_TYPE_LEVEL_HIGH>,
+			     <&pdc 15 IRQ_TYPE_EDGE_BOTH>;
 		interrupt-names = "dp_hs_phy_irq", "pwr_event_irq",
 				"ss_phy_irq", "dm_hs_phy_irq";
 		qcom,use-pdc-interrupts;
@@ -86,7 +86,7 @@
 			compatible = "snps,dwc3";
 			reg = <0x0a600000 0xcd00>;
 			interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
-			usb-phy = <&usb2_phy0>, <&usb_nop_phy>;
+			usb-phy = <&usb2_phy0>, <&usb_qmp_dp_phy>;
 			linux,sysdev_is_parent;
 			snps,disable-clk-gating;
 			snps,has-lpm-erratum;
@@ -95,7 +95,7 @@
 			snps,usb3-u1u2-disable;
 			usb-core-id = <0>;
 			tx-fifo-resize;
-			maximum-speed = "high-speed";
+			maximum-speed = "super-speed-plus";
 			dr_mode = "drd";
 		};
 
@@ -333,8 +333,6 @@
 		resets = <&clock_gcc GCC_USB3_DP_PHY_PRIM_BCR>,
 			<&clock_gcc GCC_USB3_PHY_PRIM_BCR>;
 		reset-names = "global_phy_reset", "phy_reset";
-
-		status = "disabled";
 	};
 
 	usb_audio_qmi_dev {
diff --git a/arch/arm64/boot/dts/qcom/kona-vidc.dtsi b/arch/arm64/boot/dts/qcom/kona-vidc.dtsi
index b10c5f7..758f9d4 100644
--- a/arch/arm64/boot/dts/qcom/kona-vidc.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-vidc.dtsi
@@ -51,17 +51,17 @@
 			qcom,bus-master = <MSM_BUS_MASTER_AMPSS_M0>;
 			qcom,bus-slave = <MSM_BUS_SLAVE_VENUS_CFG>;
 			qcom,bus-governor = "performance";
-			qcom,bus-range-kbps = <1000 1000>;
+			qcom,bus-range-kbps = <762 762>;
 			operating-points-v2 = <&venus_bus_cnoc_bw_table>;
 		};
 
 		venus_bus_ddr {
 			compatible = "qcom,msm-vidc,bus";
 			label = "venus-ddr";
-			qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
+			qcom,bus-master = <MSM_BUS_MASTER_LLCC>;
 			qcom,bus-slave = <MSM_BUS_SLAVE_EBI_CH0>;
 			qcom,bus-governor = "msm-vidc-ddr";
-			qcom,bus-range-kbps = <1000 6533000>;
+			qcom,bus-range-kbps = <762 6533000>;
 			operating-points-v2 = <&ddr_bw_opp_table>;
 		};
 
@@ -71,7 +71,7 @@
 			qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
 			qcom,bus-slave = <MSM_BUS_SLAVE_LLCC>;
 			qcom,bus-governor = "msm-vidc-llcc";
-			qcom,bus-range-kbps = <1000 6533000>;
+			qcom,bus-range-kbps = <2288 6533000>;
 			operating-points-v2 = <&llcc_bw_opp_table>;
 		};
 
diff --git a/arch/arm64/boot/dts/qcom/kona.dtsi b/arch/arm64/boot/dts/qcom/kona.dtsi
index 51a408f..d97f329 100644
--- a/arch/arm64/boot/dts/qcom/kona.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona.dtsi
@@ -41,6 +41,7 @@
 		swr0 = &swr0;
 		swr1 = &swr1;
 		swr2 = &swr2;
+		mhi-netdev0 = &mhi_netdev_0;
 	};
 
 	cpus {
@@ -58,6 +59,7 @@
 			qcom,freq-domain = <&cpufreq_hw 0 4>;
 			capacity-dmips-mhz = <1024>;
 			dynamic-power-coefficient = <100>;
+			#cooling-cells = <2>;
 			L2_0: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-size = <0x20000>;
@@ -196,6 +198,7 @@
 			qcom,freq-domain = <&cpufreq_hw 1 4>;
 			capacity-dmips-mhz = <1894>;
 			dynamic-power-coefficient = <514>;
+			#cooling-cells = <2>;
 			L2_4: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-size = <0x40000>;
@@ -322,6 +325,7 @@
 			qcom,freq-domain = <&cpufreq_hw 2 4>;
 			capacity-dmips-mhz = <1894>;
 			dynamic-power-coefficient = <598>;
+			#cooling-cells = <2>;
 			L2_7: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-size = <0x80000>;
@@ -464,11 +468,11 @@
 
 		xbl_aop_mem: xbl_aop_region@80700000 {
 			no-map;
-			reg = <0x0 0x80700000 0x0 0x120000>;
+			reg = <0x0 0x80700000 0x0 0x160000>;
 		};
 
-		cmd_db: reserved-memory@80820000 {
-			reg = <0x0 0x80820000 0x0 0x20000>;
+		cmd_db: reserved-memory@80860000 {
+			reg = <0x0 0x80860000 0x0 0x20000>;
 			compatible = "qcom,cmd-db";
 			no-map;
 		};
@@ -478,86 +482,96 @@
 			reg = <0x0 0x80900000 0x0 0x200000>;
 		};
 
-		removed_mem: removed_region@80b00000 {
+		lpass_pcie_mem: lpass_pcie_region@80b00000 {
 			no-map;
-			reg = <0x0 0x80b00000 0x0 0x1300000>;
+			reg = <0x0 0x80b00000 0x0 0x100000>;
 		};
 
-		qtee_apps_mem: qtee_apps_region@81e00000 {
+		ssc_pcie_mem: ssc_pcie_region@80c00000 {
 			no-map;
-			reg = <0x0 0x81e00000 0x0 0x2600000>;
+			reg = <0x0 0x80c00000 0x0 0x100000>;
 		};
 
-		pil_camera_mem: pil_camera_region@86000000 {
+		removed_mem: removed_region@80d00000 {
+			no-map;
+			reg = <0x0 0x80d00000 0x0 0x1300000>;
+		};
+
+		qtee_apps_mem: qtee_apps_region@82000000 {
+			no-map;
+			reg = <0x0 0x82000000 0x0 0x2600000>;
+		};
+
+		pil_camera_mem: pil_camera_region@86200000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x86000000 0x0 0x500000>;
+			reg = <0x0 0x86200000 0x0 0x500000>;
 		};
 
-		pil_wlan_fw_mem: pil_wlan_fw_region@86500000 {
+		pil_wlan_fw_mem: pil_wlan_fw_region@86700000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x86500000 0x0 0x100000>;
+			reg = <0x0 0x86700000 0x0 0x100000>;
 		};
 
-		pil_ipa_fw_mem: pil_ipa_fw_region@86600000 {
+		pil_ipa_fw_mem: pil_ipa_fw_region@86800000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x86600000 0x0 0x10000>;
+			reg = <0x0 0x86800000 0x0 0x10000>;
 		};
 
-		pil_ipa_gsi_mem: pil_ipa_gsi_region@86610000 {
+		pil_ipa_gsi_mem: pil_ipa_gsi_region@86810000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x86610000 0x0 0xa000>;
+			reg = <0x0 0x86810000 0x0 0xa000>;
 		};
 
-		pil_gpu_mem: pil_gpu_region@8661a000 {
+		pil_gpu_mem: pil_gpu_region@8681a000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x8661a000 0x0 0x2000>;
+			reg = <0x0 0x8681a000 0x0 0x2000>;
 		};
 
-		pil_npu_mem: pil_npu_region@86700000 {
+		pil_npu_mem: pil_npu_region@86900000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x86700000 0x0 0x500000>;
+			reg = <0x0 0x86900000 0x0 0x500000>;
 		};
 
-		pil_video_mem: pil_video_region@86c00000 {
+		pil_video_mem: pil_video_region@86e00000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x86c00000 0x0 0x500000>;
+			reg = <0x0 0x86e00000 0x0 0x500000>;
 		};
 
-		pil_cvp_mem: pil_cvp_region@87100000 {
+		pil_cvp_mem: pil_cvp_region@87300000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x87100000 0x0 0x500000>;
+			reg = <0x0 0x87300000 0x0 0x500000>;
 		};
 
-		pil_cdsp_mem: pil_cdsp_region@87600000 {
+		pil_cdsp_mem: pil_cdsp_region@87800000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x87600000 0x0 0x800000>;
+			reg = <0x0 0x87800000 0x0 0x800000>;
 		};
 
-		pil_slpi_mem: pil_slpi_region@87e00000 {
+		pil_slpi_mem: pil_slpi_region@88000000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x87e00000 0x0 0x1500000>;
+			reg = <0x0 0x88000000 0x0 0x1500000>;
 		};
 
-		pil_adsp_mem: pil_adsp_region@89300000 {
+		pil_adsp_mem: pil_adsp_region@89500000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x89300000 0x0 0x1a00000>;
+			reg = <0x0 0x89500000 0x0 0x1c00000>;
 		};
 
-		pil_spss_mem: pil_spss_region@8ad00000 {
+		pil_spss_mem: pil_spss_region@8b100000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x0 0x8ad00000 0x0 0x100000>;
+			reg = <0x0 0x8b100000 0x0 0x100000>;
 		};
 
 		adsp_mem: adsp_region {
@@ -584,6 +598,16 @@
 			size = <0x0 0x400000>;
 		};
 
+		cont_splash_memory: cont_splash_region@9c000000 {
+			reg = <0x0 0x9c000000 0x0 0x02400000>;
+			label = "cont_splash_region";
+		};
+
+		disp_rdump_memory: disp_rdump_region@9c000000 {
+			reg = <0x0 0x9c000000 0x0 0x00800000>;
+			label = "disp_rdump_region";
+		};
+
 		dump_mem: mem_dump_region {
 			compatible = "shared-dma-pool";
 			alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>;
@@ -1309,6 +1333,21 @@
 		qcom,qsee-reentrancy-support = <2>;
 	};
 
+	qcom_rng: qrng@793000 {
+		compatible = "qcom,msm-rng";
+		reg = <0x793000 0x1000>;
+		qcom,msm-rng-iface-clk;
+		qcom,no-qrng-config;
+		qcom,msm-bus,name = "msm-rng-noc";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<1 618 0 0>,    /* No vote */
+			<1 618 0 300000>;  /* 75 MHz */
+		clocks = <&clock_gcc GCC_PRNG_AHB_CLK>;
+		clock-names = "iface_clk";
+	};
+
 	mdm0: qcom,mdm0 {
 		compatible = "qcom,ext-sdx55m";
 		cell-index = <0>;
@@ -1350,7 +1389,7 @@
 	pdc: interrupt-controller@b220000 {
 		compatible = "qcom,kona-pdc";
 		reg = <0xb220000 0x30000>;
-		qcom,pdc-ranges = <0 480 29>, <42 522 52>, <94 609 30>;
+		qcom,pdc-ranges = <0 480 30>, <42 522 52>, <94 609 30>;
 		#interrupt-cells = <2>;
 		interrupt-parent = <&intc>;
 		interrupt-controller;
@@ -1470,42 +1509,49 @@
 		compatible = "qcom,gdsc";
 		reg = <0x16b004 0x4>;
 		regulator-name = "pcie_0_gdsc";
+		qcom,retain-regs;
 	};
 
 	pcie_1_gdsc: qcom,gdsc@18d004 {
 		compatible = "qcom,gdsc";
 		reg = <0x18d004 0x4>;
 		regulator-name = "pcie_1_gdsc";
+		qcom,retain-regs;
 	};
 
 	pcie_2_gdsc: qcom,gdsc@106004 {
 		compatible = "qcom,gdsc";
 		reg = <0x106004 0x4>;
 		regulator-name = "pcie_2_gdsc";
+		qcom,retain-regs;
 	};
 
 	ufs_card_gdsc: qcom,gdsc@175004 {
 		compatible = "qcom,gdsc";
 		reg = <0x175004 0x4>;
 		regulator-name = "ufs_card_gdsc";
+		qcom,retain-regs;
 	};
 
 	ufs_phy_gdsc: qcom,gdsc@177004 {
 		compatible = "qcom,gdsc";
 		reg = <0x177004 0x4>;
 		regulator-name = "ufs_phy_gdsc";
+		qcom,retain-regs;
 	};
 
 	usb30_prim_gdsc: qcom,gdsc@10f004 {
 		compatible = "qcom,gdsc";
 		reg = <0x10f004 0x4>;
 		regulator-name = "usb30_prim_gdsc";
+		qcom,retain-regs;
 	};
 
 	usb30_sec_gdsc: qcom,gdsc@110004 {
 		compatible = "qcom,gdsc";
 		reg = <0x110004 0x4>;
 		regulator-name = "usb30_sec_gdsc";
+		qcom,retain-regs;
 	};
 
 	hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc: qcom,gdsc@17d050 {
@@ -1550,6 +1596,7 @@
 		parent-supply = <&VDD_MMCX_LEVEL>;
 		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
 		qcom,support-hw-trigger;
+		qcom,retain-regs;
 	};
 
 	ife_0_gdsc: qcom,gdsc@ad0a004 {
@@ -1560,6 +1607,7 @@
 		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>;
 		parent-supply = <&VDD_MMCX_LEVEL>;
 		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+		qcom,retain-regs;
 	};
 
 	ife_1_gdsc: qcom,gdsc@ad0b004 {
@@ -1570,6 +1618,7 @@
 		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>;
 		parent-supply = <&VDD_MMCX_LEVEL>;
 		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+		qcom,retain-regs;
 	};
 
 	ipe_0_gdsc: qcom,gdsc@ad08004 {
@@ -1581,6 +1630,7 @@
 		parent-supply = <&VDD_MMCX_LEVEL>;
 		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
 		qcom,support-hw-trigger;
+		qcom,retain-regs;
 	};
 
 	sbi_gdsc: qcom,gdsc@ad09004 {
@@ -1591,6 +1641,7 @@
 		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>;
 		parent-supply = <&VDD_MMCX_LEVEL>;
 		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+		qcom,retain-regs;
 	};
 
 	titan_top_gdsc: qcom,gdsc@ad0c144 {
@@ -1601,6 +1652,7 @@
 		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>;
 		parent-supply = <&VDD_MMCX_LEVEL>;
 		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+		qcom,retain-regs;
 	};
 
 	/* DISP_CC GDSC */
@@ -1613,6 +1665,7 @@
 		parent-supply = <&VDD_MMCX_LEVEL>;
 		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
 		qcom,support-hw-trigger;
+		qcom,retain-regs;
 	};
 
 	/* GPU_CC GDSCs */
@@ -1630,6 +1683,7 @@
 		qcom,no-status-check-on-disable;
 		qcom,clk-dis-wait-val = <8>;
 		qcom,gds-timeout = <500>;
+		qcom,retain-regs;
 	};
 
 	gpu_gx_domain_addr: syscon@3d91508 {
@@ -1651,6 +1705,7 @@
 		parent-supply = <&VDD_GFX_LEVEL>;
 		vdd_parent-supply = <&VDD_GFX_LEVEL>;
 		qcom,reset-aon-logic;
+		qcom,retain-regs;
 	};
 
 	/* NPU GDSC */
@@ -1660,6 +1715,7 @@
 		regulator-name = "npu_core_gdsc";
 		clock-names = "ahb_clk";
 		clocks = <&clock_gcc GCC_NPU_CFG_AHB_CLK>;
+		qcom,retain-regs;
 	};
 
 	qcom,sps {
@@ -1676,7 +1732,7 @@
 		clocks = <&clock_gcc GCC_VIDEO_AHB_CLK>;
 		parent-supply = <&VDD_MMCX_LEVEL>;
 		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
-		qcom,support-hw-trigger;
+		qcom,retain-regs;
 	};
 
 	mvs0c_gdsc: qcom,gdsc@abf0bf8 {
@@ -1687,7 +1743,7 @@
 		clocks = <&clock_gcc GCC_VIDEO_AHB_CLK>;
 		parent-supply = <&VDD_MMCX_LEVEL>;
 		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
-		qcom,support-hw-trigger;
+		qcom,retain-regs;
 	};
 
 	mvs1_gdsc: qcom,gdsc@abf0d98 {
@@ -1699,6 +1755,7 @@
 		parent-supply = <&VDD_MMCX_LEVEL>;
 		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
 		qcom,support-hw-trigger;
+		qcom,retain-regs;
 	};
 
 	mvs1c_gdsc: qcom,gdsc@abf0c98 {
@@ -1709,6 +1766,7 @@
 		clocks = <&clock_gcc GCC_VIDEO_AHB_CLK>;
 		parent-supply = <&VDD_MMCX_LEVEL>;
 		vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+		qcom,retain-regs;
 	};
 
 	spmi_bus: qcom,spmi@c440000 {
@@ -1720,7 +1778,7 @@
 		      <0xc40a000 0x26000>;
 		reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
 		interrupt-names = "periph_irq";
-		interrupts = <GIC_SPI 481 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts-extended = <&pdc 1 IRQ_TYPE_LEVEL_HIGH>;
 		qcom,ee = <0>;
 		qcom,channel = <0>;
 		#address-cells = <2>;
@@ -1730,10 +1788,34 @@
 		cell-index = <0>;
 	};
 
+	ufs_ice: ufsice@1d90000 {
+		compatible = "qcom,ice";
+		reg = <0x1d90000 0x8000>;
+		qcom,enable-ice-clk;
+		clock-names = "ufs_core_clk", "bus_clk",
+				"iface_clk", "ice_core_clk";
+		clocks = <&clock_gcc GCC_UFS_PHY_AXI_CLK>,
+			<&clock_gcc GCC_UFS_1X_CLKREF_EN>,
+			<&clock_gcc GCC_UFS_PHY_AHB_CLK>,
+			<&clock_gcc GCC_UFS_PHY_ICE_CORE_CLK>;
+		qcom,op-freq-hz = <0>, <0>, <0>, <300000000>;
+		vdd-hba-supply = <&ufs_phy_gdsc>;
+		qcom,msm-bus,name = "ufs_ice_noc";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<1 650 0 0>,    /* No vote */
+				<1 650 1000 0>; /* Max. bandwidth */
+		qcom,bus-vector-names = "MIN",
+					"MAX";
+		qcom,instance-type = "ufs";
+	};
+
 	ufsphy_mem: ufsphy_mem@1d87000 {
 		reg = <0x1d87000 0xe00>; /* PHY regs */
 		reg-names = "phy_mem";
 		#phy-cells = <0>;
+		ufs-qcom-crypto = <&ufs_ice>;
 
 		lanes-per-direction = <2>;
 
@@ -1753,6 +1835,7 @@
 		interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
 		phys = <&ufsphy_mem>;
 		phy-names = "ufsphy";
+		ufs-qcom-crypto = <&ufs_ice>;
 
 		lanes-per-direction = <2>;
 		dev-ref-clk-freq = <0>; /* 19.2 MHz */
@@ -2062,9 +2145,9 @@
 	spss_utils: qcom,spss_utils {
 		compatible = "qcom,spss-utils";
 		/* spss fuses physical address */
-		qcom,spss-fuse1-addr = <0x007841c4>;
+		qcom,spss-fuse1-addr = <0x00780234>;
 		qcom,spss-fuse1-bit = <27>;
-		qcom,spss-fuse2-addr = <0x007841c4>;
+		qcom,spss-fuse2-addr = <0x00780234>;
 		qcom,spss-fuse2-bit = <26>;
 		qcom,spss-dev-firmware-name  = "spss1d";  /* 8 chars max */
 		qcom,spss-test-firmware-name = "spss1t";  /* 8 chars max */
@@ -2126,65 +2209,85 @@
 		qcom,use-ipa-tethering-bridge;
 		qcom,mhi-event-ring-id-limits = <9 11>; /* start and end */
 		qcom,modem-cfg-emb-pipe-flt;
+		qcom,ipa-wdi3-over-gsi;
 		qcom,use-ipa-pm;
+		qcom,arm-smmu;
+		qcom,smmu-fast-map;
 		qcom,bandwidth-vote-for-ipa;
 		qcom,use-64-bit-dma-mask;
 		qcom,msm-bus,name = "ipa";
 		qcom,msm-bus,num-cases = <5>;
-		qcom,msm-bus,num-paths = <4>;
+		qcom,msm-bus,num-paths = <5>;
 		qcom,msm-bus,vectors-KBps =
 		/* No vote */
-		<MSM_BUS_MASTER_IPA MSM_BUS_SLAVE_EBI_CH0 0 0>,
+		<MSM_BUS_MASTER_IPA MSM_BUS_SLAVE_LLCC 0 0>,
+		<MSM_BUS_MASTER_LLCC MSM_BUS_SLAVE_EBI_CH0 0 0>,
 		<MSM_BUS_MASTER_IPA MSM_BUS_SLAVE_OCIMEM 0 0>,
 		<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_IPA_CFG 0 0>,
 		<MSM_BUS_MASTER_IPA_CORE MSM_BUS_SLAVE_IPA_CORE 0 0>,
 
 		/* SVS2 */
-		<MSM_BUS_MASTER_IPA MSM_BUS_SLAVE_EBI_CH0 80000 600000>,
-		<MSM_BUS_MASTER_IPA MSM_BUS_SLAVE_OCIMEM 80000 350000>,
-		<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_IPA_CFG 40000 40000>,
-		<MSM_BUS_MASTER_IPA_CORE MSM_BUS_SLAVE_IPA_CORE 0 125>,
+		<MSM_BUS_MASTER_IPA MSM_BUS_SLAVE_LLCC 150000 600000>,
+		<MSM_BUS_MASTER_LLCC MSM_BUS_SLAVE_EBI_CH0 150000 1804000>,
+		<MSM_BUS_MASTER_IPA MSM_BUS_SLAVE_OCIMEM 75000 300000>,
+		<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_IPA_CFG 0 76800>,
+		<MSM_BUS_MASTER_IPA_CORE MSM_BUS_SLAVE_IPA_CORE 0 150>,
 
 		/* SVS */
-		<MSM_BUS_MASTER_IPA MSM_BUS_SLAVE_EBI_CH0 80000 640000>,
-		<MSM_BUS_MASTER_IPA MSM_BUS_SLAVE_OCIMEM 80000 640000>,
-		<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_IPA_CFG 80000 80000>,
-		<MSM_BUS_MASTER_IPA_CORE MSM_BUS_SLAVE_IPA_CORE 0 250>,
+		<MSM_BUS_MASTER_IPA MSM_BUS_SLAVE_LLCC 625000 1200000>,
+		<MSM_BUS_MASTER_LLCC MSM_BUS_SLAVE_EBI_CH0 625000 3072000>,
+		<MSM_BUS_MASTER_IPA MSM_BUS_SLAVE_OCIMEM 312500 700000>,
+		<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_IPA_CFG 0 150000>,
+		<MSM_BUS_MASTER_IPA_CORE MSM_BUS_SLAVE_IPA_CORE 0 240>,
 
 		/* NOMINAL */
-		<MSM_BUS_MASTER_IPA MSM_BUS_SLAVE_EBI_CH0 206000 960000>,
-		<MSM_BUS_MASTER_IPA MSM_BUS_SLAVE_OCIMEM 206000 960000>,
-		<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_IPA_CFG 206000 160000>,
-		<MSM_BUS_MASTER_IPA_CORE MSM_BUS_SLAVE_IPA_CORE 0 500>,
+		<MSM_BUS_MASTER_IPA MSM_BUS_SLAVE_LLCC 1250000 2400000>,
+		<MSM_BUS_MASTER_LLCC MSM_BUS_SLAVE_EBI_CH0 1250000 6220800>,
+		<MSM_BUS_MASTER_IPA MSM_BUS_SLAVE_OCIMEM 625000 1500000>,
+		<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_IPA_CFG 0 400000>,
+		<MSM_BUS_MASTER_IPA_CORE MSM_BUS_SLAVE_IPA_CORE 0 466>,
 
 		/* TURBO */
-		<MSM_BUS_MASTER_IPA MSM_BUS_SLAVE_EBI_CH0 206000 3600000>,
-		<MSM_BUS_MASTER_IPA MSM_BUS_SLAVE_OCIMEM 206000 3600000>,
-		<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_IPA_CFG 206000 300000>,
-		<MSM_BUS_MASTER_IPA_CORE MSM_BUS_SLAVE_IPA_CORE 0 600>;
+		<MSM_BUS_MASTER_IPA MSM_BUS_SLAVE_LLCC 2000000 3500000>,
+		<MSM_BUS_MASTER_LLCC MSM_BUS_SLAVE_EBI_CH0 2000000 7219200>,
+		<MSM_BUS_MASTER_IPA MSM_BUS_SLAVE_OCIMEM 1000000 1920000>,
+		<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_IPA_CFG 0 400000>,
+		<MSM_BUS_MASTER_IPA_CORE MSM_BUS_SLAVE_IPA_CORE 0 533>;
 
 		qcom,bus-vector-names = "MIN", "SVS2", "SVS", "NOMINAL",
 			"TURBO";
-		qcom,throughput-threshold = <310 600 1000>;
+		qcom,throughput-threshold = <600 2500 5000>;
 		qcom,scaling-exceptions = <>;
-	};
 
-	ipa_smmu_ap: ipa_smmu_ap {
-		compatible = "qcom,ipa-smmu-ap-cb";
-		iommus = <&apps_smmu 0x5C0 0x0>;
-		qcom,iommu-dma = "bypass";
-	};
+		qcom,entire-ipa-block-size = <0x100000>;
+		qcom,register-collection-on-crash;
+		qcom,testbus-collection-on-crash;
+		qcom,non-tn-collection-on-crash;
+		qcom,secure-debug-check-action = <0>;
 
-	ipa_smmu_wlan: ipa_smmu_wlan {
-		compatible = "qcom,ipa-smmu-wlan-cb";
-		iommus = <&apps_smmu 0x5C1 0x0>;
-		qcom,iommu-dma = "bypass";
-	};
+		ipa_smmu_ap: ipa_smmu_ap {
+			compatible = "qcom,ipa-smmu-ap-cb";
+			iommus = <&apps_smmu 0x5C0 0x0>;
+			qcom,iova-mapping = <0x20000000 0x40000000>;
+			qcom,additional-mapping =
+				/* modem tables in IMEM */
+				<0x146BD000 0x146BD000 0x2000>;
+			dma-coherent;
+			qcom,iommu-dma = "disabled";
+		};
 
-	ipa_smmu_uc: ipa_smmu_uc {
-		compatible = "qcom,ipa-smmu-uc-cb";
-		iommus = <&apps_smmu 0x5C2 0x0>;
-		qcom,iommu-dma = "bypass";
+		ipa_smmu_wlan: ipa_smmu_wlan {
+			compatible = "qcom,ipa-smmu-wlan-cb";
+			iommus = <&apps_smmu 0x5C1 0x0>;
+			qcom,iommu-dma = "disabled";
+		};
+
+		ipa_smmu_uc: ipa_smmu_uc {
+			compatible = "qcom,ipa-smmu-uc-cb";
+			iommus = <&apps_smmu 0x5C2 0x0>;
+			qcom,iova-mapping = <0x40000000 0x20000000>;
+			qcom,iommu-dma = "disabled";
+		};
 	};
 
 	qcom,glink {
@@ -2373,7 +2476,8 @@
 	qcom,msm-eud@ff0000 {
 		compatible = "qcom,msm-eud";
 		interrupt-names = "eud_irq";
-		interrupts = <GIC_SPI 491 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-parent = <&pdc>;
+		interrupts = <11 IRQ_TYPE_LEVEL_HIGH>;
 		reg = <0x088E0000 0x2000>,
 			<0x088E2000 0x1000>;
 		reg-names = "eud_base", "eud_mode_mgr2";
@@ -2406,7 +2510,7 @@
 		qcom,complete-ramdump;
 
 		/* Inputs from lpass */
-		interrupts-extended = <&pdc 96 IRQ_TYPE_LEVEL_HIGH>,
+		interrupts-extended = <&pdc 6 IRQ_TYPE_LEVEL_HIGH>,
 				<&adsp_smp2p_in 0 0>,
 				<&adsp_smp2p_in 2 0>,
 				<&adsp_smp2p_in 1 0>,
@@ -3284,7 +3388,7 @@
 		reg = <0x64>;
 		fmint-gpio = <&tlmm 51 0>;
 		vdd-supply = <&pm8150a_bob>;
-		rtc6226,vdd-supply-voltage = <3300000 3300000>;
+		rtc6226,vdd-supply-voltage = <3296000 3296000>;
 		vio-supply = <&pm8150_s4>;
 		rtc6226,vio-supply-voltage = <1800000 1800000 >;
 	};
diff --git a/arch/arm64/boot/dts/qcom/lito-gdsc.dtsi b/arch/arm64/boot/dts/qcom/lito-gdsc.dtsi
index 8c2dc63..e57052a 100644
--- a/arch/arm64/boot/dts/qcom/lito-gdsc.dtsi
+++ b/arch/arm64/boot/dts/qcom/lito-gdsc.dtsi
@@ -43,37 +43,37 @@
 
 	/* CAM_CC GDSCs */
 	bps_gdsc: qcom,gdsc@ad07004 {
-		compatible = "regulator-fixed";
+		compatible = "qcom,gdsc";
 		reg = <0xad07004 0x4>;
 		regulator-name = "bps_gdsc";
 	};
 
 	ipe_0_gdsc: qcom,gdsc@ad08004 {
-		compatible = "regulator-fixed";
+		compatible = "qcom,gdsc";
 		reg = <0xad08004 0x4>;
 		regulator-name = "ipe_0_gdsc";
 	};
 
 	ipe_1_gdsc: qcom,gdsc@ad09004 {
-		compatible = "regulator-fixed";
+		compatible = "qcom,gdsc";
 		reg = <0xad09004 0x4>;
 		regulator-name = "ipe_1_gdsc";
 	};
 
 	ife_0_gdsc: qcom,gdsc@ad0a004 {
-		compatible = "regulator-fixed";
+		compatible = "qcom,gdsc";
 		reg = <0xad0a004 0x4>;
 		regulator-name = "ife_0_gdsc";
 	};
 
 	ife_1_gdsc: qcom,gdsc@ad0b004 {
-		compatible = "regulator-fixed";
+		compatible = "qcom,gdsc";
 		reg = <0xad0b004 0x4>;
 		regulator-name = "ife_1_gdsc";
 	};
 
 	titan_top_gdsc: qcom,gdsc@ad0c1c4 {
-		compatible = "regulator-fixed";
+		compatible = "qcom,gdsc";
 		reg = <0xad0c1c4 0x4>;
 		regulator-name = "titan_top_gdsc";
 	};
@@ -129,19 +129,19 @@
 
 	/* VIDEO_CC GDSCs */
 	mvsc_gdsc: qcom,gdsc@ab00814 {
-		compatible = "regulator-fixed";
+		compatible = "qcom,gdsc";
 		reg = <0xab00814 0x4>;
 		regulator-name = "mvsc_gdsc";
 	};
 
 	mvs0_gdsc: qcom,gdsc@ab00874 {
-		compatible = "regulator-fixed";
+		compatible = "qcom,gdsc";
 		reg = <0xab00874 0x4>;
 		regulator-name = "mvs0_gdsc";
 	};
 
 	mvs1_gdsc: qcom,gdsc@ab008b4 {
-		compatible = "regulator-fixed";
+		compatible = "qcom,gdsc";
 		reg = <0xab008b4 0x4>;
 		regulator-name = "mvs1_gdsc";
 	};
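Note on the lito-gdsc.dtsi hunk: switching compatible from "regulator-fixed" to "qcom,gdsc" replaces the always-on stubs with real GDSC control, so the driver now toggles the collapse register at the given address when consumers enable or disable the supply. A sketch of such a node with an optional knob taken from the kona hunks earlier in this patch (qcom,retain-regs is not part of the lito change itself):

	bps_gdsc: qcom,gdsc@ad07004 {
		compatible = "qcom,gdsc";
		reg = <0xad07004 0x4>;	/* GDSCR */
		regulator-name = "bps_gdsc";
		qcom,retain-regs;	/* optional: retain register state
					 * across power collapse */
	};
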
diff --git a/arch/arm64/boot/dts/qcom/lito-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/lito-pinctrl.dtsi
index 53d10bc..203e7a6 100644
--- a/arch/arm64/boot/dts/qcom/lito-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/lito-pinctrl.dtsi
@@ -13,6 +13,103 @@
 		interrupt-controller;
 		#interrupt-cells = <2>;
 
+		qupv3_se2_2uart_pins: qupv3_se2_2uart_pins {
+			qupv3_se2_2uart_active: qupv3_se2_2uart_active {
+				mux {
+					pins = "gpio36", "gpio37";
+					function = "qup02";
+				};
+
+				config {
+					pins = "gpio36", "gpio37";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se2_2uart_sleep: qupv3_se2_2uart_sleep {
+				mux {
+					pins = "gpio36", "gpio37";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio36", "gpio37";
+					drive-strength = <2>;
+					bias-pull-down;
+				};
+			};
+		};
+
+		qupv3_se5_4uart_pins: qupv3_se5_4uart_pins {
+			qupv3_se5_ctsrx: qupv3_se5_ctsrx {
+				mux {
+					pins = "gpio38", "gpio41";
+					function = "qup05";
+				};
+
+				config {
+					pins = "gpio38", "gpio41";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se5_rts: qupv3_se5_rts {
+				mux {
+					pins = "gpio39";
+					function = "qup05";
+				};
+
+				config {
+					pins = "gpio39";
+					drive-strength = <2>;
+					bias-pull-down;
+				};
+			};
+
+			qupv3_se5_tx: qupv3_se5_tx {
+				mux {
+					pins = "gpio40";
+					function = "qup05";
+				};
+
+				config {
+					pins = "gpio40";
+					drive-strength = <2>;
+					bias-pull-up;
+				};
+			};
+		};
+
+		qupv3_se8_2uart_pins: qupv3_se8_2uart_pins {
+			qupv3_se8_2uart_active: qupv3_se8_2uart_active {
+				mux {
+					pins = "gpio51", "gpio52";
+					function = "qup12";
+				};
+
+				config {
+					pins = "gpio51", "gpio52";
+					drive-strength = <2>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se8_2uart_sleep: qupv3_se8_2uart_sleep {
+				mux {
+					pins = "gpio51", "gpio52";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio51", "gpio52";
+					drive-strength = <2>;
+					bias-pull-down;
+				};
+			};
+		};
+
 		ufs_dev_reset_assert: ufs_dev_reset_assert {
 			config {
 				pins = "ufs_reset";
diff --git a/arch/arm64/boot/dts/qcom/lito-qupv3.dtsi b/arch/arm64/boot/dts/qcom/lito-qupv3.dtsi
new file mode 100644
index 0000000..e8eb2f4
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/lito-qupv3.dtsi
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <dt-bindings/msm/msm-bus-ids.h>
+
+&soc {
+	/* QUPv3_0 */
+	qupv3_0: qcom,qupv3_0_geni_se@8c0000 {
+		compatible = "qcom,qupv3-geni-se";
+		reg = <0x8c0000 0x2000>;
+		qcom,bus-mas-id = <MSM_BUS_MASTER_QUP_0>;
+		qcom,bus-slv-id = <MSM_BUS_SLAVE_EBI_CH0>;
+		iommus = <&apps_smmu 0x4e3 0x0>;
+		qcom,iommu-dma-addr-pool = <0x40000000 0xc0000000>;
+		qcom,iommu-dma = "bypass";
+	};
+
+	/* Debug UART Instance for RUMI platform */
+	qupv3_se2_2uart: qcom,qup_uart@888000 {
+		compatible = "qcom,msm-geni-console";
+		reg = <0x888000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>,
+			<&gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se2_2uart_active>;
+		pinctrl-1 = <&qupv3_se2_2uart_sleep>;
+		interrupts = <GIC_SPI 603 IRQ_TYPE_LEVEL_HIGH>;
+		qcom,wrapper-core = <&qupv3_0>;
+		status = "disabled";
+	};
+
+	/* 4-wire UART */
+	qupv3_se5_4uart: qcom,qup_uart@894000 {
+		compatible = "qcom,msm-geni-serial-hs";
+		reg = <0x894000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>,
+			<&gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se5_ctsrx>, <&qupv3_se5_rts>,
+						<&qupv3_se5_tx>;
+		pinctrl-1 = <&qupv3_se5_ctsrx>, <&qupv3_se5_rts>,
+						<&qupv3_se5_tx>;
+		interrupts-extended = <&intc GIC_SPI 606 IRQ_TYPE_LEVEL_HIGH>,
+				<&tlmm 41 IRQ_TYPE_LEVEL_HIGH>;
+		qcom,wrapper-core = <&qupv3_0>;
+		qcom,wakeup-byte = <0xFD>;
+		status = "disabled";
+	};
+
+	/* QUPv3_1 */
+	qupv3_1: qcom,qupv3_1_geni_se@9c0000 {
+		compatible = "qcom,qupv3-geni-se";
+		reg = <0x9c0000 0x2000>;
+		qcom,bus-mas-id = <MSM_BUS_MASTER_QUP_1>;
+		qcom,bus-slv-id = <MSM_BUS_SLAVE_EBI_CH1>;
+		iommus = <&apps_smmu 0x023 0x0>;
+		qcom,iommu-dma-addr-pool = <0x40000000 0xc0000000>;
+		qcom,iommu-dma = "bypass";
+	};
+
+	/* 2-wire UART */
+	qupv3_se8_2uart: qcom,qup_uart@988000 {
+		compatible = "qcom,msm-geni-console";
+		reg = <0x988000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>,
+			<&gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se8_2uart_active>;
+		pinctrl-1 = <&qupv3_se8_2uart_sleep>;
+		interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+		qcom,wrapper-core = <&qupv3_1>;
+		status = "disabled";
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/lito-rumi.dtsi b/arch/arm64/boot/dts/qcom/lito-rumi.dtsi
index ce13c7f..093f3d6 100644
--- a/arch/arm64/boot/dts/qcom/lito-rumi.dtsi
+++ b/arch/arm64/boot/dts/qcom/lito-rumi.dtsi
@@ -44,6 +44,15 @@
 	};
 };
 
+&qupv3_se8_2uart {
+	status = "disabled";
+};
+
+/* RUMI UART console */
+&qupv3_se2_2uart {
+	status = "ok";
+};
+
 &wdog {
 	status = "disabled";
 };
diff --git a/arch/arm64/boot/dts/qcom/lito.dtsi b/arch/arm64/boot/dts/qcom/lito.dtsi
index 7e28ef1..7b263ba 100644
--- a/arch/arm64/boot/dts/qcom/lito.dtsi
+++ b/arch/arm64/boot/dts/qcom/lito.dtsi
@@ -24,6 +24,7 @@
 	interrupt-parent = <&intc>;
 
 	aliases {
+		serial0 = &qupv3_se2_2uart;	/* RUMI */
 		ufshc1 = &ufshc_mem; /* Embedded UFS slot */
 	};
 
@@ -470,6 +471,13 @@
 			no-map;
 			reg = <0x0 0x9f400000 0x0 0xc00000>;
 		};
+
+		cdsp_mem: cdsp_region {
+			compatible = "shared-dma-pool";
+			alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>;
+			reusable;
+			alignment = <0x0 0x400000>;
+			size = <0x0 0x400000>;
+		};
 
 		/* global autoconfigured region for contiguous allocations */
 		linux,cma {
@@ -836,7 +844,7 @@
 		sleep_clk: sleep-clk {
 			compatible = "fixed-clock";
 			clock-output-names = "chip_sleep_clk";
-			clock-frequency = <32764>;
+			clock-frequency = <32000>;
 			#clock-cells = <0>;
 		};
 	};
@@ -869,7 +877,7 @@
 		#clock-cells = <1>;
 	};
 
-	gcc: qcom,gcc {
+	gcc: qcom,gcc@100000 {
 		compatible = "qcom,gcc-lito", "syscon";
 		reg = <0x100000 0x1f0000>;
 		reg-names = "cc_base";
@@ -879,6 +887,29 @@
 		#reset-cells = <1>;
 	};
 
+	camcc: qcom,camcc@ad00000 {
+		compatible = "qcom,lito-camcc", "syscon";
+		reg = <0xad00000 0x10000>;
+		reg-names = "cc_base";
+		vdd_mx-supply = <&VDD_MX_LEVEL>;
+		vdd_cx-supply = <&VDD_CX_LEVEL>;
+		clock-names = "cfg_ahb_clk";
+		clocks = <&gcc GCC_CAMERA_AHB_CLK>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	videocc: qcom,videocc {
+		compatible = "qcom,lito-videocc", "syscon";
+		reg = <0x0ab00000 0x10000>;
+		reg-names = "cc_base";
+		vdd_cx-supply = <&VDD_CX_LEVEL>;
+		clock-names = "cfg_ahb_clk";
+		clocks = <&gcc GCC_VIDEO_AHB_CLK>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
 	ufsphy_mem: ufsphy_mem@1d87000 {
 		reg = <0x1d87000 0xe00>; /* PHY regs */
 		reg-names = "phy_mem";
@@ -1012,20 +1043,6 @@
 		#reset-cells = <1>;
 	};
 
-	videocc: qcom,videocc {
-		compatible = "qcom,dummycc";
-		clock-output-names = "videocc_clocks";
-		#clock-cells = <1>;
-		#reset-cells = <1>;
-	};
-
-	camcc: qcom,camcc {
-		compatible = "qcom,dummycc";
-		clock-output-names = "camcc_clocks";
-		#clock-cells = <1>;
-		#reset-cells = <1>;
-	};
-
 	dispcc: qcom,dispcc {
 		compatible = "qcom,dummycc";
 		clock-output-names = "dispcc_clocks";
@@ -1340,26 +1357,41 @@
 };
 
 &bps_gdsc {
+	clock-names = "ahb_clk";
+	clocks = <&gcc GCC_CAMERA_AHB_CLK>;
+	qcom,support-hw-trigger;
 	status = "ok";
 };
 
 &ipe_0_gdsc {
+	clock-names = "ahb_clk";
+	clocks = <&gcc GCC_CAMERA_AHB_CLK>;
+	qcom,support-hw-trigger;
 	status = "ok";
 };
 
 &ipe_1_gdsc {
+	clock-names = "ahb_clk";
+	clocks = <&gcc GCC_CAMERA_AHB_CLK>;
+	qcom,support-hw-trigger;
 	status = "ok";
 };
 
 &ife_0_gdsc {
+	clock-names = "ahb_clk";
+	clocks = <&gcc GCC_CAMERA_AHB_CLK>;
 	status = "ok";
 };
 
 &ife_1_gdsc {
+	clock-names = "ahb_clk";
+	clocks = <&gcc GCC_CAMERA_AHB_CLK>;
 	status = "ok";
 };
 
 &titan_top_gdsc {
+	clock-names = "ahb_clk";
+	clocks = <&gcc GCC_CAMERA_AHB_CLK>;
 	status = "ok";
 };
 
@@ -1380,13 +1412,23 @@
 };
 
 &mvsc_gdsc {
+	clock-names = "ahb_clk";
+	clocks = <&gcc GCC_VIDEO_AHB_CLK>;
 	status = "ok";
 };
 
 &mvs0_gdsc {
+	clock-names = "ahb_clk";
+	clocks = <&gcc GCC_VIDEO_AHB_CLK>;
+	qcom,support-hw-trigger;
 	status = "ok";
 };
 
 &mvs1_gdsc {
+	clock-names = "ahb_clk";
+	clocks = <&gcc GCC_VIDEO_AHB_CLK>;
+	qcom,support-hw-trigger;
 	status = "ok";
 };
+
+#include "lito-qupv3.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi
index 4137130..930087a 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi
@@ -15,6 +15,7 @@
 		qcom,dynamic;
 		qcom,skip-init;
 		qcom,use-3-lvl-tables;
+		qcom,no-dynamic-asid;
 		#global-interrupts = <2>;
 		#size-cells = <1>;
 		#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-lito.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-lito.dtsi
index 582bac8..8127b96 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-lito.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-lito.dtsi
@@ -7,7 +7,6 @@
 
 &soc {
 	kgsl_smmu: kgsl-smmu@3da0000 {
-		status = "disabled";
 		compatible = "qcom,qsmmu-v500";
 		reg = <0x3da0000 0x10000>,
 		      <0x3dc2000 0x20>;
@@ -20,15 +19,15 @@
 		#size-cells = <1>;
 		#address-cells = <1>;
 		ranges;
-		interrupts = <GIC_SPI 674 IRQ_TYPE_LEVEL_HIGH>,
+		interrupts = <GIC_SPI 673 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 678 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 679 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 680 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 681 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 682 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 683 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 684 IRQ_TYPE_LEVEL_HIGH>,
-				<GIC_SPI 685 IRQ_TYPE_LEVEL_HIGH>,
-				<GIC_SPI 686 IRQ_TYPE_LEVEL_HIGH>;
+				<GIC_SPI 685 IRQ_TYPE_LEVEL_HIGH>;
 		gfx_0_tbu: gfx_0_tbu@3dc5000 {
 			compatible = "qcom,qsmmuv500-tbu";
 			reg = <0x3dc5000 0x1000>,
@@ -205,6 +204,11 @@
 		};
 	};
 
+	kgsl_iommu_test_device {
+		compatible = "iommu-debug-test";
+		iommus = <&kgsl_smmu 0x7 0x400>;
+	};
+
 	apps_iommu_test_device {
 		compatible = "iommu-debug-test";
 		iommus = <&apps_smmu 0x1 0>;
diff --git a/arch/arm64/boot/dts/qcom/pm8150.dtsi b/arch/arm64/boot/dts/qcom/pm8150.dtsi
index ac61470..e81f65f 100644
--- a/arch/arm64/boot/dts/qcom/pm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150.dtsi
@@ -128,8 +128,7 @@
 			#address-cells = <1>;
 			#size-cells = <0>;
 			#thermal-sensor-cells = <1>;
-			io-channels = <&pm8150_vadc ADC_XO_THERM_PU2>,
-					<&pm8150_vadc ADC_AMUX_THM1_PU2>,
+			io-channels = <&pm8150_vadc ADC_AMUX_THM1_PU2>,
 					<&pm8150_vadc ADC_AMUX_THM2_PU2>;
 		};
 	};
diff --git a/arch/arm64/boot/dts/qcom/pm8150b.dtsi b/arch/arm64/boot/dts/qcom/pm8150b.dtsi
index e4a9753..6a82fdb 100644
--- a/arch/arm64/boot/dts/qcom/pm8150b.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150b.dtsi
@@ -256,7 +256,6 @@
 			qcom,default-sink-caps = <5000 3000>, /* 5V @ 3A */
 						 <9000 3000>, /* 9V @ 3A */
 						 <12000 2250>; /* 12V @ 2.25A */
-			status = "disabled";
 		};
 
 		pm8150b_fg: qpnp,fg {
@@ -440,7 +439,7 @@
 				     <0x3 0xc0 0x1 IRQ_TYPE_EDGE_BOTH>;
 			interrupt-names = "hap-sc-irq", "hap-play-irq";
 			qcom,actuator-type = "lra";
-			qcom,vmax-mv = <3400>;
+			qcom,vmax-mv = <3600>;
 			qcom,play-rate-us = <6667>;
 			qcom,lra-resonance-sig-shape = "sine";
 			qcom,lra-auto-resonance-mode = "qwd";
@@ -450,9 +449,11 @@
 				/* CLICK */
 				qcom,effect-id = <0>;
 				qcom,wf-vmax-mv = <3600>;
-				qcom,wf-pattern = [3e 3e 3e];
+				qcom,wf-pattern = [3e 3e 3e 3e 3e 3e 3e 3e];
 				qcom,wf-play-rate-us = <6667>;
-				qcom,wf-brake-pattern = [01 00 00 00];
+				qcom,wf-brake-pattern = [00 00 00 00];
+				qcom,wf-repeat-count = <1>;
+				qcom,wf-s-repeat-count = <1>;
 				qcom,lra-auto-resonance-disable;
 			};
 
@@ -460,9 +461,10 @@
 				/* DOUBLE CLICK */
 				qcom,effect-id = <1>;
 				qcom,wf-vmax-mv = <3600>;
-				qcom,wf-pattern = [7e 7e 02 02 02 02 02 02];
-				qcom,wf-play-rate-us = <7143>;
-				qcom,wf-repeat-count = <2>;
+				qcom,wf-pattern = [3e 3e 3e 3e 3e 3e 3e 3e];
+				qcom,wf-play-rate-us = <6667>;
+				qcom,wf-brake-pattern = [00 00 00 00];
+				qcom,wf-repeat-count = <1>;
 				qcom,wf-s-repeat-count = <1>;
 				qcom,lra-auto-resonance-disable;
 			};
@@ -471,8 +473,11 @@
 				/* TICK */
 				qcom,effect-id = <2>;
 				qcom,wf-vmax-mv = <3600>;
-				qcom,wf-pattern = [7e 7e];
-				qcom,wf-play-rate-us = <4000>;
+				qcom,wf-pattern = [3e 3e 3e 3e 3e 3e 3e 3e];
+				qcom,wf-play-rate-us = <6667>;
+				qcom,wf-brake-pattern = [00 00 00 00];
+				qcom,wf-repeat-count = <1>;
+				qcom,wf-s-repeat-count = <1>;
 				qcom,lra-auto-resonance-disable;
 			};
 
@@ -480,8 +485,11 @@
 				/* THUD */
 				qcom,effect-id = <3>;
 				qcom,wf-vmax-mv = <3600>;
-				qcom,wf-pattern = [7e 7e 7e];
+				qcom,wf-pattern = [3e 3e 3e 3e 3e 3e 3e 3e];
 				qcom,wf-play-rate-us = <6667>;
+				qcom,wf-brake-pattern = [00 00 00 00];
+				qcom,wf-repeat-count = <1>;
+				qcom,wf-s-repeat-count = <1>;
 				qcom,lra-auto-resonance-disable;
 			};
 
@@ -489,8 +497,11 @@
 				/* POP */
 				qcom,effect-id = <4>;
 				qcom,wf-vmax-mv = <3600>;
-				qcom,wf-pattern = [7e 7e];
-				qcom,wf-play-rate-us = <5000>;
+				qcom,wf-pattern = [3e 3e 3e 3e 3e 3e 3e 3e];
+				qcom,wf-play-rate-us = <6667>;
+				qcom,wf-brake-pattern = [00 00 00 00];
+				qcom,wf-repeat-count = <1>;
+				qcom,wf-s-repeat-count = <1>;
 				qcom,lra-auto-resonance-disable;
 			};
 
@@ -498,9 +509,11 @@
 				/* HEAVY CLICK */
 				qcom,effect-id = <5>;
 				qcom,wf-vmax-mv = <3600>;
-				qcom,wf-pattern = [7e 7e 7e];
+				qcom,wf-pattern = [3e 3e 3e 3e 3e 3e 3e 3e];
 				qcom,wf-play-rate-us = <6667>;
-				qcom,wf-brake-pattern = [03 00 00 00];
+				qcom,wf-brake-pattern = [00 00 00 00];
+				qcom,wf-repeat-count = <1>;
+				qcom,wf-s-repeat-count = <1>;
 				qcom,lra-auto-resonance-disable;
 			};
 		};
diff --git a/arch/arm64/configs/cuttlefish_defconfig b/arch/arm64/configs/cuttlefish_defconfig
index 636be7f..b7ba3f6 100644
--- a/arch/arm64/configs/cuttlefish_defconfig
+++ b/arch/arm64/configs/cuttlefish_defconfig
@@ -178,9 +178,12 @@
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_HTB=y
 CONFIG_NET_CLS_U32=y
+CONFIG_NET_CLS_BPF=y
 CONFIG_NET_EMATCH=y
 CONFIG_NET_EMATCH_U32=y
 CONFIG_NET_CLS_ACT=y
+CONFIG_VSOCKETS=y
+CONFIG_VIRTIO_VSOCKETS=y
 CONFIG_CFG80211=y
 # CONFIG_CFG80211_DEFAULT_PS is not set
 # CONFIG_CFG80211_CRDA_SUPPORT is not set
@@ -366,12 +369,12 @@
 # CONFIG_PWRSEQ_SIMPLE is not set
 # CONFIG_MMC_BLOCK is not set
 CONFIG_RTC_CLASS=y
-# CONFIG_RTC_HCTOSYS is not set
 # CONFIG_RTC_SYSTOHC is not set
 CONFIG_RTC_DRV_PL031=y
 CONFIG_VIRTIO_PCI=y
 # CONFIG_VIRTIO_PCI_LEGACY is not set
 CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_INPUT=y
 CONFIG_VIRTIO_MMIO=y
 CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
 CONFIG_STAGING=y
diff --git a/arch/arm64/configs/vendor/kona-perf_defconfig b/arch/arm64/configs/vendor/kona-perf_defconfig
index f057942..f23250d 100644
--- a/arch/arm64/configs/vendor/kona-perf_defconfig
+++ b/arch/arm64/configs/vendor/kona-perf_defconfig
@@ -33,6 +33,7 @@
 # CONFIG_RD_XZ is not set
 # CONFIG_RD_LZO is not set
 # CONFIG_RD_LZ4 is not set
+# CONFIG_FHANDLE is not set
 CONFIG_KALLSYMS_ALL=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_EMBEDDED=y
@@ -97,11 +98,13 @@
 CONFIG_CFQ_GROUP_IOSCHED=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_CMA=y
+CONFIG_CMA_AREAS=16
 CONFIG_ZSMALLOC=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
 CONFIG_XFRM_STATISTICS=y
 CONFIG_NET_KEY=y
 CONFIG_INET=y
@@ -111,6 +114,7 @@
 CONFIG_IP_ROUTE_VERBOSE=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPGRE_DEMUX=y
 CONFIG_NET_IPVTI=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
@@ -170,9 +174,9 @@
 CONFIG_NETFILTER_XT_MATCH_MAC=y
 CONFIG_NETFILTER_XT_MATCH_MARK=y
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
 CONFIG_NETFILTER_XT_MATCH_POLICY=y
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
-CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
@@ -221,6 +225,7 @@
 CONFIG_NET_CLS_U32=y
 CONFIG_CLS_U32_MARK=y
 CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
 CONFIG_NET_EMATCH=y
 CONFIG_NET_EMATCH_CMP=y
 CONFIG_NET_EMATCH_NBYTE=y
@@ -238,6 +243,7 @@
 CONFIG_MSM_BT_POWER=y
 CONFIG_BT_SLIM_QCA6390=y
 CONFIG_CFG80211=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
 CONFIG_RFKILL=y
 CONFIG_NFC_NQ=y
 CONFIG_FW_LOADER_USER_HELPER=y
@@ -256,6 +262,7 @@
 CONFIG_BLK_DEV_RAM_SIZE=8192
 CONFIG_HDCP_QSEECOM=y
 CONFIG_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_SG=y
@@ -282,6 +289,9 @@
 CONFIG_PPP_BSDCOMP=y
 CONFIG_PPP_DEFLATE=y
 CONFIG_PPP_MPPE=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
+CONFIG_USB_LAN78XX=y
 CONFIG_USB_USBNET=y
 CONFIG_WIL6210=m
 CONFIG_CLD_LL_CORE=y
@@ -303,8 +313,8 @@
 # CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVMEM is not set
 CONFIG_SERIAL_MSM_GENI=y
-CONFIG_SERIAL_MSM_GENI_CONSOLE=y
 CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
 CONFIG_DIAG_CHAR=y
 CONFIG_MSM_ADSPRPC=y
 CONFIG_I2C_CHARDEV=y
@@ -328,6 +338,7 @@
 CONFIG_THERMAL_WRITABLE_TRIPS=y
 CONFIG_THERMAL_GOV_USER_SPACE=y
 CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
 CONFIG_DEVFREQ_THERMAL=y
 CONFIG_QCOM_SPMI_TEMP_ALARM=y
 CONFIG_THERMAL_TSENS=y
@@ -337,6 +348,7 @@
 CONFIG_QTI_BCL_PMIC5=y
 CONFIG_QTI_BCL_SOC_DRIVER=y
 CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_QTI_CPU_ISOLATE_COOLING_DEVICE=y
 CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
@@ -363,7 +375,9 @@
 CONFIG_MSM_GLOBAL_SYNX=y
 CONFIG_I2C_RTC6226_QCA=y
 CONFIG_DRM=y
+CONFIG_DRM_MSM_DP=y
 CONFIG_DRM_MSM_REGISTER_LOGGING=y
+CONFIG_DRM_SDE_RSC=y
 CONFIG_FB_ARMCLCD=y
 CONFIG_BACKLIGHT_QCOM_SPMI_WLED=y
 CONFIG_LOGO=y
@@ -453,9 +467,13 @@
 CONFIG_IPA_WDI_UNIFIED_API=y
 CONFIG_RMNET_IPA3=y
 CONFIG_RNDIS_IPA=y
+CONFIG_IPA_UT=y
 CONFIG_MSM_11AD=m
 CONFIG_USB_BAM=y
 CONFIG_QCOM_GENI_SE=y
+CONFIG_IPA3_REGDUMP=y
+CONFIG_QCOM_MDSS_PLL=y
+CONFIG_QCOM_MDSS_DP_PLL=y
 CONFIG_QCOM_CLK_RPMH=y
 CONFIG_SPMI_PMIC_CLKDIV=y
 CONFIG_MSM_CLK_AOP_QMP=y
@@ -482,6 +500,7 @@
 CONFIG_RPMSG_QCOM_GLINK_SPSS=y
 CONFIG_QCOM_COMMAND_DB=y
 CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_QCOM_RUN_QUEUE_STATS=y
 CONFIG_QCOM_IPCC=y
 CONFIG_QCOM_LLCC=y
 CONFIG_QCOM_KONA_LLCC=y
@@ -503,6 +522,7 @@
 CONFIG_MSM_SERVICE_NOTIFIER=y
 CONFIG_MSM_SUBSYSTEM_RESTART=y
 CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_QMI_COMM=y
 CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_MSM_BOOT_STATS=y
 CONFIG_QCOM_DCC_V2=y
@@ -552,8 +572,10 @@
 CONFIG_ESOC_MDM_4x=y
 CONFIG_ESOC_MDM_DRV=y
 CONFIG_SENSORS_SSC=y
+CONFIG_QCOM_KGSL=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
@@ -572,6 +594,7 @@
 CONFIG_FORTIFY_SOURCE=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
 CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_TWOFISH=y
@@ -580,7 +603,7 @@
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
 CONFIG_MAGIC_SYSRQ=y
-CONFIG_PANIC_TIMEOUT=5
+CONFIG_PANIC_TIMEOUT=-1
 CONFIG_SCHEDSTATS=y
 # CONFIG_DEBUG_PREEMPT is not set
 CONFIG_IPC_LOGGING=y
diff --git a/arch/arm64/configs/vendor/kona_defconfig b/arch/arm64/configs/vendor/kona_defconfig
index 4dd0328..8236370 100644
--- a/arch/arm64/configs/vendor/kona_defconfig
+++ b/arch/arm64/configs/vendor/kona_defconfig
@@ -34,6 +34,7 @@
 # CONFIG_RD_XZ is not set
 # CONFIG_RD_LZO is not set
 # CONFIG_RD_LZ4 is not set
+# CONFIG_FHANDLE is not set
 CONFIG_BPF_SYSCALL=y
 CONFIG_EMBEDDED=y
 # CONFIG_COMPAT_BRK is not set
@@ -101,11 +102,13 @@
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_CLEANCACHE=y
 CONFIG_CMA=y
+CONFIG_CMA_AREAS=16
 CONFIG_ZSMALLOC=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
 CONFIG_XFRM_STATISTICS=y
 CONFIG_NET_KEY=y
 CONFIG_INET=y
@@ -115,6 +118,7 @@
 CONFIG_IP_ROUTE_VERBOSE=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPGRE_DEMUX=y
 CONFIG_NET_IPVTI=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
@@ -174,9 +178,9 @@
 CONFIG_NETFILTER_XT_MATCH_MAC=y
 CONFIG_NETFILTER_XT_MATCH_MARK=y
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
 CONFIG_NETFILTER_XT_MATCH_POLICY=y
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
-CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
@@ -226,6 +230,7 @@
 CONFIG_NET_CLS_U32=y
 CONFIG_CLS_U32_MARK=y
 CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
 CONFIG_NET_EMATCH=y
 CONFIG_NET_EMATCH_CMP=y
 CONFIG_NET_EMATCH_NBYTE=y
@@ -244,7 +249,7 @@
 CONFIG_MSM_BT_POWER=y
 CONFIG_BT_SLIM_QCA6390=y
 CONFIG_CFG80211=y
-# CONFIG_CFG80211_CRDA_SUPPORT is not set
+CONFIG_CFG80211_INTERNAL_REGDB=y
 CONFIG_RFKILL=y
 CONFIG_NFC_NQ=y
 CONFIG_FW_LOADER_USER_HELPER=y
@@ -264,6 +269,7 @@
 CONFIG_BLK_DEV_RAM_SIZE=8192
 CONFIG_HDCP_QSEECOM=y
 CONFIG_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_SG=y
@@ -284,11 +290,13 @@
 CONFIG_DUMMY=y
 CONFIG_TUN=y
 CONFIG_RMNET=y
-CONFIG_PHYLIB=y
 CONFIG_PPP=y
 CONFIG_PPP_BSDCOMP=y
 CONFIG_PPP_DEFLATE=y
 CONFIG_PPP_MPPE=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
+CONFIG_USB_LAN78XX=y
 CONFIG_WIL6210=m
 CONFIG_CLD_LL_CORE=y
 CONFIG_CNSS2=y
@@ -309,11 +317,13 @@
 # CONFIG_SERIO_SERPORT is not set
 # CONFIG_VT is not set
 # CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
 CONFIG_SERIAL_MSM_GENI=y
 CONFIG_SERIAL_MSM_GENI_CONSOLE=y
 CONFIG_SERIAL_DEV_BUS=y
 CONFIG_TTY_PRINTK=y
 CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
 CONFIG_DIAG_CHAR=y
 CONFIG_MSM_ADSPRPC=y
 CONFIG_I2C_CHARDEV=y
@@ -337,6 +347,7 @@
 CONFIG_THERMAL_WRITABLE_TRIPS=y
 CONFIG_THERMAL_GOV_USER_SPACE=y
 CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
 CONFIG_DEVFREQ_THERMAL=y
 CONFIG_QCOM_SPMI_TEMP_ALARM=y
 CONFIG_THERMAL_TSENS=y
@@ -346,6 +357,7 @@
 CONFIG_QTI_BCL_PMIC5=y
 CONFIG_QTI_BCL_SOC_DRIVER=y
 CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_QTI_CPU_ISOLATE_COOLING_DEVICE=y
 CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
@@ -372,8 +384,9 @@
 CONFIG_MSM_GLOBAL_SYNX=y
 CONFIG_I2C_RTC6226_QCA=y
 CONFIG_DRM=y
+CONFIG_DRM_MSM_DP=y
 CONFIG_DRM_MSM_REGISTER_LOGGING=y
-CONFIG_FB_VIRTUAL=y
+CONFIG_DRM_SDE_RSC=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
 CONFIG_BACKLIGHT_QCOM_SPMI_WLED=y
@@ -468,10 +481,13 @@
 CONFIG_IPA_WDI_UNIFIED_API=y
 CONFIG_RMNET_IPA3=y
 CONFIG_RNDIS_IPA=y
+CONFIG_IPA_UT=y
 CONFIG_MSM_11AD=m
 CONFIG_USB_BAM=y
 CONFIG_QCOM_GENI_SE=y
+CONFIG_IPA3_REGDUMP=y
 CONFIG_QCOM_MDSS_PLL=y
+CONFIG_QCOM_MDSS_DP_PLL=y
 CONFIG_QCOM_CLK_RPMH=y
 CONFIG_SPMI_PMIC_CLKDIV=y
 CONFIG_MSM_CLK_AOP_QMP=y
@@ -498,6 +514,7 @@
 CONFIG_RPMSG_QCOM_GLINK_SPSS=y
 CONFIG_QCOM_COMMAND_DB=y
 CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_QCOM_RUN_QUEUE_STATS=y
 CONFIG_QCOM_IPCC=y
 CONFIG_QCOM_LLCC=y
 CONFIG_QCOM_KONA_LLCC=y
@@ -519,6 +536,7 @@
 CONFIG_MSM_SERVICE_NOTIFIER=y
 CONFIG_MSM_SUBSYSTEM_RESTART=y
 CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_QMI_COMM=y
 CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_MSM_BOOT_STATS=y
 CONFIG_QCOM_DCC_V2=y
@@ -577,6 +595,7 @@
 CONFIG_QCOM_KGSL=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V2=y
@@ -598,6 +617,7 @@
 CONFIG_FORTIFY_SOURCE=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
 CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_TWOFISH=y
@@ -622,7 +642,7 @@
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_WQ_WATCHDOG=y
-CONFIG_PANIC_TIMEOUT=5
+CONFIG_PANIC_TIMEOUT=-1
 CONFIG_PANIC_ON_SCHED_BUG=y
 CONFIG_PANIC_ON_RT_THROTTLING=y
 CONFIG_SCHEDSTATS=y
@@ -649,6 +669,7 @@
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_MEMTEST=y
 CONFIG_BUG_ON_DATA_CORRUPTION=y
+CONFIG_PANIC_ON_DATA_CORRUPTION=y
 CONFIG_PID_IN_CONTEXTIDR=y
 CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE=y
 CONFIG_CORESIGHT=y
diff --git a/arch/arm64/configs/vendor/lito_defconfig b/arch/arm64/configs/vendor/lito_defconfig
index 03e639a..3898363 100644
--- a/arch/arm64/configs/vendor/lito_defconfig
+++ b/arch/arm64/configs/vendor/lito_defconfig
@@ -219,7 +219,6 @@
 CONFIG_QRTR_SMD=y
 CONFIG_BT=y
 CONFIG_CFG80211=y
-# CONFIG_CFG80211_CRDA_SUPPORT is not set
 CONFIG_RFKILL=y
 CONFIG_FW_LOADER_USER_HELPER=y
 CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
@@ -264,6 +263,8 @@
 # CONFIG_VT is not set
 # CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVMEM is not set
+CONFIG_SERIAL_MSM_GENI=y
+CONFIG_SERIAL_MSM_GENI_CONSOLE=y
 CONFIG_SERIAL_DEV_BUS=y
 CONFIG_TTY_PRINTK=y
 CONFIG_HW_RANDOM=y
@@ -320,15 +321,22 @@
 CONFIG_USB_DWC3_MSM=y
 CONFIG_USB_ISP1760=y
 CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_LINK_LAYER_TEST=y
 CONFIG_NOP_USB_XCEIV=y
 CONFIG_USB_QCOM_EMU_PHY=y
 CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_GADGET_VBUS_DRAW=900
 CONFIG_USB_CONFIGFS=y
 CONFIG_USB_CONFIGFS_MASS_STORAGE=y
 CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
 CONFIG_USB_CONFIGFS_F_HID=y
 CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_USB_CONFIGFS_F_GSI=y
 CONFIG_MMC=y
 CONFIG_MMC_BLOCK_MINORS=32
 CONFIG_MMC_TEST=y
@@ -350,9 +358,12 @@
 CONFIG_ION=y
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_QCOM_GENI_SE=y
 # CONFIG_QCOM_A53PLL is not set
 CONFIG_QCOM_CLK_RPMH=y
 CONFIG_SM_GCC_LITO=y
+CONFIG_SM_VIDEOCC_LITO=y
+CONFIG_SM_CAMCC_LITO=y
 CONFIG_HWSPINLOCK=y
 CONFIG_HWSPINLOCK_QCOM=y
 CONFIG_MAILBOX=y
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index bb139e0..2870259 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -390,27 +390,33 @@
  * 	size:		size of the region
  * 	Corrupts:	kaddr, size, tmp1, tmp2
  */
+	.macro __dcache_op_workaround_clean_cache, op, kaddr
+alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
+	dc	\op, \kaddr
+alternative_else
+	dc	civac, \kaddr
+alternative_endif
+	.endm
+
 	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
 	dcache_line_size \tmp1, \tmp2
 	add	\size, \kaddr, \size
 	sub	\tmp2, \tmp1, #1
 	bic	\kaddr, \kaddr, \tmp2
 9998:
-	.if	(\op == cvau || \op == cvac)
-alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
-	dc	\op, \kaddr
-alternative_else
-	dc	civac, \kaddr
-alternative_endif
-	.elseif	(\op == cvap)
-alternative_if ARM64_HAS_DCPOP
-	sys 3, c7, c12, 1, \kaddr	// dc cvap
-alternative_else
-	dc	cvac, \kaddr
-alternative_endif
+	.ifc	\op, cvau
+	__dcache_op_workaround_clean_cache \op, \kaddr
+	.else
+	.ifc	\op, cvac
+	__dcache_op_workaround_clean_cache \op, \kaddr
+	.else
+	.ifc	\op, cvap
+	sys	3, c7, c12, 1, \kaddr	// dc cvap
 	.else
 	dc	\op, \kaddr
 	.endif
+	.endif
+	.endif
 	add	\kaddr, \kaddr, \tmp1
 	cmp	\kaddr, \size
 	b.lo	9998b
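
Why the .ifc rewrite: .if compares expressions, and cvau/cvac/cvap here are
bare operand strings, so the old form leaned on undocumented assembler
behaviour (and, presumably, upsets stricter assemblers such as LLVM's
integrated one); .ifc performs the literal string comparison the macro
actually needs. The cvap arm also drops its ARM64_HAS_DCPOP alternative
because __clean_dcache_area_pop now branches to __clean_dcache_area_poc up
front when DCPOP is absent (see the arch/arm64/mm/cache.S hunk further
down), whose call site expands the macro as:

	/* clean [x0, x0 + x1) to the Point of Persistence; x2/x3 scratch */
	dcache_by_line_op cvap, sy, x0, x1, x2, x3
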
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index aa45df7..8b284cb 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -24,6 +24,8 @@
 
 /* Hyp Configuration Register (HCR) bits */
 #define HCR_FWB		(UL(1) << 46)
+#define HCR_API		(UL(1) << 41)
+#define HCR_APK		(UL(1) << 40)
 #define HCR_TEA		(UL(1) << 37)
 #define HCR_TERR	(UL(1) << 36)
 #define HCR_TLOR	(UL(1) << 35)
@@ -87,6 +89,7 @@
 			 HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
 			 HCR_FMO | HCR_IMO)
 #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
+#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
 #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
 
 /* TCR_EL2 Registers bits */
@@ -104,7 +107,7 @@
 			 TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)
 
 /* VTCR_EL2 Registers bits */
-#define VTCR_EL2_RES1		(1 << 31)
+#define VTCR_EL2_RES1		(1U << 31)
 #define VTCR_EL2_HD		(1 << 22)
 #define VTCR_EL2_HA		(1 << 21)
 #define VTCR_EL2_PS_MASK	TCR_EL2_PS_MASK
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 3445e15..4189d86 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -76,12 +76,17 @@
 /*
  * KASAN requires 1/8th of the kernel virtual address space for the shadow
  * region. KASAN can bloat the stack significantly, so double the (minimum)
- * stack size when KASAN is in use.
+ * stack size when KASAN is in use, and then double it again if KASAN_EXTRA is
+ * on.
  */
 #ifdef CONFIG_KASAN
 #define KASAN_SHADOW_SCALE_SHIFT 3
 #define KASAN_SHADOW_SIZE	(UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
+#ifdef CONFIG_KASAN_EXTRA
+#define KASAN_THREAD_SHIFT	2
+#else
 #define KASAN_THREAD_SHIFT	1
+#endif /* CONFIG_KASAN_EXTRA */
 #else
 #define KASAN_SHADOW_SIZE	(0)
 #define KASAN_THREAD_SHIFT	0
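
A rough sketch of how this shift feeds the final stack size, assuming the
usual arm64 definition of MIN_THREAD_SHIFT later in this header (16KB base;
illustrative only):

	/* MIN_THREAD_SHIFT = 14 + KASAN_THREAD_SHIFT, so:
	 *   !KASAN:               shift 14  ->  16KB stacks
	 *   KASAN:                shift 15  ->  32KB stacks
	 *   KASAN + KASAN_EXTRA:  shift 16  ->  64KB stacks
	 */
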
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index e0d0f5b..d520518 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -40,8 +40,9 @@
  * The following SVCs are ARM private.
  */
 #define __ARM_NR_COMPAT_BASE		0x0f0000
-#define __ARM_NR_compat_cacheflush	(__ARM_NR_COMPAT_BASE+2)
-#define __ARM_NR_compat_set_tls		(__ARM_NR_COMPAT_BASE+5)
+#define __ARM_NR_compat_cacheflush	(__ARM_NR_COMPAT_BASE + 2)
+#define __ARM_NR_compat_set_tls		(__ARM_NR_COMPAT_BASE + 5)
+#define __ARM_NR_COMPAT_END		(__ARM_NR_COMPAT_BASE + 0x800)
 
 #define __NR_compat_syscalls		399
 #endif
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index b085306..651a06b 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -494,10 +494,9 @@
 #endif
 
 	/* Hyp configuration. */
-	mov	x0, #HCR_RW			// 64-bit EL1
+	mov_q	x0, HCR_HOST_NVHE_FLAGS
 	cbz	x2, set_hcr
-	orr	x0, x0, #HCR_TGE		// Enable Host Extensions
-	orr	x0, x0, #HCR_E2H
+	mov_q	x0, HCR_HOST_VHE_FLAGS
 set_hcr:
 	msr	hcr_el2, x0
 	isb
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 29cdc99..9859e11 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -299,8 +299,10 @@
 		dcache_clean_range(__idmap_text_start, __idmap_text_end);
 
 		/* Clean kvm setup code to PoC? */
-		if (el2_reset_needed())
+		if (el2_reset_needed()) {
 			dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
+			dcache_clean_range(__hyp_text_start, __hyp_text_end);
+		}
 
 		/* make the crash dump kernel image protected again */
 		crash_post_resume();
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index e1261fb..17f325b 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -28,6 +28,8 @@
 #include <asm/virt.h>
 
 	.text
+	.pushsection	.hyp.text, "ax"
+
 	.align 11
 
 ENTRY(__hyp_stub_vectors)
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
index a820ed0..8da289d 100644
--- a/arch/arm64/kernel/image.h
+++ b/arch/arm64/kernel/image.h
@@ -76,16 +76,6 @@
 __efistub_stext_offset = stext - _text;
 
 /*
- * Prevent the symbol aliases below from being emitted into the kallsyms
- * table, by forcing them to be absolute symbols (which are conveniently
- * ignored by scripts/kallsyms) rather than section relative symbols.
- * The distinction is only relevant for partial linking, and only for symbols
- * that are defined within a section declaration (which is not the case for
- * the definitions below) so the resulting values will be identical.
- */
-#define KALLSYMS_HIDE(sym)	ABSOLUTE(sym)
-
-/*
  * The EFI stub has its own symbol namespace prefixed by __efistub_, to
  * isolate it from the kernel proper. The following symbols are legally
  * accessed by the stub, so provide some aliases to make them accessible.
@@ -94,28 +84,28 @@
  * linked at. The routines below are all implemented in assembler in a
  * position independent manner
  */
-__efistub_memcmp		= KALLSYMS_HIDE(__pi_memcmp);
-__efistub_memchr		= KALLSYMS_HIDE(__pi_memchr);
-__efistub_memcpy		= KALLSYMS_HIDE(__pi_memcpy);
-__efistub_memmove		= KALLSYMS_HIDE(__pi_memmove);
-__efistub_memset		= KALLSYMS_HIDE(__pi_memset);
-__efistub_strlen		= KALLSYMS_HIDE(__pi_strlen);
-__efistub_strnlen		= KALLSYMS_HIDE(__pi_strnlen);
-__efistub_strcmp		= KALLSYMS_HIDE(__pi_strcmp);
-__efistub_strncmp		= KALLSYMS_HIDE(__pi_strncmp);
-__efistub_strrchr		= KALLSYMS_HIDE(__pi_strrchr);
-__efistub___flush_dcache_area	= KALLSYMS_HIDE(__pi___flush_dcache_area);
+__efistub_memcmp		= __pi_memcmp;
+__efistub_memchr		= __pi_memchr;
+__efistub_memcpy		= __pi_memcpy;
+__efistub_memmove		= __pi_memmove;
+__efistub_memset		= __pi_memset;
+__efistub_strlen		= __pi_strlen;
+__efistub_strnlen		= __pi_strnlen;
+__efistub_strcmp		= __pi_strcmp;
+__efistub_strncmp		= __pi_strncmp;
+__efistub_strrchr		= __pi_strrchr;
+__efistub___flush_dcache_area	= __pi___flush_dcache_area;
 
 #ifdef CONFIG_KASAN
-__efistub___memcpy		= KALLSYMS_HIDE(__pi_memcpy);
-__efistub___memmove		= KALLSYMS_HIDE(__pi_memmove);
-__efistub___memset		= KALLSYMS_HIDE(__pi_memset);
+__efistub___memcpy		= __pi_memcpy;
+__efistub___memmove		= __pi_memmove;
+__efistub___memset		= __pi_memset;
 #endif
 
-__efistub__text			= KALLSYMS_HIDE(_text);
-__efistub__end			= KALLSYMS_HIDE(_end);
-__efistub__edata		= KALLSYMS_HIDE(_edata);
-__efistub_screen_info		= KALLSYMS_HIDE(screen_info);
+__efistub__text			= _text;
+__efistub__end			= _end;
+__efistub__edata		= _edata;
+__efistub_screen_info		= screen_info;
 
 #endif
 
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index f0e6ab8..b09b6f7 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -14,6 +14,7 @@
 #include <linux/sched.h>
 #include <linux/types.h>
 
+#include <asm/cacheflush.h>
 #include <asm/fixmap.h>
 #include <asm/kernel-pgtable.h>
 #include <asm/memory.h>
@@ -43,7 +44,7 @@
 	return ret;
 }
 
-static __init const u8 *get_cmdline(void *fdt)
+static __init const u8 *kaslr_get_cmdline(void *fdt)
 {
 	static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;
 
@@ -87,6 +88,7 @@
 	 * we end up running with module randomization disabled.
 	 */
 	module_alloc_base = (u64)_etext - MODULES_VSIZE;
+	__flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
 
 	/*
 	 * Try to map the FDT early. If this fails, we simply bail,
@@ -109,7 +111,7 @@
 	 * Check if 'nokaslr' appears on the command line, and
 	 * return 0 if that is the case.
 	 */
-	cmdline = get_cmdline(fdt);
+	cmdline = kaslr_get_cmdline(fdt);
 	str = strstr(cmdline, "nokaslr");
 	if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
 		return 0;
@@ -169,5 +171,8 @@
 	module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
 	module_alloc_base &= PAGE_MASK;
 
+	__flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
+	__flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));
+
 	return offset;
 }
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index d89eb53..c1bb288 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -30,7 +30,7 @@
 #include <linux/perf/arm_pmu.h>
 #include <linux/platform_device.h>
 
-static DEFINE_PER_CPU(bool, is_hotplugging);
+static DEFINE_PER_CPU(bool, perf_event_is_hotplugging);
 
 /*
  * ARMv8 PMUv3 Performance Events handling code.
@@ -1148,7 +1148,7 @@
 	if (!cpu_pmu)
 		return;
 
-	if (__this_cpu_read(is_hotplugging))
+	if (__this_cpu_read(perf_event_is_hotplugging))
 		return;
 
 	hw_events = this_cpu_ptr(cpu_pmu->hw_events);
@@ -1385,13 +1385,13 @@
 #ifdef CONFIG_HOTPLUG_CPU
 static int perf_event_hotplug_coming_up(unsigned int cpu)
 {
-	per_cpu(is_hotplugging, cpu) = false;
+	per_cpu(perf_event_is_hotplugging, cpu) = false;
 	return 0;
 }
 
 static int perf_event_hotplug_going_down(unsigned int cpu)
 {
-	per_cpu(is_hotplugging, cpu) = true;
+	per_cpu(perf_event_is_hotplugging, cpu) = true;
 	return 0;
 }
 
@@ -1428,7 +1428,7 @@
 	int ret, cpu;
 
 	for_each_possible_cpu(cpu)
-		per_cpu(is_hotplugging, cpu) = false;
+		per_cpu(perf_event_is_hotplugging, cpu) = false;
 
 	ret = perf_event_cpu_hp_init();
 	if (ret)
@@ -1447,6 +1447,7 @@
 	.driver		= {
 		.name	= ARMV8_PMU_PDEV_NAME,
 		.of_match_table = armv8_pmu_of_device_ids,
+		.suppress_bind_attrs = true,
 	},
 	.probe		= armv8_pmu_device_probe,
 };
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index be938ea..dd1e817 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -250,12 +250,12 @@
 			u32	data;
 
 			if (probe_kernel_address(p, data))
-				printk(KERN_DEBUG " ********");
+				pr_cont(" ********");
 			else
-				printk(KERN_DEBUG " %08x", data);
+				pr_cont(" %08x", data);
 			++p;
 		}
-		printk(KERN_DEBUG "\n");
+		pr_cont("\n");
 	}
 }
 
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index a610982..010212d 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -66,12 +66,11 @@
 /*
  * Handle all unrecognised system calls.
  */
-long compat_arm_syscall(struct pt_regs *regs)
+long compat_arm_syscall(struct pt_regs *regs, int scno)
 {
 	siginfo_t info;
-	unsigned int no = regs->regs[7];
 
-	switch (no) {
+	switch (scno) {
 	/*
 	 * Flush a region from virtual address 'r0' to virtual address 'r1'
 	 * _exclusive_.  There is no alignment requirement on either address;
@@ -102,12 +101,12 @@
 
 	default:
 		/*
-		 * Calls 9f00xx..9f07ff are defined to return -ENOSYS
+		 * Calls 0x0f0000..0x0f07ff are defined to return -ENOSYS
 		 * if not implemented, rather than raising SIGILL. This
 		 * way the calling program can gracefully determine whether
 		 * a feature is supported.
 		 */
-		if ((no & 0xffff) <= 0x7ff)
+		if (scno < __ARM_NR_COMPAT_END)
 			return -ENOSYS;
 		break;
 	}
@@ -119,6 +118,6 @@
 	info.si_addr  = (void __user *)instruction_pointer(regs) -
 			 (compat_thumb_mode(regs) ? 2 : 4);
 
-	arm64_notify_die("Oops - bad compat syscall(2)", regs, &info, no);
+	arm64_notify_die("Oops - bad compat syscall(2)", regs, &info, scno);
 	return 0;
 }
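
From a compat task's side, the graceful probing the comment above describes
looks roughly like this; the call number is hypothetical, chosen inside the
private range 0x0f0000..0x0f07ff:

	#include <errno.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	static int have_private_call(void)
	{
		long ret = syscall(0x0f0010);	/* made-up private call */

		/* unimplemented private calls report ENOSYS, not SIGILL */
		return !(ret == -1 && errno == ENOSYS);
	}
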
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index 032d223..5610ac0 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -13,16 +13,15 @@
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
 
-long compat_arm_syscall(struct pt_regs *regs);
-
+long compat_arm_syscall(struct pt_regs *regs, int scno);
 long sys_ni_syscall(void);
 
-asmlinkage long do_ni_syscall(struct pt_regs *regs)
+static long do_ni_syscall(struct pt_regs *regs, int scno)
 {
 #ifdef CONFIG_COMPAT
 	long ret;
 	if (is_compat_task()) {
-		ret = compat_arm_syscall(regs);
+		ret = compat_arm_syscall(regs, scno);
 		if (ret != -ENOSYS)
 			return ret;
 	}
@@ -47,7 +46,7 @@
 		syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)];
 		ret = __invoke_syscall(regs, syscall_fn);
 	} else {
-		ret = do_ni_syscall(regs);
+		ret = do_ni_syscall(regs, scno);
 	}
 
 	regs->regs[0] = ret;
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 605d1b6..74e469f 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -99,7 +99,8 @@
 		*(.discard)
 		*(.discard.*)
 		*(.interp .dynamic)
-		*(.dynsym .dynstr .hash)
+		*(.dynsym .dynstr .hash .gnu.hash)
+		*(.eh_frame)
 	}
 
 	. = KIMAGE_VADDR + TEXT_OFFSET;
@@ -176,12 +177,12 @@
 
 	PERCPU_SECTION(L1_CACHE_BYTES)
 
-	.rela : ALIGN(8) {
+	.rela.dyn : ALIGN(8) {
 		*(.rela .rela*)
 	}
 
-	__rela_offset	= ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR);
-	__rela_size	= SIZEOF(.rela);
+	__rela_offset	= ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR);
+	__rela_size	= SIZEOF(.rela.dyn);
 
 	. = ALIGN(SEGMENT_ALIGN);
 	__initdata_end = .;
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index ca46153..a1c32c1 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -157,7 +157,7 @@
 	mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
 
 	write_sysreg(mdcr_el2, mdcr_el2);
-	write_sysreg(HCR_RW, hcr_el2);
+	write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
 	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
 }
 
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 131c777..c041eab 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -15,14 +15,19 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/irqflags.h>
+
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 #include <asm/tlbflush.h>
 
-static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
+						 unsigned long *flags)
 {
 	u64 val;
 
+	local_irq_save(*flags);
+
 	/*
 	 * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
 	 * most TLB operations target EL2/EL0. In order to affect the
@@ -37,7 +42,8 @@
 	isb();
 }
 
-static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
+						  unsigned long *flags)
 {
 	write_sysreg(kvm->arch.vttbr, vttbr_el2);
 	isb();
@@ -48,7 +54,8 @@
 			    __tlb_switch_to_guest_vhe,
 			    ARM64_HAS_VIRT_HOST_EXTN);
 
-static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
+						unsigned long flags)
 {
 	/*
 	 * We're done with the TLB operation, let's restore the host's
@@ -56,9 +63,12 @@
 	 */
 	write_sysreg(0, vttbr_el2);
 	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+	isb();
+	local_irq_restore(flags);
 }
 
-static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
+						 unsigned long flags)
 {
 	write_sysreg(0, vttbr_el2);
 }
@@ -70,11 +80,13 @@
 
 void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
+	unsigned long flags;
+
 	dsb(ishst);
 
 	/* Switch to requested VMID */
 	kvm = kern_hyp_va(kvm);
-	__tlb_switch_to_guest()(kvm);
+	__tlb_switch_to_guest()(kvm, &flags);
 
 	/*
 	 * We could do so much better if we had the VA as well.
@@ -117,36 +129,39 @@
 	if (!has_vhe() && icache_is_vpipt())
 		__flush_icache_all();
 
-	__tlb_switch_to_host()(kvm);
+	__tlb_switch_to_host()(kvm, flags);
 }
 
 void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
 {
+	unsigned long flags;
+
 	dsb(ishst);
 
 	/* Switch to requested VMID */
 	kvm = kern_hyp_va(kvm);
-	__tlb_switch_to_guest()(kvm);
+	__tlb_switch_to_guest()(kvm, &flags);
 
 	__tlbi(vmalls12e1is);
 	dsb(ish);
 	isb();
 
-	__tlb_switch_to_host()(kvm);
+	__tlb_switch_to_host()(kvm, flags);
 }
 
 void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+	unsigned long flags;
 
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest()(kvm);
+	__tlb_switch_to_guest()(kvm, &flags);
 
 	__tlbi(vmalle1);
 	dsb(nsh);
 	isb();
 
-	__tlb_switch_to_host()(kvm);
+	__tlb_switch_to_host()(kvm, flags);
 }
 
 void __hyp_text __kvm_flush_vm_context(void)
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 0369e84..000fb44 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -285,6 +285,9 @@
  *	- size    - size in question
  */
 ENTRY(__clean_dcache_area_pop)
+	alternative_if_not ARM64_HAS_DCPOP
+	b	__clean_dcache_area_poc
+	alternative_else_nop_endif
 	dcache_by_line_op cvap, sy, x0, x1, x2, x3
 	ret
 ENDPIPROC(__clean_dcache_area_pop)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 6aa21ed..09577b5 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -1114,6 +1114,11 @@
 			naddr + nsize);
 		return;
 	}
+	if (naddr == 0 || nsize == 0) {
+		dev_err(dev, "Invalid #address-cells %d or #size-cells %d\n",
+			naddr, nsize);
+		return;
+	}
 
 	*dma_addr = of_read_number(ranges, naddr);
 	*dma_size = of_read_number(ranges + naddr, nsize);
@@ -1238,6 +1243,11 @@
 	struct iommu_domain *domain;
-	struct iommu_group *group = dev->iommu_group;
+	struct iommu_group *group;
 
+	if (!dev || !mapping) {
+		pr_err("%s: invalid input: dev or mapping is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	group = dev->iommu_group;
+
 	if (!group) {
 		dev_err(dev, "No iommu associated with device\n");
 		return -EINVAL;
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index cc305b3..fb10aa0 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -33,7 +33,11 @@
 		__clean_dcache_area_pou(kaddr, len);
 		__flush_icache_all();
 	} else {
-		flush_icache_range(addr, addr + len);
+		/*
+		 * Don't issue kick_all_cpus_sync() after I-cache invalidation
+		 * for user mappings.
+		 */
+		__flush_icache_range(addr, addr + len);
 	}
 }
 
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 3bc6139..55d16c5 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -785,7 +785,8 @@
 __initcall(register_mem_limit_dumper);
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
+		bool want_memblock)
 {
 	pg_data_t *pgdat;
 	unsigned long start_pfn = start >> PAGE_SHIFT;
@@ -833,7 +834,7 @@
 
 	pgdat = NODE_DATA(nid);
 
-	ret = __add_pages(nid, start_pfn, nr_pages, want_memblock);
+	ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
 
 	/*
 	 * Make the pages usable after they have been added.
@@ -874,7 +875,7 @@
 
 }
 
-int arch_remove_memory(u64 start, u64 size)
+int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
@@ -883,7 +884,7 @@
 	int ret = 0;
 
 	zone = page_zone(page);
-	ret = __remove_pages(zone, start_pfn, nr_pages);
+	ret = __remove_pages(zone, start_pfn, nr_pages, altmap);
 	WARN_ON_ONCE(ret);
 
 	kernel_physical_mapping_remove(start, start + size);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index ca1feee..fdf213a 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1165,7 +1165,7 @@
 			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
 			if (!p) {
 #ifdef CONFIG_MEMORY_HOTPLUG
-				vmemmap_free(start, end);
+				vmemmap_free(start, end, altmap);
 #endif
 				ret = -ENOMEM;
 				break;
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index 6181e41..fe3ddd7 100644
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -55,12 +55,12 @@
  */
 #ifdef CONFIG_SUN3
 #define PTRS_PER_PTE   16
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
 #define PTRS_PER_PMD   1
 #define PTRS_PER_PGD   2048
 #elif defined(CONFIG_COLDFIRE)
 #define PTRS_PER_PTE	512
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
 #define PTRS_PER_PMD	1
 #define PTRS_PER_PGD	1024
 #else
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index 5d3596c..de44899 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -165,8 +165,6 @@
 					be32_to_cpu(m->addr);
 				m68k_memory[m68k_num_memory].size =
 					be32_to_cpu(m->size);
-				memblock_add(m68k_memory[m68k_num_memory].addr,
-					     m68k_memory[m68k_num_memory].size);
 				m68k_num_memory++;
 			} else
 				pr_warn("%s: too many memory chunks\n",
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 4e17ecb..2eb2b31 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -228,6 +228,7 @@
 
 	min_addr = m68k_memory[0].addr;
 	max_addr = min_addr + m68k_memory[0].size;
+	memblock_add(m68k_memory[0].addr, m68k_memory[0].size);
 	for (i = 1; i < m68k_num_memory;) {
 		if (m68k_memory[i].addr < min_addr) {
 			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
@@ -238,6 +239,7 @@
 				(m68k_num_memory - i) * sizeof(struct m68k_mem_info));
 			continue;
 		}
+		memblock_add(m68k_memory[i].addr, m68k_memory[i].size);
 		addr = m68k_memory[i].addr + m68k_memory[i].size;
 		if (addr > max_addr)
 			max_addr = addr;
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index 7b650ab..2ca5985 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -63,7 +63,7 @@
 
 #include <asm-generic/4level-fixup.h>
 
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
 
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 3551199..201caf2 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -794,6 +794,7 @@
 	select SYS_SUPPORTS_HIGHMEM
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select ZONE_DMA32 if 64BIT
+	select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
 
 config SIBYTE_LITTLESUR
 	bool "Sibyte BCM91250C2-LittleSur"
@@ -814,6 +815,7 @@
 	select SYS_HAS_CPU_SB1
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_SUPPORTS_LITTLE_ENDIAN
+	select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
 
 config SIBYTE_BIGSUR
 	bool "Sibyte BCM91480B-BigSur"
@@ -826,6 +828,7 @@
 	select SYS_SUPPORTS_HIGHMEM
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select ZONE_DMA32 if 64BIT
+	select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
 
 config SNI_RM
 	bool "SNI RM200/300/400"
@@ -3149,6 +3152,7 @@
 config MIPS32_N32
 	bool "Kernel support for n32 binaries"
 	depends on 64BIT
+	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
 	select COMPAT
 	select MIPS32_COMPAT
 	select SYSVIPC_COMPAT if SYSVIPC
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 6054d49..fe37735 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -173,6 +173,31 @@
 	pm_power_off = bcm47xx_machine_halt;
 }
 
+#ifdef CONFIG_BCM47XX_BCMA
+static struct device * __init bcm47xx_setup_device(void)
+{
+	struct device *dev;
+	int err;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return NULL;
+
+	err = dev_set_name(dev, "bcm47xx_soc");
+	if (err) {
+		pr_err("Failed to set SoC device name: %d\n", err);
+		kfree(dev);
+		return NULL;
+	}
+
+	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+	if (err)
+		pr_err("Failed to set SoC DMA mask: %d\n", err);
+
+	return dev;
+}
+#endif
+
 /*
  * This finishes bus initialization doing things that were not possible without
  * kmalloc. Make sure to call it late enough (after mm_init).
@@ -183,6 +208,10 @@
 	if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) {
 		int err;
 
+		bcm47xx_bus.bcma.dev = bcm47xx_setup_device();
+		if (!bcm47xx_bus.bcma.dev)
+			panic("Failed to setup SoC device\n");
+
 		err = bcma_host_soc_init(&bcm47xx_bus.bcma);
 		if (err)
 			panic("Failed to initialize BCMA bus (err %d)", err);
@@ -235,6 +264,8 @@
 #endif
 #ifdef CONFIG_BCM47XX_BCMA
 	case BCM47XX_BUS_TYPE_BCMA:
+		if (device_register(bcm47xx_bus.bcma.dev))
+			pr_err("Failed to register SoC device\n");
 		bcma_bus_register(&bcm47xx_bus.bcma.bus);
 		break;
 #endif
diff --git a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
index 37fe58c..542c3ed 100644
--- a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
+++ b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
@@ -13,6 +13,7 @@
 #include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include "../../../../include/linux/sizes.h"
 
 int main(int argc, char *argv[])
 {
@@ -45,11 +46,11 @@
 	vmlinuz_load_addr = vmlinux_load_addr + vmlinux_size;
 
 	/*
-	 * Align with 16 bytes: "greater than that used for any standard data
-	 * types by a MIPS compiler." -- See MIPS Run Linux (Second Edition).
+	 * Align with 64KB: KEXEC needs load sections to be aligned to PAGE_SIZE,
+	 * which may be as large as 64KB depending on the kernel configuration.
 	 */
 
-	vmlinuz_load_addr += (16 - vmlinux_size % 16);
+	vmlinuz_load_addr += (SZ_64K - vmlinux_size % SZ_64K);
 
 	printf("0x%llx\n", vmlinuz_load_addr);
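
Worked example of the new rounding; note the expression always adds at
least one byte, so an already 64KB-aligned vmlinux_size is still bumped by
a full 64KB:

	/* vmlinux_load_addr = 0xffffffff80100000, vmlinux_size = 0x8d21c0:
	 *   vmlinuz_load_addr  = 0xffffffff809d21c0
	 *   += SZ_64K - 0x21c0 -> 0xffffffff809e0000 (a 64KB boundary)
	 */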
 
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
index 6c79e8a..3ddbb98 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
@@ -286,7 +286,8 @@
 	case 3:
 		return CVMX_HELPER_INTERFACE_MODE_LOOP;
 	case 4:
-		return CVMX_HELPER_INTERFACE_MODE_RGMII;
+		/* TODO: Implement support for AGL (RGMII). */
+		return CVMX_HELPER_INTERFACE_MODE_DISABLED;
 	default:
 		return CVMX_HELPER_INTERFACE_MODE_DISABLED;
 	}
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index d4ea7a5..9e80531 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -306,7 +306,7 @@
 {									      \
 	long result;							      \
 									      \
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {			      \
+	if (kernel_uses_llsc) {						      \
 		long temp;						      \
 									      \
 		__asm__ __volatile__(					      \
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index a41059d..ed7ffe4 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -50,7 +50,7 @@
 #define MIPS_CACHE_PINDEX	0x00000020	/* Physically indexed cache */
 
 struct cpuinfo_mips {
-	unsigned long		asid_cache;
+	u64			asid_cache;
 #ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
 	unsigned long		asid_mask;
 #endif
diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h
index c9f7e23..59c8b11 100644
--- a/arch/mips/include/asm/mach-loongson64/mmzone.h
+++ b/arch/mips/include/asm/mach-loongson64/mmzone.h
@@ -21,6 +21,7 @@
 #define NODE3_ADDRSPACE_OFFSET 0x300000000000UL
 
 #define pa_to_nid(addr)  (((addr) & 0xf00000000000) >> NODE_ADDRSPACE_SHIFT)
+#define nid_to_addrbase(nid) ((nid) << NODE_ADDRSPACE_SHIFT)
 
 #define LEVELS_PER_SLICE 128
 
diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h
index 0740be7..24d6b42 100644
--- a/arch/mips/include/asm/mmu.h
+++ b/arch/mips/include/asm/mmu.h
@@ -7,7 +7,7 @@
 #include <linux/wait.h>
 
 typedef struct {
-	unsigned long asid[NR_CPUS];
+	u64 asid[NR_CPUS];
 	void *vdso;
 	atomic_t fp_mode_switching;
 
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 9441456..a589585 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -76,14 +76,14 @@
  *  All upper bits unused by the hardware will be treated
  *  as a software ASID extension.
  */
-static unsigned long asid_version_mask(unsigned int cpu)
+static inline u64 asid_version_mask(unsigned int cpu)
 {
 	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
 
-	return ~(asid_mask | (asid_mask - 1));
+	return ~(u64)(asid_mask | (asid_mask - 1));
 }
 
-static unsigned long asid_first_version(unsigned int cpu)
+static inline u64 asid_first_version(unsigned int cpu)
 {
 	return ~asid_version_mask(cpu) + 1;
 }
@@ -102,14 +102,12 @@
 static inline void
 get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 {
-	unsigned long asid = asid_cache(cpu);
+	u64 asid = asid_cache(cpu);
 
 	if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
 		if (cpu_has_vtag_icache)
 			flush_icache_all();
 		local_flush_tlb_all();	/* start new asid cycle */
-		if (!asid)		/* fix version if needed */
-			asid = asid_first_version(cpu);
 	}
 
 	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
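
Worked example of the widened arithmetic for an 8-bit hardware ASID
(cpu_asid_mask() == 0xff):

	/* asid_version_mask(cpu)  == ~(u64)(0xff | 0xfe) == 0xffffffffffffff00
	 * asid_first_version(cpu) == 0x100
	 * The low 8 bits hold the hardware ASID and the upper 56 bits the
	 * software version, so a u64 cache cannot realistically wrap and the
	 * old "fix version if needed" branch can be dropped.
	 */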
diff --git a/arch/mips/include/asm/mmzone.h b/arch/mips/include/asm/mmzone.h
index f085fba..b826b84 100644
--- a/arch/mips/include/asm/mmzone.h
+++ b/arch/mips/include/asm/mmzone.h
@@ -7,7 +7,18 @@
 #define _ASM_MMZONE_H_
 
 #include <asm/page.h>
-#include <mmzone.h>
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+# include <mmzone.h>
+#endif
+
+#ifndef pa_to_nid
+#define pa_to_nid(addr) 0
+#endif
+
+#ifndef nid_to_addrbase
+#define nid_to_addrbase(nid) 0
+#endif
 
 #ifdef CONFIG_DISCONTIGMEM
 
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 0036ea0..93a9dce 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -265,6 +265,11 @@
 
 static inline int pmd_present(pmd_t pmd)
 {
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
+		return pmd_val(pmd) & _PAGE_PRESENT;
+#endif
+
 	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
 }
 
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index 7f12d7e..e519012 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -20,6 +20,7 @@
 #include <asm/cpu-features.h>
 #include <asm/cpu-type.h>
 #include <asm/mipsmtregs.h>
+#include <asm/mmzone.h>
 #include <linux/uaccess.h> /* for uaccess_kernel() */
 
 extern void (*r4k_blast_dcache)(void);
@@ -747,4 +748,25 @@
 __BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
 __BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
 
+/* Currently, this is very specific to Loongson-3 */
+#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize)	\
+static inline void blast_##pfx##cache##lsize##_node(long node)		\
+{									\
+	unsigned long start = CAC_BASE | nid_to_addrbase(node);		\
+	unsigned long end = start + current_cpu_data.desc.waysize;	\
+	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
+	unsigned long ws_end = current_cpu_data.desc.ways <<		\
+			       current_cpu_data.desc.waybit;		\
+	unsigned long ws, addr;						\
+									\
+	for (ws = 0; ws < ws_end; ws += ws_inc)				\
+		for (addr = start; addr < end; addr += lsize * 32)	\
+			cache##lsize##_unroll32(addr|ws, indexop);	\
+}
+
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
+
 #endif /* _ASM_R4KCACHE_H */
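
A sketch of one expansion in use, assuming the Loongson-3 node layout from
the mmzone.h hunk above (NODE_ADDRSPACE_SHIFT == 44, judging by the
0xX00000000000 offsets) and a 64-byte S-cache line:

	/* blast_scache64_node(1) starts at CAC_BASE | (1UL << 44), i.e. the
	 * cached alias of node 1, and index-writeback-invalidates every
	 * way/set of that node's S-cache, 32 lines per cache64_unroll32()
	 * step.
	 */
	blast_scache64_node(1);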
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 48a9c6b..9df3ebd 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -126,8 +126,8 @@
 
 	/* Map delay slot emulation page */
 	base = mmap_region(NULL, STACK_TOP, PAGE_SIZE,
-			   VM_READ|VM_WRITE|VM_EXEC|
-			   VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+			   VM_READ | VM_EXEC |
+			   VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
 			   0, NULL);
 	if (IS_ERR_VALUE(base)) {
 		ret = base;
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index f0bc331..c4ef1c3 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -224,9 +224,11 @@
 	.irq_set_type = ltq_eiu_settype,
 };
 
-static void ltq_hw_irqdispatch(int module)
+static void ltq_hw_irq_handler(struct irq_desc *desc)
 {
+	int module = irq_desc_get_irq(desc) - 2;
 	u32 irq;
+	int hwirq;
 
 	irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR);
 	if (irq == 0)
@@ -237,7 +239,8 @@
 	 * other bits might be bogus
 	 */
 	irq = __fls(irq);
-	do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module));
+	hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module);
+	generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
 
 	/* if this is a EBU irq, we need to ack it or get a deadlock */
 	if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
@@ -245,49 +248,6 @@
 			LTQ_EBU_PCC_ISTAT);
 }
 
-#define DEFINE_HWx_IRQDISPATCH(x)					\
-	static void ltq_hw ## x ## _irqdispatch(void)			\
-	{								\
-		ltq_hw_irqdispatch(x);					\
-	}
-DEFINE_HWx_IRQDISPATCH(0)
-DEFINE_HWx_IRQDISPATCH(1)
-DEFINE_HWx_IRQDISPATCH(2)
-DEFINE_HWx_IRQDISPATCH(3)
-DEFINE_HWx_IRQDISPATCH(4)
-
-#if MIPS_CPU_TIMER_IRQ == 7
-static void ltq_hw5_irqdispatch(void)
-{
-	do_IRQ(MIPS_CPU_TIMER_IRQ);
-}
-#else
-DEFINE_HWx_IRQDISPATCH(5)
-#endif
-
-static void ltq_hw_irq_handler(struct irq_desc *desc)
-{
-	ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2);
-}
-
-asmlinkage void plat_irq_dispatch(void)
-{
-	unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
-	int irq;
-
-	if (!pending) {
-		spurious_interrupt();
-		return;
-	}
-
-	pending >>= CAUSEB_IP;
-	while (pending) {
-		irq = fls(pending) - 1;
-		do_IRQ(MIPS_CPU_IRQ_BASE + irq);
-		pending &= ~BIT(irq);
-	}
-}
-
 static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
 {
 	struct irq_chip *chip = &ltq_irq_type;
@@ -343,28 +303,10 @@
 	for (i = 0; i < MAX_IM; i++)
 		irq_set_chained_handler(i + 2, ltq_hw_irq_handler);
 
-	if (cpu_has_vint) {
-		pr_info("Setting up vectored interrupts\n");
-		set_vi_handler(2, ltq_hw0_irqdispatch);
-		set_vi_handler(3, ltq_hw1_irqdispatch);
-		set_vi_handler(4, ltq_hw2_irqdispatch);
-		set_vi_handler(5, ltq_hw3_irqdispatch);
-		set_vi_handler(6, ltq_hw4_irqdispatch);
-		set_vi_handler(7, ltq_hw5_irqdispatch);
-	}
-
 	ltq_domain = irq_domain_add_linear(node,
 		(MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
 		&irq_domain_ops, 0);
 
-#ifndef CONFIG_MIPS_MT_SMP
-	set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
-		IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
-#else
-	set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
-		IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
-#endif
-
 	/* tell oprofile which irq to use */
 	ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
 
diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
index 5450f4d..e2d46cb 100644
--- a/arch/mips/math-emu/dsemul.c
+++ b/arch/mips/math-emu/dsemul.c
@@ -214,8 +214,9 @@
 {
 	int isa16 = get_isa16_mode(regs->cp0_epc);
 	mips_instruction break_math;
-	struct emuframe __user *fr;
-	int err, fr_idx;
+	unsigned long fr_uaddr;
+	struct emuframe fr;
+	int fr_idx, ret;
 
 	/* NOP is easy */
 	if (ir == 0)
@@ -250,27 +251,31 @@
 		fr_idx = alloc_emuframe();
 	if (fr_idx == BD_EMUFRAME_NONE)
 		return SIGBUS;
-	fr = &dsemul_page()[fr_idx];
 
 	/* Retrieve the appropriately encoded break instruction */
 	break_math = BREAK_MATH(isa16);
 
 	/* Write the instructions to the frame */
 	if (isa16) {
-		err = __put_user(ir >> 16,
-				 (u16 __user *)(&fr->emul));
-		err |= __put_user(ir & 0xffff,
-				  (u16 __user *)((long)(&fr->emul) + 2));
-		err |= __put_user(break_math >> 16,
-				  (u16 __user *)(&fr->badinst));
-		err |= __put_user(break_math & 0xffff,
-				  (u16 __user *)((long)(&fr->badinst) + 2));
+		union mips_instruction _emul = {
+			.halfword = { ir >> 16, ir }
+		};
+		union mips_instruction _badinst = {
+			.halfword = { break_math >> 16, break_math }
+		};
+
+		fr.emul = _emul.word;
+		fr.badinst = _badinst.word;
 	} else {
-		err = __put_user(ir, &fr->emul);
-		err |= __put_user(break_math, &fr->badinst);
+		fr.emul = ir;
+		fr.badinst = break_math;
 	}
 
-	if (unlikely(err)) {
+	/* Write the frame to user memory */
+	fr_uaddr = (unsigned long)&dsemul_page()[fr_idx];
+	ret = access_process_vm(current, fr_uaddr, &fr, sizeof(fr),
+				FOLL_FORCE | FOLL_WRITE);
+	if (unlikely(ret != sizeof(fr))) {
 		MIPS_FPU_EMU_INC_STATS(errors);
 		free_emuframe(fr_idx, current->mm);
 		return SIGBUS;
@@ -282,10 +287,7 @@
 	atomic_set(&current->thread.bd_emu_frame, fr_idx);
 
 	/* Change user register context to execute the frame */
-	regs->cp0_epc = (unsigned long)&fr->emul | isa16;
-
-	/* Ensure the icache observes our newly written frame */
-	flush_cache_sigtramp((unsigned long)&fr->emul);
+	regs->cp0_epc = fr_uaddr | isa16;
 
 	return 0;
 }
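The rework assembles the whole emuframe in a kernel-local struct and writes it out with a single access_process_vm() call; that path funnels through copy_to_user_page(), which handles the cache maintenance the dropped flush_cache_sigtramp() used to do, and it keeps working now that the dsemul page is mapped without VM_WRITE (see the vdso.c hunk above). A userspace analogue of the build-then-single-write pattern, with process_vm_writev() standing in for access_process_vm() (instruction words are illustrative MIPS encodings):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/uio.h>
    #include <unistd.h>

    struct emuframe { unsigned int emul, badinst; };  /* simplified layout */

    int main(void)
    {
            static struct emuframe page;    /* stands in for the dsemul page */
            struct emuframe fr = { .emul = 0x03e00008, .badinst = 0x0000000d };
            struct iovec local  = { .iov_base = &fr,   .iov_len = sizeof(fr) };
            struct iovec remote = { .iov_base = &page, .iov_len = sizeof(page) };

            /* One call writes the fully assembled frame. */
            ssize_t n = process_vm_writev(getpid(), &local, 1, &remote, 1, 0);

            printf("wrote %zd bytes, emul=%#x\n", n, page.emul);
            return n == (ssize_t)sizeof(fr) ? 0 : 1;
    }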
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 3466fcd..01848cd 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -245,7 +245,7 @@
 	pmd_t *pmdp;
 	pte_t *ptep;
 
-	pr_debug("cpage[%08lx,%08lx]\n",
+	pr_debug("cpage[%08llx,%08lx]\n",
 		 cpu_context(smp_processor_id(), mm), addr);
 
 	/* No ASID => no such page in the cache.  */
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index a9ef057..05a539d 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -459,11 +459,28 @@
 		r4k_blast_scache = blast_scache128;
 }
 
+static void (*r4k_blast_scache_node)(long node);
+
+static void r4k_blast_scache_node_setup(void)
+{
+	unsigned long sc_lsize = cpu_scache_line_size();
+
+	if (current_cpu_type() != CPU_LOONGSON3)
+		r4k_blast_scache_node = (void *)cache_noop;
+	else if (sc_lsize == 16)
+		r4k_blast_scache_node = blast_scache16_node;
+	else if (sc_lsize == 32)
+		r4k_blast_scache_node = blast_scache32_node;
+	else if (sc_lsize == 64)
+		r4k_blast_scache_node = blast_scache64_node;
+	else if (sc_lsize == 128)
+		r4k_blast_scache_node = blast_scache128_node;
+}
+
 static inline void local_r4k___flush_cache_all(void * args)
 {
 	switch (current_cpu_type()) {
 	case CPU_LOONGSON2:
-	case CPU_LOONGSON3:
 	case CPU_R4000SC:
 	case CPU_R4000MC:
 	case CPU_R4400SC:
@@ -480,6 +497,11 @@
 		r4k_blast_scache();
 		break;
 
+	case CPU_LOONGSON3:
+		/* Use get_ebase_cpunum() for both NUMA=y/n */
+		r4k_blast_scache_node(get_ebase_cpunum() >> 2);
+		break;
+
 	case CPU_BMIPS5000:
 		r4k_blast_scache();
 		__sync();
@@ -840,10 +862,14 @@
 
 	preempt_disable();
 	if (cpu_has_inclusive_pcaches) {
-		if (size >= scache_size)
-			r4k_blast_scache();
-		else
+		if (size >= scache_size) {
+			if (current_cpu_type() != CPU_LOONGSON3)
+				r4k_blast_scache();
+			else
+				r4k_blast_scache_node(pa_to_nid(addr));
+		} else {
 			blast_scache_range(addr, addr + size);
+		}
 		preempt_enable();
 		__sync();
 		return;
@@ -877,9 +903,12 @@
 
 	preempt_disable();
 	if (cpu_has_inclusive_pcaches) {
-		if (size >= scache_size)
-			r4k_blast_scache();
-		else {
+		if (size >= scache_size) {
+			if (current_cpu_type() != CPU_LOONGSON3)
+				r4k_blast_scache();
+			else
+				r4k_blast_scache_node(pa_to_nid(addr));
+		} else {
 			/*
 			 * There is no clearly documented alignment requirement
 			 * for the cache instruction on MIPS processors and
@@ -1918,6 +1947,7 @@
 	r4k_blast_scache_page_setup();
 	r4k_blast_scache_page_indexed_setup();
 	r4k_blast_scache_setup();
+	r4k_blast_scache_node_setup();
 #ifdef CONFIG_EVA
 	r4k_blast_dcache_user_page_setup();
 	r4k_blast_icache_user_page_setup();
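The node index passed to r4k_blast_scache_node() above is derived as get_ebase_cpunum() >> 2, which encodes an assumption of four cores per node (the Loongson-3 topology); a sketch of that mapping:

    /* Assumed Loongson-3 topology: four cores per node. */
    static inline long cpu_to_scache_node(unsigned int ebase_cpunum)
    {
            return ebase_cpunum >> 2;   /* cores 0-3 -> node 0, 4-7 -> node 1, ... */
    }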
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
index 2a5bb84..288b58b 100644
--- a/arch/mips/pci/msi-octeon.c
+++ b/arch/mips/pci/msi-octeon.c
@@ -369,7 +369,9 @@
 	int irq;
 	struct irq_chip *msi;
 
-	if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
+	if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) {
+		return 0;
+	} else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
 		msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0;
 		msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1;
 		msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2;
diff --git a/arch/mips/sibyte/common/Makefile b/arch/mips/sibyte/common/Makefile
index b3d6bf2..3ef3fb6 100644
--- a/arch/mips/sibyte/common/Makefile
+++ b/arch/mips/sibyte/common/Makefile
@@ -1,4 +1,5 @@
 obj-y := cfe.o
+obj-$(CONFIG_SWIOTLB)			+= dma.o
 obj-$(CONFIG_SIBYTE_BUS_WATCHER)	+= bus_watcher.o
 obj-$(CONFIG_SIBYTE_CFE_CONSOLE)	+= cfe_console.o
 obj-$(CONFIG_SIBYTE_TBPROF)		+= sb_tbprof.o
diff --git a/arch/mips/sibyte/common/dma.c b/arch/mips/sibyte/common/dma.c
new file mode 100644
index 0000000..eb47a94
--- /dev/null
+++ b/arch/mips/sibyte/common/dma.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ *	DMA support for Broadcom SiByte platforms.
+ *
+ *	Copyright (c) 2018  Maciej W. Rozycki
+ */
+
+#include <linux/swiotlb.h>
+#include <asm/bootinfo.h>
+
+void __init plat_swiotlb_setup(void)
+{
+	swiotlb_init(1);
+}
diff --git a/arch/nds32/include/asm/pgtable.h b/arch/nds32/include/asm/pgtable.h
index d3e19a5..9f52db9 100644
--- a/arch/nds32/include/asm/pgtable.h
+++ b/arch/nds32/include/asm/pgtable.h
@@ -4,7 +4,7 @@
 #ifndef _ASMNDS32_PGTABLE_H
 #define _ASMNDS32_PGTABLE_H
 
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
 #include <asm-generic/4level-fixup.h>
 #include <asm-generic/sizes.h>
 
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index fa6b7c78..ff0860b 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -117,7 +117,7 @@
 #if CONFIG_PGTABLE_LEVELS == 3
 #define BITS_PER_PMD	(PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
 #else
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
 #define BITS_PER_PMD	0
 #endif
 #define PTRS_PER_PMD    (1UL << BITS_PER_PMD)
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index d2824b0..c4c0399 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -160,8 +160,17 @@
 CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64
 endif
 
+ifdef CONFIG_FUNCTION_TRACER
+CC_FLAGS_FTRACE := -pg
 ifdef CONFIG_MPROFILE_KERNEL
-	CC_FLAGS_FTRACE := -pg -mprofile-kernel
+CC_FLAGS_FTRACE += -mprofile-kernel
+endif
+# Work around gcc code-gen bugs with -pg / -fno-omit-frame-pointer in gcc <= 4.8
+# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=44199
+# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52828
+ifneq ($(cc-name),clang)
+CC_FLAGS_FTRACE	+= $(call cc-ifversion, -lt, 0409, -mno-sched-epilog)
+endif
 endif
 
 CFLAGS-$(CONFIG_TARGET_CPU_BOOL) += $(call cc-option,-mcpu=$(CONFIG_TARGET_CPU))
@@ -229,11 +238,6 @@
 KBUILD_CFLAGS		+= -mcpu=powerpc
 endif
 
-# Work around a gcc code-gen bug with -fno-omit-frame-pointer.
-ifdef CONFIG_FUNCTION_TRACER
-KBUILD_CFLAGS		+= -mno-sched-epilog
-endif
-
 cpu-as-$(CONFIG_4xx)		+= -Wa,-m405
 cpu-as-$(CONFIG_ALTIVEC)	+= $(call as-option,-Wa$(comma)-maltivec)
 cpu-as-$(CONFIG_E200)		+= -Wa,-me200
@@ -408,36 +412,9 @@
 # to stdout and these checks are run even on install targets.
 TOUT	:= .tmp_gas_check
 
-# Check gcc and binutils versions:
-# - gcc-3.4 and binutils-2.14 are a fatal combination
-# - Require gcc 4.0 or above on 64-bit
-# - gcc-4.2.0 has issues compiling modules on 64-bit
+# Check toolchain versions:
+# - gcc-4.6 is the minimum kernel-wide version, so no gcc check is required.
 checkbin:
-	@if test "$(cc-name)" != "clang" \
-	    && test "$(cc-version)" = "0304" ; then \
-		if ! /bin/echo mftb 5 | $(AS) -v -mppc -many -o $(TOUT) >/dev/null 2>&1 ; then \
-			echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '; \
-			echo 'correctly with gcc-3.4 and your version of binutils.'; \
-			echo '*** Please upgrade your binutils or downgrade your gcc'; \
-			false; \
-		fi ; \
-	fi
-	@if test "$(cc-name)" != "clang" \
-	    && test "$(cc-version)" -lt "0400" \
-	    && test "x${CONFIG_PPC64}" = "xy" ; then \
-                echo -n "Sorry, GCC v4.0 or above is required to build " ; \
-                echo "the 64-bit powerpc kernel." ; \
-                false ; \
-        fi
-	@if test "$(cc-name)" != "clang" \
-	    && test "$(cc-fullversion)" = "040200" \
-	    && test "x${CONFIG_MODULES}${CONFIG_PPC64}" = "xyy" ; then \
-		echo -n '*** GCC-4.2.0 cannot compile the 64-bit powerpc ' ; \
-		echo 'kernel with modules enabled.' ; \
-		echo -n '*** Please use a different GCC version or ' ; \
-		echo 'disable kernel modules' ; \
-		false ; \
-	fi
 	@if test "x${CONFIG_CPU_LITTLE_ENDIAN}" = "xy" \
 	    && $(LD) --version | head -1 | grep ' 2\.24$$' >/dev/null ; then \
 		echo -n '*** binutils 2.24 miscompiles weak symbols ' ; \
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 0fb96c2..25e3184 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -55,6 +55,11 @@
 
 BOOTARFLAGS	:= -cr$(KBUILD_ARFLAGS)
 
+ifdef CONFIG_CC_IS_CLANG
+BOOTCFLAGS += $(CLANG_FLAGS)
+BOOTAFLAGS += $(CLANG_FLAGS)
+endif
+
 ifdef CONFIG_DEBUG_INFO
 BOOTCFLAGS	+= -g
 endif
diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
index 32dfe6d..9b9d174 100644
--- a/arch/powerpc/boot/crt0.S
+++ b/arch/powerpc/boot/crt0.S
@@ -15,7 +15,7 @@
 RELA = 7
 RELACOUNT = 0x6ffffff9
 
-	.text
+	.data
 	/* A procedure descriptor used when booting this as a COFF file.
 	 * When making COFF, this comes first in the link and we're
 	 * linked at 0x500000.
@@ -23,6 +23,8 @@
 	.globl	_zimage_start_opd
 _zimage_start_opd:
 	.long	0x500000, 0, 0, 0
+	.text
+	b	_zimage_start
 
 #ifdef __powerpc64__
 .balign 8
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 3b66f2c..eac1879 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -5,6 +5,9 @@
 
 CFLAGS_ptrace.o		+= -DUTS_MACHINE='"$(UTS_MACHINE)"'
 
+# Disable clang warning for using setjmp without setjmp.h header
+CFLAGS_crash.o		+= $(call cc-disable-warning, builtin-requires-header)
+
 subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
 
 ifdef CONFIG_PPC64
@@ -22,10 +25,10 @@
 
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
-CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_prom_init.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_btext.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_prom.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_cputable.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_prom_init.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_btext.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_prom.o = $(CC_FLAGS_FTRACE)
 endif
 
 obj-y				:= cputable.o ptrace.o syscalls.o \
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index f6f469f..1b395b8 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -22,7 +22,7 @@
 	COUNT_CACHE_FLUSH_SW	= 0x2,
 	COUNT_CACHE_FLUSH_HW	= 0x4,
 };
-static enum count_cache_flush_type count_cache_flush_type;
+static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
 
 bool barrier_nospec_enabled;
 static bool no_nospec;
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index e6474a4..fd59fef 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -848,7 +848,23 @@
 	/* If TM bits are set to the reserved value, it's an invalid context */
 	if (MSR_TM_RESV(msr_hi))
 		return 1;
-	/* Pull in the MSR TM bits from the user context */
+
+	/*
+	 * Disabling preemption, since it is unsafe to be preempted
+	 * with MSR[TS] set without recheckpointing.
+	 */
+	preempt_disable();
+
+	/*
+	 * CAUTION:
+	 * After regs->MSR[TS] is updated, make sure that get_user(),
+	 * put_user() or similar functions are *not* called. These
+	 * functions can generate page faults which will cause the process
+	 * to be de-scheduled with MSR[TS] set but without calling
+	 * tm_recheckpoint(), which would be a bug.
+	 *
+	 * Pull in the MSR TM bits from the user context
+	 */
 	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
 	/* Now, recheckpoint.  This loads up all of the checkpointed (older)
 	 * registers, including FP and V[S]Rs.  After recheckpointing, the
@@ -873,6 +889,8 @@
 	}
 #endif
 
+	preempt_enable();
+
 	return 0;
 }
 #endif
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 83d51bf..bbd1c73 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -467,20 +467,6 @@
 	if (MSR_TM_RESV(msr))
 		return -EINVAL;
 
-	/* pull in MSR TS bits from user context */
-	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
-
-	/*
-	 * Ensure that TM is enabled in regs->msr before we leave the signal
-	 * handler. It could be the case that (a) user disabled the TM bit
-	 * through the manipulation of the MSR bits in uc_mcontext or (b) the
-	 * TM bit was disabled because a sufficient number of context switches
-	 * happened whilst in the signal handler and load_tm overflowed,
-	 * disabling the TM bit. In either case we can end up with an illegal
-	 * TM state leading to a TM Bad Thing when we return to userspace.
-	 */
-	regs->msr |= MSR_TM;
-
 	/* pull in MSR LE from user context */
 	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
 
@@ -572,6 +558,34 @@
 	tm_enable();
 	/* Make sure the transaction is marked as failed */
 	tsk->thread.tm_texasr |= TEXASR_FS;
+
+	/*
+	 * Disabling preemption, since it is unsafe to be preempted
+	 * with MSR[TS] set without recheckpointing.
+	 */
+	preempt_disable();
+
+	/* pull in MSR TS bits from user context */
+	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
+
+	/*
+	 * Ensure that TM is enabled in regs->msr before we leave the signal
+	 * handler. It could be the case that (a) user disabled the TM bit
+	 * through the manipulation of the MSR bits in uc_mcontext or (b) the
+	 * TM bit was disabled because a sufficient number of context switches
+	 * happened whilst in the signal handler and load_tm overflowed,
+	 * disabling the TM bit. In either case we can end up with an illegal
+	 * TM state leading to a TM Bad Thing when we return to userspace.
+	 *
+	 * CAUTION:
+	 * After regs->MSR[TS] is updated, make sure that get_user(),
+	 * put_user() or similar functions are *not* called. These
+	 * functions can generate page faults which will cause the process
+	 * to be de-scheduled with MSR[TS] set but without calling
+	 * tm_recheckpoint(), which would be a bug.
+	 */
+	regs->msr |= MSR_TM;
+
 	/* This loads the checkpointed FP/VEC state, if used */
 	tm_recheckpoint(&tsk->thread);
 
@@ -585,6 +599,8 @@
 		regs->msr |= MSR_VEC;
 	}
 
+	preempt_enable();
+
 	return err;
 }
 #endif
diff --git a/arch/powerpc/kernel/trace/Makefile b/arch/powerpc/kernel/trace/Makefile
index d22d8ba..d868ba4 100644
--- a/arch/powerpc/kernel/trace/Makefile
+++ b/arch/powerpc/kernel/trace/Makefile
@@ -7,7 +7,7 @@
 
 ifdef CONFIG_FUNCTION_TRACER
 # do not trace tracer code
-CFLAGS_REMOVE_ftrace.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
 endif
 
 obj32-$(CONFIG_FUNCTION_TRACER)		+= ftrace_32.o
diff --git a/arch/powerpc/mm/dump_linuxpagetables.c b/arch/powerpc/mm/dump_linuxpagetables.c
index bdf33b9..8464c2c 100644
--- a/arch/powerpc/mm/dump_linuxpagetables.c
+++ b/arch/powerpc/mm/dump_linuxpagetables.c
@@ -19,6 +19,7 @@
 #include <linux/hugetlb.h>
 #include <linux/io.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
 #include <asm/fixmap.h>
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 7a9886f..a5091c0 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -188,15 +188,20 @@
 	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);
 
 	for (; start < end; start += page_size) {
-		void *p;
+		void *p = NULL;
 		int rc;
 
 		if (vmemmap_populated(start, page_size))
 			continue;
 
+		/*
+		 * Allocate from the altmap first if we have one. This may
+		 * fail due to alignment issues when using 16MB hugepages, so
+		 * fall back to system memory if the altmap allocation fails.
+		 */
 		if (altmap)
 			p = altmap_alloc_block_buf(page_size, altmap);
-		else
+		if (!p)
 			p = vmemmap_alloc_block_buf(page_size, node);
 		if (!p)
 			return -ENOMEM;
@@ -255,8 +260,15 @@
 {
 	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
 	unsigned long page_order = get_order(page_size);
+	unsigned long alt_start = ~0, alt_end = ~0;
+	unsigned long base_pfn;
 
 	start = _ALIGN_DOWN(start, page_size);
+	if (altmap) {
+		alt_start = altmap->base_pfn;
+		alt_end = altmap->base_pfn + altmap->reserve +
+			  altmap->free + altmap->alloc + altmap->align;
+	}
 
 	pr_debug("vmemmap_free %lx...%lx\n", start, end);
 
@@ -280,8 +292,9 @@
 		page = pfn_to_page(addr >> PAGE_SHIFT);
 		section_base = pfn_to_page(vmemmap_section_start(start));
 		nr_pages = 1 << page_order;
+		base_pfn = PHYS_PFN(addr);
 
-		if (altmap) {
+		if (base_pfn >= alt_start && base_pfn < alt_end) {
 			vmem_altmap_free(altmap, nr_pages);
 		} else if (PageReserved(page)) {
 			/* allocated from bootmem */
diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile
index f2839ee..561a67d 100644
--- a/arch/powerpc/platforms/powermac/Makefile
+++ b/arch/powerpc/platforms/powermac/Makefile
@@ -3,7 +3,7 @@
 
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
-CFLAGS_REMOVE_bootx_init.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_bootx_init.o = $(CC_FLAGS_FTRACE)
 endif
 
 obj-y				+= pic.o setup.o time.o feature.o pci.o \
diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
index 1bc3abb..9d7d8e6 100644
--- a/arch/powerpc/xmon/Makefile
+++ b/arch/powerpc/xmon/Makefile
@@ -1,14 +1,17 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for xmon
 
-subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
+# Disable clang warning for using setjmp without setjmp.h header
+subdir-ccflags-y := $(call cc-disable-warning, builtin-requires-header)
+
+subdir-ccflags-$(CONFIG_PPC_WERROR) += -Werror
 
 GCOV_PROFILE := n
 UBSAN_SANITIZE := n
 
 # Disable ftrace for the entire directory
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
-KBUILD_CFLAGS = $(subst -mno-sched-epilog,,$(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS)))
+KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))
 
 ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
 
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 4264aed..dd6badc 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -75,6 +75,9 @@
 #define xmon_owner 0
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_PPC_PSERIES
+static int set_indicator_token = RTAS_UNKNOWN_SERVICE;
+#endif
 static unsigned long in_xmon __read_mostly = 0;
 static int xmon_on = IS_ENABLED(CONFIG_XMON_DEFAULT);
 
@@ -358,7 +361,6 @@
 #ifdef CONFIG_PPC_PSERIES
 	/* Since this can't be a module, args should end up below 4GB. */
 	static struct rtas_args args;
-	int token;
 
 	/*
 	 * At this point we have got all the cpus we can into
@@ -367,11 +369,11 @@
 	 * If we did try to take rtas.lock there would be a
 	 * real possibility of deadlock.
 	 */
-	token = rtas_token("set-indicator");
-	if (token == RTAS_UNKNOWN_SERVICE)
+	if (set_indicator_token == RTAS_UNKNOWN_SERVICE)
 		return;
 
-	rtas_call_unlocked(&args, token, 3, 1, NULL, SURVEILLANCE_TOKEN, 0, 0);
+	rtas_call_unlocked(&args, set_indicator_token, 3, 1, NULL,
+			   SURVEILLANCE_TOKEN, 0, 0);
 
 #endif /* CONFIG_PPC_PSERIES */
 }
@@ -3672,6 +3674,14 @@
 		__debugger_iabr_match = xmon_iabr_match;
 		__debugger_break_match = xmon_break_match;
 		__debugger_fault_handler = xmon_fault_handler;
+
+#ifdef CONFIG_PPC_PSERIES
+		/*
+		 * Get the token here to avoid trying to take a lock
+		 * during the crash, which could cause a deadlock.
+		 */
+		set_indicator_token = rtas_token("set-indicator");
+#endif
 	} else {
 		__debugger = NULL;
 		__debugger_ipi = NULL;
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index f1ab942..09b61d0 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -89,8 +89,6 @@
 {
 	int cpu = smp_processor_id();
 
-	if (prev == next)
-		return;
 	S390_lowcore.user_asce = next->context.asce;
 	cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
 	/* Clear previous user-ASCE from CR1 and CR7 */
@@ -102,7 +100,8 @@
 		__ctl_load(S390_lowcore.vdso_asce, 7, 7);
 		clear_cpu_flag(CIF_ASCE_SECONDARY);
 	}
-	cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
+	if (prev != next)
+		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
 }
 
 #define finish_arch_post_lock_switch finish_arch_post_lock_switch
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 5b28b43..e7e6608 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -64,10 +64,10 @@
 	if (stsi(vmms, 3, 2, 2) || !vmms->count)
 		return;
 
-	/* Running under KVM? If not we assume z/VM */
+	/* Detect known hypervisors */
 	if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
-	else
+	else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
 }
 
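The CPI magic strings are EBCDIC: "\xd2\xe5\xd4" decodes to "KVM" and "\xa9\x61\xe5\xd4" to "z/VM", so the else branch now sets MACHINE_FLAG_VM only for a genuine z/VM host instead of for any non-KVM hypervisor. A tiny decoder covering just these code points (not a full EBCDIC table):

    #include <stdio.h>

    static char ebcdic(unsigned char c)     /* minimal EBCDIC->ASCII map */
    {
            switch (c) {
            case 0xd2: return 'K';
            case 0xe5: return 'V';
            case 0xd4: return 'M';
            case 0xa9: return 'z';
            case 0x61: return '/';
            default:   return '?';
            }
    }

    int main(void)
    {
            const unsigned char kvm[] = "\xd2\xe5\xd4";
            const unsigned char zvm[] = "\xa9\x61\xe5\xd4";

            for (int i = 0; kvm[i]; i++) putchar(ebcdic(kvm[i]));
            putchar('\n');                          /* prints: KVM */
            for (int i = 0; zvm[i]; i++) putchar(ebcdic(zvm[i]));
            putchar('\n');                          /* prints: z/VM */
            return 0;
    }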
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index c637c12..a0097f8 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -882,6 +882,8 @@
 		pr_info("Linux is running under KVM in 64-bit mode\n");
 	else if (MACHINE_IS_LPAR)
 		pr_info("Linux is running natively in 64-bit mode\n");
+	else
+		pr_info("Linux is running as a guest in 64-bit mode\n");
 
 	/* Have one command line that is parsed and saved in /proc/cmdline */
 	/* boot_command_line has been already set up in early.c */
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 2f8f7d7..da02f40 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -371,9 +371,13 @@
  */
 void smp_call_ipl_cpu(void (*func)(void *), void *data)
 {
+	struct lowcore *lc = pcpu_devices->lowcore;
+
+	if (pcpu_devices[0].address == stap())
+		lc = &S390_lowcore;
+
 	pcpu_delegate(&pcpu_devices[0], func, data,
-		      pcpu_devices->lowcore->panic_stack -
-		      PANIC_FRAME_OFFSET + PAGE_SIZE);
+		      lc->panic_stack - PANIC_FRAME_OFFSET + PAGE_SIZE);
 }
 
 int smp_find_processor_id(u16 address)
@@ -1152,7 +1156,11 @@
 {
 	int rc;
 
+	rc = lock_device_hotplug_sysfs();
+	if (rc)
+		return rc;
 	rc = smp_rescan_cpus();
+	unlock_device_hotplug();
 	return rc ? rc : count;
 }
 static DEVICE_ATTR_WO(rescan);
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 19b2d2a..eeb7450 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -436,7 +436,7 @@
 	struct clp_state_data sd = {fid, ZPCI_FN_STATE_RESERVED};
 	int rc;
 
-	rrb = clp_alloc_block(GFP_KERNEL);
+	rrb = clp_alloc_block(GFP_ATOMIC);
 	if (!rrb)
 		return -ENOMEM;
 
diff --git a/arch/x86/configs/x86_64_cuttlefish_defconfig b/arch/x86/configs/x86_64_cuttlefish_defconfig
index d873790..e8bc98bb 100644
--- a/arch/x86/configs/x86_64_cuttlefish_defconfig
+++ b/arch/x86/configs/x86_64_cuttlefish_defconfig
@@ -183,9 +183,12 @@
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_HTB=y
 CONFIG_NET_CLS_U32=y
+CONFIG_NET_CLS_BPF=y
 CONFIG_NET_EMATCH=y
 CONFIG_NET_EMATCH_U32=y
 CONFIG_NET_CLS_ACT=y
+CONFIG_VSOCKETS=y
+CONFIG_VIRTIO_VSOCKETS=y
 CONFIG_CFG80211=y
 CONFIG_MAC80211=y
 CONFIG_RFKILL=y
@@ -379,10 +382,10 @@
 # CONFIG_PWRSEQ_SIMPLE is not set
 # CONFIG_MMC_BLOCK is not set
 CONFIG_RTC_CLASS=y
-# CONFIG_RTC_HCTOSYS is not set
 CONFIG_SW_SYNC=y
 CONFIG_VIRTIO_PCI=y
 CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_INPUT=y
 CONFIG_VIRTIO_MMIO=y
 CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
 CONFIG_STAGING=y
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 7d0df78..40d2834 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -356,7 +356,8 @@
 
 	/* Need to switch before accessing the thread stack. */
 	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
-	movq	%rsp, %rdi
+	/* In the Xen PV case we already run on the thread stack. */
+	ALTERNATIVE "movq %rsp, %rdi", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
 	pushq	6*8(%rdi)		/* regs->ss */
@@ -365,8 +366,9 @@
 	pushq	3*8(%rdi)		/* regs->cs */
 	pushq	2*8(%rdi)		/* regs->ip */
 	pushq	1*8(%rdi)		/* regs->orig_ax */
-
 	pushq	(%rdi)			/* pt_regs->di */
+.Lint80_keep_stack:
+
 	pushq	%rsi			/* pt_regs->si */
 	xorl	%esi, %esi		/* nospec   si */
 	pushq	%rdx			/* pt_regs->dx */
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 141d415..c3d7ccd 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -171,7 +171,8 @@
 		 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
 
 VDSO_LDFLAGS = -shared $(call ld-option, --hash-style=both) \
-	$(call ld-option, --build-id) -Bsymbolic
+	$(call ld-option, --build-id) $(call ld-option, --eh-frame-hdr) \
+	-Bsymbolic
 GCOV_PROFILE := n
 
 #
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 022845e..728dc66 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1441,7 +1441,7 @@
 	"cmpb $0, kvm_rebooting \n\t"	      \
 	"jne 668b \n\t"      		      \
 	__ASM_SIZE(push) " $666b \n\t"	      \
-	"call kvm_spurious_fault \n\t"	      \
+	"jmp kvm_spurious_fault \n\t"	      \
 	".popsection \n\t" \
 	_ASM_EXTABLE(666b, 667b)
 
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index eeeb928..2252b63 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -178,6 +178,10 @@
 
 void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
 
+/*
+ * Init a new mm.  Used on mm copies, like at fork()
+ * and on mm's that are brand-new, like at execve().
+ */
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
@@ -228,8 +232,22 @@
 } while (0)
 #endif
 
+static inline void arch_dup_pkeys(struct mm_struct *oldmm,
+				  struct mm_struct *mm)
+{
+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
+		return;
+
+	/* Duplicate the oldmm pkey state in mm: */
+	mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
+	mm->context.execute_only_pkey   = oldmm->context.execute_only_pkey;
+#endif
+}
+
 static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
 {
+	arch_dup_pkeys(oldmm, mm);
 	paravirt_arch_dup_mmap(oldmm, mm);
 	return ldt_dup_context(oldmm, mm);
 }
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index b3486c8..1f9de76 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -389,6 +389,7 @@
 #define MSR_F15H_NB_PERF_CTR		0xc0010241
 #define MSR_F15H_PTSC			0xc0010280
 #define MSR_F15H_IC_CFG			0xc0011021
+#define MSR_F15H_EX_CFG			0xc001102c
 
 /* Fam 10h MSRs */
 #define MSR_FAM10H_MMIO_CONF_BASE	0xc0010058
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 84bd9bd..88bca45 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -111,6 +111,11 @@
  */
 #define MAXMEM			(1UL << MAX_PHYSMEM_BITS)
 
+#define GUARD_HOLE_PGD_ENTRY	-256UL
+#define GUARD_HOLE_SIZE		(16UL << PGDIR_SHIFT)
+#define GUARD_HOLE_BASE_ADDR	(GUARD_HOLE_PGD_ENTRY << PGDIR_SHIFT)
+#define GUARD_HOLE_END_ADDR	(GUARD_HOLE_BASE_ADDR + GUARD_HOLE_SIZE)
+
 #define LDT_PGD_ENTRY		-240UL
 #define LDT_BASE_ADDR		(LDT_PGD_ENTRY << PGDIR_SHIFT)
 #define LDT_END_ADDR		(LDT_BASE_ADDR + PGDIR_SIZE)
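With 4-level paging (PGDIR_SHIFT == 39) these macros reproduce exactly the range the old hard-coded comment in dump_pagetables.c described, ffff800000000000 - ffff87ffffffffff; a quick userspace check of the arithmetic:

    #include <stdio.h>

    #define PGDIR_SHIFT             39      /* 4-level paging assumed */
    #define GUARD_HOLE_PGD_ENTRY    -256UL
    #define GUARD_HOLE_SIZE         (16UL << PGDIR_SHIFT)
    #define GUARD_HOLE_BASE_ADDR    (GUARD_HOLE_PGD_ENTRY << PGDIR_SHIFT)
    #define GUARD_HOLE_END_ADDR     (GUARD_HOLE_BASE_ADDR + GUARD_HOLE_SIZE)

    int main(void)
    {
            /* Prints: hole: 0xffff800000000000 - 0xffff87ffffffffff */
            printf("hole: %#lx - %#lx\n",
                   GUARD_HOLE_BASE_ADDR, GUARD_HOLE_END_ADDR - 1);
            return 0;
    }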
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 3de6933..afbc872 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -104,9 +104,9 @@
 
 void math_emulate(struct math_emu_info *);
 #ifndef CONFIG_X86_32
-asmlinkage void smp_thermal_interrupt(void);
-asmlinkage void smp_threshold_interrupt(void);
-asmlinkage void smp_deferred_error_interrupt(void);
+asmlinkage void smp_thermal_interrupt(struct pt_regs *regs);
+asmlinkage void smp_threshold_interrupt(struct pt_regs *regs);
+asmlinkage void smp_deferred_error_interrupt(struct pt_regs *regs);
 #endif
 
 extern void ist_enter(struct pt_regs *regs);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 78928f5..807d06a 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -213,7 +213,7 @@
 static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
 	SPECTRE_V2_USER_NONE;
 
-#ifdef RETPOLINE
+#ifdef CONFIG_RETPOLINE
 static bool spectre_v2_bad_module;
 
 bool retpoline_module_ok(bool has_retpoline)
@@ -1000,7 +1000,8 @@
 #endif
 
 	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
-	if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
+	if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
+			e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
 		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
 		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
 				half_pa);
diff --git a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
index 0f53049..627e5c8 100644
--- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
+++ b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
@@ -23,6 +23,7 @@
 
 #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
 
+#include <linux/cpu.h>
 #include <linux/kernfs.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
@@ -310,9 +311,11 @@
 		return -EINVAL;
 	buf[nbytes - 1] = '\0';
 
+	cpus_read_lock();
 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
 	if (!rdtgrp) {
 		rdtgroup_kn_unlock(of->kn);
+		cpus_read_unlock();
 		return -ENOENT;
 	}
 	rdt_last_cmd_clear();
@@ -367,6 +370,7 @@
 
 out:
 	rdtgroup_kn_unlock(of->kn);
+	cpus_read_unlock();
 	return ret ?: nbytes;
 }
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index e12454e..9f915a8 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -23,6 +23,7 @@
 #include <linux/string.h>
 
 #include <asm/amd_nb.h>
+#include <asm/traps.h>
 #include <asm/apic.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
@@ -99,7 +100,7 @@
 	[0 ... MAX_NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 }
 };
 
-const char *smca_get_name(enum smca_bank_types t)
+static const char *smca_get_name(enum smca_bank_types t)
 {
 	if (t >= N_SMCA_BANK_TYPES)
 		return NULL;
@@ -824,7 +825,7 @@
 	mce_log(&m);
 }
 
-asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(void)
+asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(struct pt_regs *regs)
 {
 	entering_irq();
 	trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 2da67b7..ee229ce 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -25,6 +25,7 @@
 #include <linux/cpu.h>
 
 #include <asm/processor.h>
+#include <asm/traps.h>
 #include <asm/apic.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
@@ -390,7 +391,7 @@
 
 static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
 
-asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *r)
+asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *regs)
 {
 	entering_irq();
 	trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c
index 2b584b3..c21e0a1 100644
--- a/arch/x86/kernel/cpu/mcheck/threshold.c
+++ b/arch/x86/kernel/cpu/mcheck/threshold.c
@@ -6,6 +6,7 @@
 #include <linux/kernel.h>
 
 #include <asm/irq_vectors.h>
+#include <asm/traps.h>
 #include <asm/apic.h>
 #include <asm/mce.h>
 #include <asm/trace/irq_vectors.h>
@@ -18,7 +19,7 @@
 
 void (*mce_threshold_vector)(void) = default_threshold_interrupt;
 
-asmlinkage __visible void __irq_entry smp_threshold_interrupt(void)
+asmlinkage __visible void __irq_entry smp_threshold_interrupt(struct pt_regs *regs)
 {
 	entering_irq();
 	trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR);
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index 40eee6c..254683b 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -165,6 +165,8 @@
 	struct mtrr_gentry gentry;
 	void __user *arg = (void __user *) __arg;
 
+	memset(&gentry, 0, sizeof(gentry));
+
 	switch (cmd) {
 	case MTRRIOC_ADD_ENTRY:
 	case MTRRIOC_SET_ENTRY:
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index d9b7192..7f89d60 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -457,6 +457,7 @@
 #else
 	u64 ipi_bitmap = 0;
 #endif
+	long ret;
 
 	if (cpumask_empty(mask))
 		return;
@@ -482,8 +483,9 @@
 		} else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
 			max = apic_id < max ? max : apic_id;
 		} else {
-			kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
+			ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
 				(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
+			WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
 			min = max = apic_id;
 			ipi_bitmap = 0;
 		}
@@ -491,8 +493,9 @@
 	}
 
 	if (ipi_bitmap) {
-		kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
+		ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
 			(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
+		WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
 	}
 
 	local_irq_restore(flags);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index f02ecaf..6489067 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1346,7 +1346,7 @@
 	 * extrapolate the boot cpu's data to all packages.
 	 */
 	ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
-	__max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus);
+	__max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
 	pr_info("Max logical packages: %u\n", __max_logical_packages);
 }
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c97a9d6..39a0e34 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8011,13 +8011,16 @@
 
 	kvm_mce_cap_supported |= MCG_LMCE_P;
 
-	return alloc_kvm_area();
+	r = alloc_kvm_area();
+	if (r)
+		goto out;
+	return 0;
 
 out:
 	for (i = 0; i < VMX_BITMAP_NR; i++)
 		free_page((unsigned long)vmx_bitmap[i]);
 
-    return r;
+	return r;
 }
 
 static __exit void hardware_unsetup(void)
@@ -8287,11 +8290,11 @@
 	if (r < 0)
 		goto out_vmcs02;
 
-	vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
+	vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
 	if (!vmx->nested.cached_vmcs12)
 		goto out_cached_vmcs12;
 
-	vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
+	vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
 	if (!vmx->nested.cached_shadow_vmcs12)
 		goto out_cached_shadow_vmcs12;
 
@@ -11471,6 +11474,8 @@
 			kunmap(vmx->nested.pi_desc_page);
 			kvm_release_page_dirty(vmx->nested.pi_desc_page);
 			vmx->nested.pi_desc_page = NULL;
+			vmx->nested.pi_desc = NULL;
+			vmcs_write64(POSTED_INTR_DESC_ADDR, -1ull);
 		}
 		page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr);
 		if (is_error_page(page))
@@ -11728,7 +11733,7 @@
 	    !nested_exit_intr_ack_set(vcpu) ||
 	    (vmcs12->posted_intr_nv & 0xff00) ||
 	    (vmcs12->posted_intr_desc_addr & 0x3f) ||
-	    (!page_address_valid(vcpu, vmcs12->posted_intr_desc_addr))))
+	    (vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu))))
 		return -EINVAL;
 
 	/* tpr shadow is needed by all apicv features. */
@@ -13979,13 +13984,17 @@
 	else if (enable_shadow_vmcs && !vmx->nested.sync_shadow_vmcs)
 		copy_shadow_to_vmcs12(vmx);
 
-	if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12)))
+	/*
+	 * Copy over the full allocated size of vmcs12 rather than just the size
+	 * of the struct.
+	 */
+	if (copy_to_user(user_kvm_nested_state->data, vmcs12, VMCS12_SIZE))
 		return -EFAULT;
 
 	if (nested_cpu_has_shadow_vmcs(vmcs12) &&
 	    vmcs12->vmcs_link_pointer != -1ull) {
 		if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
-				 get_shadow_vmcs12(vcpu), sizeof(*vmcs12)))
+				 get_shadow_vmcs12(vcpu), VMCS12_SIZE))
 			return -EFAULT;
 	}
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 68b53f0..5a9a3eb 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2343,6 +2343,7 @@
 	case MSR_AMD64_PATCH_LOADER:
 	case MSR_AMD64_BU_CFG2:
 	case MSR_AMD64_DC_CFG:
+	case MSR_F15H_EX_CFG:
 		break;
 
 	case MSR_IA32_UCODE_REV:
@@ -2638,6 +2639,7 @@
 	case MSR_AMD64_BU_CFG2:
 	case MSR_IA32_PERF_CTL:
 	case MSR_AMD64_DC_CFG:
+	case MSR_F15H_EX_CFG:
 		msr_info->data = 0;
 		break;
 	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
@@ -6275,8 +6277,7 @@
 		toggle_interruptibility(vcpu, ctxt->interruptibility);
 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
 		kvm_rip_write(vcpu, ctxt->eip);
-		if (r == EMULATE_DONE &&
-		    (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
+		if (r == EMULATE_DONE && ctxt->tf)
 			kvm_vcpu_do_singlestep(vcpu, &r);
 		if (!ctxt->have_exception ||
 		    exception_type(ctxt->exception.vector) == EXCPT_TRAP)
@@ -6866,10 +6867,10 @@
 	case KVM_HC_CLOCK_PAIRING:
 		ret = kvm_pv_clock_pairing(vcpu, a0, a1);
 		break;
+#endif
 	case KVM_HC_SEND_IPI:
 		ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit);
 		break;
-#endif
 	default:
 		ret = -KVM_ENOSYS;
 		break;
@@ -7304,7 +7305,7 @@
 
 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 {
-	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
+	if (!kvm_apic_present(vcpu))
 		return;
 
 	bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c
index 79778ab..a536651 100644
--- a/arch/x86/lib/kaslr.c
+++ b/arch/x86/lib/kaslr.c
@@ -36,8 +36,8 @@
 	u16 status, timer;
 
 	do {
-		outb(I8254_PORT_CONTROL,
-		     I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
+		outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0,
+		     I8254_PORT_CONTROL);
 		status = inb(I8254_PORT_COUNTER0);
 		timer  = inb(I8254_PORT_COUNTER0);
 		timer |= inb(I8254_PORT_COUNTER0) << 8;
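The bug here was simply swapped arguments: the Linux x86 port-I/O helpers take the data byte first and the port second, so the old call sent the control-port number as data to whatever port the command byte happened to name. For reference:

    /* x86 helper convention: value first, port second. */
    void outb(unsigned char value, unsigned short port);

    outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0, I8254_PORT_CONTROL);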
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index a12afff..c05a818 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -53,10 +53,10 @@
 enum address_markers_idx {
 	USER_SPACE_NR = 0,
 	KERNEL_SPACE_NR,
-	LOW_KERNEL_NR,
-#if defined(CONFIG_MODIFY_LDT_SYSCALL) && defined(CONFIG_X86_5LEVEL)
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
 	LDT_NR,
 #endif
+	LOW_KERNEL_NR,
 	VMALLOC_START_NR,
 	VMEMMAP_START_NR,
 #ifdef CONFIG_KASAN
@@ -64,9 +64,6 @@
 	KASAN_SHADOW_END_NR,
 #endif
 	CPU_ENTRY_AREA_NR,
-#if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL)
-	LDT_NR,
-#endif
 #ifdef CONFIG_X86_ESPFIX64
 	ESPFIX_START_NR,
 #endif
@@ -493,11 +490,11 @@
 {
 #ifdef CONFIG_X86_64
 	/*
-	 * ffff800000000000 - ffff87ffffffffff is reserved for
-	 * the hypervisor.
+	 * A hole in the beginning of kernel address space reserved
+	 * for a hypervisor.
 	 */
-	return	(idx >= pgd_index(__PAGE_OFFSET) - 16) &&
-		(idx <  pgd_index(__PAGE_OFFSET));
+	return	(idx >= pgd_index(GUARD_HOLE_BASE_ADDR)) &&
+		(idx <  pgd_index(GUARD_HOLE_END_ADDR));
 #else
 	return false;
 #endif
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index faca978..d883869 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -932,7 +932,7 @@
 
 	pages = generic_max_swapfile_size();
 
-	if (boot_cpu_has_bug(X86_BUG_L1TF)) {
+	if (boot_cpu_has_bug(X86_BUG_L1TF) && l1tf_mitigation != L1TF_MITIGATION_OFF) {
 		/* Limit the swap file size to MAX_PA/2 for L1TF workaround */
 		unsigned long long l1tf_limit = l1tf_pfn_limit();
 		/*
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index dd519f3..a3e9c6e 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -585,7 +585,6 @@
 							   paddr_end,
 							   page_size_mask,
 							   prot);
-				__flush_tlb_all();
 				continue;
 			}
 			/*
@@ -628,7 +627,6 @@
 		pud_populate(&init_mm, pud, pmd);
 		spin_unlock(&init_mm.page_table_lock);
 	}
-	__flush_tlb_all();
 
 	update_page_count(PG_LEVEL_1G, pages);
 
@@ -669,7 +667,6 @@
 			paddr_last = phys_pud_init(pud, paddr,
 					paddr_end,
 					page_size_mask);
-			__flush_tlb_all();
 			continue;
 		}
 
@@ -681,7 +678,6 @@
 		p4d_populate(&init_mm, p4d, pud);
 		spin_unlock(&init_mm.page_table_lock);
 	}
-	__flush_tlb_all();
 
 	return paddr_last;
 }
@@ -734,8 +730,6 @@
 	if (pgd_changed)
 		sync_global_pgds(vaddr_start, vaddr_end - 1);
 
-	__flush_tlb_all();
-
 	return paddr_last;
 }
 
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 3d0c83e..a3c9ea2 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -519,8 +519,13 @@
 	 * for a "decoy" virtual address (bit 63 clear) passed to
 	 * set_memory_X(). __pa() on a "decoy" address results in a
 	 * physical address with bit 63 set.
+	 *
+	 * Decoy addresses are not present for 32-bit builds, see
+	 * set_mce_nospec().
 	 */
-	return address & __PHYSICAL_MASK;
+	if (IS_ENABLED(CONFIG_X86_64))
+		return address & __PHYSICAL_MASK;
+	return address;
 }
 
 /*
@@ -546,7 +551,11 @@
 
 	start = sanitize_phys(start);
 	end = sanitize_phys(end);
-	BUG_ON(start >= end); /* end is exclusive */
+	if (start >= end) {
+		WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
+				start, end - 1, cattr_name(req_type));
+		return -EINVAL;
+	}
 
 	if (!pat_enabled()) {
 		/* This is identical to page table setting without PAT */
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 2c84c6a..c8f011e 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -640,19 +640,20 @@
 			  unsigned long limit)
 {
 	int i, nr, flush = 0;
-	unsigned hole_low, hole_high;
+	unsigned hole_low = 0, hole_high = 0;
 
 	/* The limit is the last byte to be touched */
 	limit--;
 	BUG_ON(limit >= FIXADDR_TOP);
 
+#ifdef CONFIG_X86_64
 	/*
 	 * 64-bit has a great big hole in the middle of the address
-	 * space, which contains the Xen mappings.  On 32-bit these
-	 * will end up making a zero-sized hole and so is a no-op.
+	 * space, which contains the Xen mappings.
 	 */
-	hole_low = pgd_index(USER_LIMIT);
-	hole_high = pgd_index(PAGE_OFFSET);
+	hole_low = pgd_index(GUARD_HOLE_BASE_ADDR);
+	hole_high = pgd_index(GUARD_HOLE_END_ADDR);
+#endif
 
 	nr = pgd_index(limit) + 1;
 	for (i = 0; i < nr; i++) {
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index c84f1e0..01dcccf 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -361,8 +361,6 @@
 {
 	int cpu;
 
-	pvclock_resume();
-
 	if (xen_clockevent != &xen_vcpuop_clockevent)
 		return;
 
@@ -379,12 +377,15 @@
 };
 
 static struct pvclock_vsyscall_time_info *xen_clock __read_mostly;
+static u64 xen_clock_value_saved;
 
 void xen_save_time_memory_area(void)
 {
 	struct vcpu_register_time_memory_area t;
 	int ret;
 
+	xen_clock_value_saved = xen_clocksource_read() - xen_sched_clock_offset;
+
 	if (!xen_clock)
 		return;
 
@@ -404,7 +405,7 @@
 	int ret;
 
 	if (!xen_clock)
-		return;
+		goto out;
 
 	t.addr.v = &xen_clock->pvti;
 
@@ -421,6 +422,11 @@
 	if (ret != 0)
 		pr_notice("Cannot restore secondary vcpu_time_info (err %d)",
 			  ret);
+
+out:
+	/* Need pvclock_resume() before using xen_clocksource_read(). */
+	pvclock_resume();
+	xen_sched_clock_offset = xen_clocksource_read() - xen_clock_value_saved;
 }
 
 static void xen_setup_vsyscall_time_info(void)
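The offset dance keeps the derived sched clock continuous across suspend: the current value is captured at save time, and the offset is re-derived at restore so the clock resumes from that value even though the raw clocksource has jumped. A userspace model of the arithmetic:

    #include <stdio.h>

    static unsigned long long raw;    /* stands in for xen_clocksource_read() */
    static unsigned long long offset, saved;

    static unsigned long long sched_clock(void)
    {
            return raw - offset;
    }

    int main(void)
    {
            raw = 1000; offset = 200;
            saved = sched_clock();          /* save: 800 */
            raw = 5000;                     /* clocksource jumps across suspend */
            offset = raw - saved;           /* restore: re-derive the offset */
            printf("resumes at %llu (saved %llu)\n", sched_clock(), saved);
            return 0;
    }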
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 29bfe80..da1de19 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -54,13 +54,14 @@
  * Mark a hardware queue as needing a restart. For shared queues, maintain
  * a count of how many hardware queues are marked for restart.
  */
-static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
+void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
 {
 	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
 		return;
 
 	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
 }
+EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);
 
 void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
 {
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 4e028ee..fe66076 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -15,6 +15,7 @@
 				struct request **merged_request);
 bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
+void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
 void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
 
 void blk_mq_sched_insert_request(struct request *rq, bool at_head,
diff --git a/block/blk-stat.h b/block/blk-stat.h
index f4a1568..17b47a8 100644
--- a/block/blk-stat.h
+++ b/block/blk-stat.h
@@ -145,6 +145,11 @@
 	mod_timer(&cb->timer, jiffies + nsecs_to_jiffies(nsecs));
 }
 
+static inline void blk_stat_deactivate(struct blk_stat_callback *cb)
+{
+	del_timer_sync(&cb->timer);
+}
+
 /**
  * blk_stat_activate_msecs() - Gather block statistics during a time window in
  * milliseconds.
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 8ac93fc..0c62bf4 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -760,8 +760,10 @@
 	if (!rqos)
 		return;
 	rwb = RQWB(rqos);
-	if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
+	if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
+		blk_stat_deactivate(rwb->cb);
 		rwb->wb_normal = 0;
+	}
 }
 EXPORT_SYMBOL_GPL(wbt_disable_default);
 
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 099a9e05..d5e21ce 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -373,9 +373,16 @@
 
 /*
  * One confusing aspect here is that we get called for a specific
- * hardware queue, but we return a request that may not be for a
+ * hardware queue, but we may return a request that is for a
  * different hardware queue. This is because mq-deadline has shared
  * state for all hardware queues, in terms of sorting, FIFOs, etc.
+ *
+ * For a zoned block device, __dd_dispatch_request() may return NULL
+ * if all the queued write requests are directed at zones that are already
+ * locked due to on-going write requests. In this case, make sure to mark
+ * the queue as needing a restart to ensure that the queue is run again
+ * and the pending writes dispatched once the target zones for the ongoing
+ * write requests are unlocked in dd_finish_request().
  */
 static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
 {
@@ -384,6 +391,9 @@
 
 	spin_lock(&dd->lock);
 	rq = __dd_dispatch_request(dd);
+	if (!rq && blk_queue_is_zoned(hctx->queue) &&
+	    !list_empty(&dd->fifo_list[WRITE]))
+		blk_mq_sched_mark_restart_hctx(hctx);
 	spin_unlock(&dd->lock);
 
 	return rq;
diff --git a/block/partition-generic.c b/block/partition-generic.c
index d3d14e8..5f8db5c5 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -249,9 +249,10 @@
 	.uevent		= part_uevent,
 };
 
-static void delete_partition_rcu_cb(struct rcu_head *head)
+static void delete_partition_work_fn(struct work_struct *work)
 {
-	struct hd_struct *part = container_of(head, struct hd_struct, rcu_head);
+	struct hd_struct *part = container_of(to_rcu_work(work), struct hd_struct,
+					rcu_work);
 
 	part->start_sect = 0;
 	part->nr_sects = 0;
@@ -262,7 +263,8 @@
 void __delete_partition(struct percpu_ref *ref)
 {
 	struct hd_struct *part = container_of(ref, struct hd_struct, ref);
-	call_rcu(&part->rcu_head, delete_partition_rcu_cb);
+	INIT_RCU_WORK(&part->rcu_work, delete_partition_work_fn);
+	queue_rcu_work(system_wq, &part->rcu_work);
 }
 
 /*
diff --git a/crypto/adiantum.c b/crypto/adiantum.c
index 2dfcf12..5564e73 100644
--- a/crypto/adiantum.c
+++ b/crypto/adiantum.c
@@ -9,7 +9,7 @@
  * Adiantum is a tweakable, length-preserving encryption mode designed for fast
  * and secure disk encryption, especially on CPUs without dedicated crypto
  * instructions.  Adiantum encrypts each sector using the XChaCha12 stream
- * cipher, two passes of an ε-almost-∆-universal (εA∆U) hash function based on
+ * cipher, two passes of an ε-almost-∆-universal (ε-∆U) hash function based on
  * NH and Poly1305, and an invocation of the AES-256 block cipher on a single
  * 16-byte block.  See the paper for details:
  *
@@ -21,12 +21,12 @@
  *	- Stream cipher: XChaCha12 or XChaCha20
  *	- Block cipher: any with a 128-bit block size and 256-bit key
  *
- * This implementation doesn't currently allow other εA∆U hash functions, i.e.
+ * This implementation doesn't currently allow other ε-∆U hash functions, i.e.
  * HPolyC is not supported.  This is because Adiantum is ~20% faster than HPolyC
- * but still provably as secure, and also the εA∆U hash function of HBSH is
+ * but still provably as secure, and also the ε-∆U hash function of HBSH is
  * formally defined to take two inputs (tweak, message) which makes it difficult
  * to wrap with the crypto_shash API.  Rather, some details need to be handled
- * here.  Nevertheless, if needed in the future, support for other εA∆U hash
+ * here.  Nevertheless, if needed in the future, support for other ε-∆U hash
  * functions could be added here.
  */
 
@@ -41,7 +41,7 @@
 #include "internal.h"
 
 /*
- * Size of right-hand block of input data, in bytes; also the size of the block
+ * Size of right-hand part of input data, in bytes; also the size of the block
  * cipher's block size and the hash function's output.
  */
 #define BLOCKCIPHER_BLOCK_SIZE		16
@@ -77,7 +77,7 @@
 struct adiantum_request_ctx {
 
 	/*
-	 * Buffer for right-hand block of data, i.e.
+	 * Buffer for right-hand part of data, i.e.
 	 *
 	 *    P_L => P_M => C_M => C_R when encrypting, or
 	 *    C_R => C_M => P_M => P_L when decrypting.
@@ -93,8 +93,8 @@
 	bool enc; /* true if encrypting, false if decrypting */
 
 	/*
-	 * The result of the Poly1305 εA∆U hash function applied to
-	 * (message length, tweak).
+	 * The result of the Poly1305 ε-∆U hash function applied to
+	 * (bulk length, tweak)
 	 */
 	le128 header_hash;
 
@@ -213,13 +213,16 @@
 }
 
 /*
- * Apply the Poly1305 εA∆U hash function to (message length, tweak) and save the
- * result to rctx->header_hash.
+ * Apply the Poly1305 ε-∆U hash function to (bulk length, tweak) and save the
+ * result to rctx->header_hash.  This is the calculation
  *
- * This value is reused in both the first and second hash steps.  Specifically,
- * it's added to the result of an independently keyed εA∆U hash function (for
- * equal length inputs only) taken over the message.  This gives the overall
- * Adiantum hash of the (tweak, message) pair.
+ *	H_T ← Poly1305_{K_T}(bin_{128}(|L|) || T)
+ *
+ * from the procedure in section 6.4 of the Adiantum paper.  The resulting value
+ * is reused in both the first and second hash steps.  Specifically, it's added
+ * to the result of an independently keyed ε-∆U hash function (for equal length
+ * inputs only) taken over the left-hand part (the "bulk") of the message, to
+ * give the overall Adiantum hash of the (tweak, left-hand part) pair.
  */
 static void adiantum_hash_header(struct skcipher_request *req)
 {
@@ -248,7 +251,7 @@
 	poly1305_core_emit(&state, &rctx->header_hash);
 }
 
-/* Hash the left-hand block (the "bulk") of the message using NHPoly1305 */
+/* Hash the left-hand part (the "bulk") of the message using NHPoly1305 */
 static int adiantum_hash_message(struct skcipher_request *req,
 				 struct scatterlist *sgl, le128 *digest)
 {
@@ -536,6 +539,8 @@
 	ictx = skcipher_instance_ctx(inst);
 
 	/* Stream cipher, e.g. "xchacha12" */
+	crypto_set_skcipher_spawn(&ictx->streamcipher_spawn,
+				  skcipher_crypto_instance(inst));
 	err = crypto_grab_skcipher(&ictx->streamcipher_spawn, streamcipher_name,
 				   0, crypto_requires_sync(algt->type,
 							   algt->mask));
@@ -544,13 +549,15 @@
 	streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn);
 
 	/* Block cipher, e.g. "aes" */
+	crypto_set_spawn(&ictx->blockcipher_spawn,
+			 skcipher_crypto_instance(inst));
 	err = crypto_grab_spawn(&ictx->blockcipher_spawn, blockcipher_name,
 				CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK);
 	if (err)
 		goto out_drop_streamcipher;
 	blockcipher_alg = ictx->blockcipher_spawn.alg;
 
-	/* NHPoly1305 εA∆U hash function */
+	/* NHPoly1305 ε-∆U hash function */
 	_hash_alg = crypto_alg_mod_lookup(nhpoly1305_name,
 					  CRYPTO_ALG_TYPE_SHASH,
 					  CRYPTO_ALG_TYPE_MASK);
@@ -561,10 +568,8 @@
 	hash_alg = __crypto_shash_alg(_hash_alg);
 	err = crypto_init_shash_spawn(&ictx->hash_spawn, hash_alg,
 				      skcipher_crypto_instance(inst));
-	if (err) {
-		crypto_mod_put(_hash_alg);
-		goto out_drop_blockcipher;
-	}
+	if (err)
+		goto out_put_hash;
 
 	/* Check the set of algorithms */
 	if (!adiantum_supported_algorithms(streamcipher_alg, blockcipher_alg,
@@ -590,6 +595,8 @@
 		     hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
 		goto out_drop_hash;
 
+	inst->alg.base.cra_flags = streamcipher_alg->base.cra_flags &
+				   CRYPTO_ALG_ASYNC;
 	inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE;
 	inst->alg.base.cra_ctxsize = sizeof(struct adiantum_tfm_ctx);
 	inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask |
@@ -619,10 +626,13 @@
 	if (err)
 		goto out_drop_hash;
 
+	crypto_mod_put(_hash_alg);
 	return 0;
 
 out_drop_hash:
 	crypto_drop_shash(&ictx->hash_spawn);
+out_put_hash:
+	crypto_mod_put(_hash_alg);
 out_drop_blockcipher:
 	crypto_drop_spawn(&ictx->blockcipher_spawn);
 out_drop_streamcipher:
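
The reworked error ladder above exists because crypto_alg_mod_lookup() returns a module reference that must be dropped on every exit path, success included; the new out_put_hash label and the crypto_mod_put() before the final "return 0" together guarantee that. A minimal userspace sketch of the same acquire-once, put-on-every-path shape, with a toy refcount standing in for the crypto module reference:

#include <stdio.h>

/* Toy refcounted object standing in for the looked-up hash alg. */
struct obj { int refs; };

static void get_ref(struct obj *o) { o->refs++; }
static void put_ref(struct obj *o) { o->refs--; }

/* Sketch of the fixed flow: the lookup reference is dropped on
 * every exit path, including success, via one shared label. */
static int create(struct obj *hash_alg, int fail_step)
{
	int err = 0;

	get_ref(hash_alg);		/* crypto_alg_mod_lookup() */

	if (fail_step == 1) {		/* crypto_init_shash_spawn() fails */
		err = -1;
		goto out_put_hash;
	}
	if (fail_step == 2) {		/* a later setup step fails */
		err = -1;
		goto out_drop_hash;
	}

	put_ref(hash_alg);		/* success path also puts the ref */
	return 0;

out_drop_hash:				/* the spawn would be dropped here */
out_put_hash:
	put_ref(hash_alg);
	return err;
}

int main(void)
{
	struct obj hash = { 0 };
	int step;

	for (step = 0; step <= 2; step++) {
		create(&hash, step);
		printf("step %d: refs=%d\n", step, hash.refs); /* always 0 */
	}
	return 0;
}
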
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 37f54d1..4be293a 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -58,14 +58,22 @@
 		return -EINVAL;
 	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
 		return -EINVAL;
-	if (RTA_PAYLOAD(rta) < sizeof(*param))
+
+	/*
+	 * RTA_OK() didn't align the rtattr's payload when validating that it
+	 * fits in the buffer.  Yet, the keys should start on the next 4-byte
+	 * aligned boundary.  To avoid confusion, require that the rtattr
+	 * payload be exactly the param struct, which has a 4-byte aligned size.
+	 */
+	if (RTA_PAYLOAD(rta) != sizeof(*param))
 		return -EINVAL;
+	BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO);
 
 	param = RTA_DATA(rta);
 	keys->enckeylen = be32_to_cpu(param->enckeylen);
 
-	key += RTA_ALIGN(rta->rta_len);
-	keylen -= RTA_ALIGN(rta->rta_len);
+	key += rta->rta_len;
+	keylen -= rta->rta_len;
 
 	if (keylen < keys->enckeylen)
 		return -EINVAL;
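
The stricter RTA_PAYLOAD(rta) != sizeof(*param) check works because the param struct's size is already a multiple of RTA_ALIGNTO (4), so the key material following the rtattr starts aligned without any rounding; the BUILD_BUG_ON pins that assumption. A standalone sketch of the arithmetic, with local stand-ins for the rtattr macros:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RTA_ALIGNTO	4U
#define RTA_ALIGN(len)	(((len) + RTA_ALIGNTO - 1) & ~(RTA_ALIGNTO - 1))

/* Stand-in for crypto_authenc_key_param: one big-endian u32. */
struct key_param { uint32_t enckeylen; };

int main(void)
{
	/* 4 bytes, already aligned: requiring the payload to be exactly
	 * this size means no hidden padding before the keys. */
	assert(sizeof(struct key_param) % RTA_ALIGNTO == 0);

	/* Had the payload merely been "at least" sizeof(*param), e.g.
	 * 6 bytes, RTA_ALIGN() rounding would skip bytes the new code
	 * now refuses outright. */
	printf("RTA_ALIGN(6) = %u\n", RTA_ALIGN(6U));	/* 8 */
	printf("RTA_ALIGN(4) = %u\n", RTA_ALIGN(4U));	/* 4 */
	return 0;
}
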
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 80a25cc..4741fe8 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -279,7 +279,7 @@
 	struct aead_request *req = areq->data;
 
 	err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
-	aead_request_complete(req, err);
+	authenc_esn_request_complete(req, err);
 }
 
 static int crypto_authenc_esn_decrypt(struct aead_request *req)
diff --git a/crypto/cfb.c b/crypto/cfb.c
index 20987d0..e81e456 100644
--- a/crypto/cfb.c
+++ b/crypto/cfb.c
@@ -144,7 +144,7 @@
 
 	do {
 		crypto_cfb_encrypt_one(tfm, iv, dst);
-		crypto_xor(dst, iv, bsize);
+		crypto_xor(dst, src, bsize);
 		iv = src;
 
 		src += bsize;
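
The crypto_xor(dst, src, bsize) change is the entire fix: CFB decryption computes P_i = E(C_{i-1}) xor C_i, so the keystream placed in dst must be XORed with the current ciphertext block (src), not with the previous block still pointed to by iv. A toy round-trip exercising the same data flow, assuming a stand-in byte-twiddling function for the block cipher's encrypt direction (CFB never uses the decrypt direction):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BSIZE 4

/* Toy stand-in for the block cipher's encryption function E(). */
static void E(const uint8_t in[BSIZE], uint8_t out[BSIZE])
{
	int i;

	for (i = 0; i < BSIZE; i++)
		out[i] = (uint8_t)((in[i] << 1 | in[i] >> 7) ^ 0x5a);
}

static void cfb_encrypt(const uint8_t *src, uint8_t *dst, int n,
			const uint8_t *iv)
{
	uint8_t ks[BSIZE];
	int b, i;

	for (b = 0; b < n; b += BSIZE) {
		E(iv, ks);
		for (i = 0; i < BSIZE; i++)
			dst[b + i] = src[b + i] ^ ks[i];
		iv = dst + b;		/* next IV = this ciphertext block */
	}
}

static void cfb_decrypt(const uint8_t *src, uint8_t *dst, int n,
			const uint8_t *iv)
{
	uint8_t ks[BSIZE];
	int b, i;

	for (b = 0; b < n; b += BSIZE) {
		E(iv, ks);
		for (i = 0; i < BSIZE; i++)
			dst[b + i] = src[b + i] ^ ks[i]; /* xor with src: the fix */
		iv = src + b;		/* iv = src, as in the loop above */
	}
}

int main(void)
{
	uint8_t iv1[BSIZE] = { 1, 2, 3, 4 }, iv2[BSIZE] = { 1, 2, 3, 4 };
	uint8_t pt[8] = "CFBtest", ct[8], out[8];

	cfb_encrypt(pt, ct, 8, iv1);
	cfb_decrypt(ct, out, 8, iv2);
	printf("%s\n", memcmp(pt, out, 8) ? "mismatch" : "round-trip ok");
	return 0;
}
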
diff --git a/crypto/ecc.c b/crypto/ecc.c
index 8facafd..adcce31 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -842,15 +842,23 @@
 
 static void ecc_point_mult(struct ecc_point *result,
 			   const struct ecc_point *point, const u64 *scalar,
-			   u64 *initial_z, u64 *curve_prime,
+			   u64 *initial_z, const struct ecc_curve *curve,
 			   unsigned int ndigits)
 {
 	/* R0 and R1 */
 	u64 rx[2][ECC_MAX_DIGITS];
 	u64 ry[2][ECC_MAX_DIGITS];
 	u64 z[ECC_MAX_DIGITS];
+	u64 sk[2][ECC_MAX_DIGITS];
+	u64 *curve_prime = curve->p;
 	int i, nb;
-	int num_bits = vli_num_bits(scalar, ndigits);
+	int num_bits;
+	int carry;
+
+	carry = vli_add(sk[0], scalar, curve->n, ndigits);
+	vli_add(sk[1], sk[0], curve->n, ndigits);
+	scalar = sk[!carry];
+	num_bits = sizeof(u64) * ndigits * 8 + 1;
 
 	vli_set(rx[1], point->x, ndigits);
 	vli_set(ry[1], point->y, ndigits);
@@ -1004,7 +1012,7 @@
 		goto out;
 	}
 
-	ecc_point_mult(pk, &curve->g, priv, NULL, curve->p, ndigits);
+	ecc_point_mult(pk, &curve->g, priv, NULL, curve, ndigits);
 	if (ecc_point_is_zero(pk)) {
 		ret = -EAGAIN;
 		goto err_free_point;
@@ -1090,7 +1098,7 @@
 		goto err_alloc_product;
 	}
 
-	ecc_point_mult(product, pk, priv, rand_z, curve->p, ndigits);
+	ecc_point_mult(product, pk, priv, rand_z, curve, ndigits);
 
 	ecc_swap_digits(product->x, secret, ndigits);
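
The blinding added to ecc_point_mult() is why the signature now takes the whole curve instead of just curve_prime: adding the group order n once or twice to the scalar leaves scalar*P unchanged, while forcing the value the Montgomery ladder actually walks to a fixed sizeof(u64) * ndigits * 8 + 1 bits, so the loop's trip count no longer depends on the private key. A toy single-digit illustration of the sk[!carry] selection, assuming (as holds for the supported curves) that the top bit of n is set:

#include <stdint.h>
#include <stdio.h>

/* Count significant bits, like vli_num_bits() on one tiny digit. */
static int bits(uint32_t v)
{
	int b = 0;

	while (v) {
		b++;
		v >>= 1;
	}
	return b;
}

int main(void)
{
	const uint32_t n = 197;	/* toy group order: one 8-bit "digit" */
	uint32_t scalar;

	for (scalar = 1; scalar < n; scalar += 49) {
		uint32_t sk0 = scalar + n;	/* may still fit in 8 bits */
		uint32_t sk1 = sk0 + n;		/* used when sk0 didn't carry */
		int carry = sk0 >> 8;		/* carry out of the digit? */
		uint32_t sk = carry ? sk0 : sk1;	/* sk[!carry] */

		/* Every chosen value is exactly 9 == 8 * ndigits + 1 bits. */
		printf("scalar=%3u -> sk=%3u (%d bits)\n", scalar, sk, bits(sk));
	}
	return 0;
}
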
 
diff --git a/crypto/nhpoly1305.c b/crypto/nhpoly1305.c
index c838585..ec831a5 100644
--- a/crypto/nhpoly1305.c
+++ b/crypto/nhpoly1305.c
@@ -9,15 +9,15 @@
  * "NHPoly1305" is the main component of Adiantum hashing.
  * Specifically, it is the calculation
  *
- *	H_M ← Poly1305_{K_M}(NH_{K_N}(pad_{128}(M)))
+ *	H_L ← Poly1305_{K_L}(NH_{K_N}(pad_{128}(L)))
  *
- * from the procedure in section A.5 of the Adiantum paper [1].  It is an
- * ε-almost-∆-universal (εA∆U) hash function for equal-length inputs over
+ * from the procedure in section 6.4 of the Adiantum paper [1].  It is an
+ * ε-almost-∆-universal (ε-∆U) hash function for equal-length inputs over
  * Z/(2^{128}Z), where the "∆" operation is addition.  It hashes 1024-byte
  * chunks of the input with the NH hash function [2], reducing the input length
  * by 32x.  The resulting NH digests are evaluated as a polynomial in
  * GF(2^{130}-5), like in the Poly1305 MAC [3].  Note that the polynomial
- * evaluation by itself would suffice to achieve the εA∆U property; NH is used
+ * evaluation by itself would suffice to achieve the ε-∆U property; NH is used
  * for performance since it's over twice as fast as Poly1305.
  *
  * This is *not* a cryptographic hash function; do not use it as such!
diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c
index 9a5c60f..c0cf87a 100644
--- a/crypto/sm3_generic.c
+++ b/crypto/sm3_generic.c
@@ -100,7 +100,7 @@
 
 	for (i = 0; i <= 63; i++) {
 
-		ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7);
+		ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7);
 
 		ss2 = ss1 ^ rol32(a, 12);
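
rol32(x, n) with n >= 32 shifts a 32-bit value by at least its full width, which is undefined behaviour in C, and the SM3 round index i runs up to 63; masking with i & 31 is correct because rotation is periodic with period 32. A standalone check, assuming a shift-safe rol32 helper:

#include <stdint.h>
#include <stdio.h>

/* Shift-safe 32-bit rotate: both shift amounts stay within [0, 31]. */
static uint32_t rol32(uint32_t x, unsigned int s)
{
	s &= 31;
	return s ? (x << s) | (x >> (32 - s)) : x;
}

/* Stand-in for SM3's round-constant selector t(i). */
static uint32_t t(int i)
{
	return i < 16 ? 0x79cc4519u : 0x7a879d8au;
}

int main(void)
{
	int i;

	/* i = 0, 31, 62: the last two straddle the old UB boundary. */
	for (i = 0; i <= 63; i += 31)
		printf("i=%2d  rol32(t(i), i & 31) = %08x\n",
		       i, rol32(t(i), i & 31));
	return 0;
}
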
 
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index a129c12..740ef57 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1736,6 +1736,7 @@
 		ret += tcrypt_test("xts(aes)");
 		ret += tcrypt_test("ctr(aes)");
 		ret += tcrypt_test("rfc3686(ctr(aes))");
+		ret += tcrypt_test("cfb(aes)");
 		break;
 
 	case 11:
@@ -2062,6 +2063,10 @@
 				speed_template_16_24_32);
 		test_cipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
 				speed_template_16_24_32);
+		test_cipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0,
+				speed_template_16_24_32);
+		test_cipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0,
+				speed_template_16_24_32);
 		break;
 
 	case 201:
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index a843ae2..1ffa4b3 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -2697,6 +2697,13 @@
 			}
 		}
 	}, {
+		.alg = "cfb(aes)",
+		.test = alg_test_skcipher,
+		.fips_allowed = 1,
+		.suite = {
+			.cipher = __VECS(aes_cfb_tv_template)
+		},
+	}, {
 		.alg = "chacha20",
 		.test = alg_test_skcipher,
 		.suite = {
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index e2c259f..dce4bca 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -12575,6 +12575,82 @@
 	},
 };
 
+static const struct cipher_testvec aes_cfb_tv_template[] = {
+	{ /* From NIST SP800-38A */
+		.key	= "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+			  "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+		.klen	= 16,
+		.iv	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.ptext	= "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+			  "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+			  "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+			  "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+			  "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+			  "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+			  "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+			  "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+		.ctext	= "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
+			  "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
+			  "\xc8\xa6\x45\x37\xa0\xb3\xa9\x3f"
+			  "\xcd\xe3\xcd\xad\x9f\x1c\xe5\x8b"
+			  "\x26\x75\x1f\x67\xa3\xcb\xb1\x40"
+			  "\xb1\x80\x8c\xf1\x87\xa4\xf4\xdf"
+			  "\xc0\x4b\x05\x35\x7c\x5d\x1c\x0e"
+			  "\xea\xc4\xc6\x6f\x9f\xf7\xf2\xe6",
+		.len	= 64,
+	}, {
+		.key	= "\x8e\x73\xb0\xf7\xda\x0e\x64\x52"
+			  "\xc8\x10\xf3\x2b\x80\x90\x79\xe5"
+			  "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b",
+		.klen	= 24,
+		.iv	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.ptext	= "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+			  "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+			  "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+			  "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+			  "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+			  "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+			  "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+			  "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+		.ctext	= "\xcd\xc8\x0d\x6f\xdd\xf1\x8c\xab"
+			  "\x34\xc2\x59\x09\xc9\x9a\x41\x74"
+			  "\x67\xce\x7f\x7f\x81\x17\x36\x21"
+			  "\x96\x1a\x2b\x70\x17\x1d\x3d\x7a"
+			  "\x2e\x1e\x8a\x1d\xd5\x9b\x88\xb1"
+			  "\xc8\xe6\x0f\xed\x1e\xfa\xc4\xc9"
+			  "\xc0\x5f\x9f\x9c\xa9\x83\x4f\xa0"
+			  "\x42\xae\x8f\xba\x58\x4b\x09\xff",
+		.len	= 64,
+	}, {
+		.key	= "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
+			  "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
+			  "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
+			  "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
+		.klen	= 32,
+		.iv	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.ptext	= "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+			  "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+			  "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+			  "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+			  "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+			  "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+			  "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+			  "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+		.ctext	= "\xdc\x7e\x84\xbf\xda\x79\x16\x4b"
+			  "\x7e\xcd\x84\x86\x98\x5d\x38\x60"
+			  "\x39\xff\xed\x14\x3b\x28\xb1\xc8"
+			  "\x32\x11\x3c\x63\x31\xe5\x40\x7b"
+			  "\xdf\x10\x13\x24\x15\xe5\x4b\x92"
+			  "\xa1\x3e\xd0\xa8\x26\x7a\xe2\xf9"
+			  "\x75\xa3\x85\x74\x1a\xb9\xce\xf8"
+			  "\x20\x31\x62\x3d\x55\xb1\xe4\x71",
+		.len	= 64,
+	},
+};
+
 static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
 	{ /* Input data from RFC 2410 Case 1 */
 #ifdef __LITTLE_ENDIAN
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index e938576..e48eebc 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -951,9 +951,10 @@
 {
 	struct acpi_iort_node *node;
 	struct acpi_iort_root_complex *rc;
+	struct pci_bus *pbus = to_pci_dev(dev)->bus;
 
 	node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
-			      iort_match_node_callback, dev);
+			      iort_match_node_callback, &pbus->dev);
 	if (!node || node->revision < 1)
 		return -ENODEV;
 
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 75b331f..ea59c01 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -391,6 +391,32 @@
 	return id;
 }
 
+static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
+		struct nd_cmd_pkg *call_pkg)
+{
+	if (call_pkg) {
+		int i;
+
+		if (nfit_mem->family != call_pkg->nd_family)
+			return -ENOTTY;
+
+		for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
+			if (call_pkg->nd_reserved2[i])
+				return -EINVAL;
+		return call_pkg->nd_command;
+	}
+
+	/* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
+	if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
+		return cmd;
+
+	/*
+	 * Force function number validation to fail since 0 is never
+	 * published as a valid function in dsm_mask.
+	 */
+	return 0;
+}
+
 int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 		unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
 {
@@ -404,30 +430,23 @@
 	unsigned long cmd_mask, dsm_mask;
 	u32 offset, fw_status = 0;
 	acpi_handle handle;
-	unsigned int func;
 	const guid_t *guid;
-	int rc, i;
+	int func, rc, i;
 
 	if (cmd_rc)
 		*cmd_rc = -EINVAL;
-	func = cmd;
-	if (cmd == ND_CMD_CALL) {
-		call_pkg = buf;
-		func = call_pkg->nd_command;
-
-		for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
-			if (call_pkg->nd_reserved2[i])
-				return -EINVAL;
-	}
 
 	if (nvdimm) {
 		struct acpi_device *adev = nfit_mem->adev;
 
 		if (!adev)
 			return -ENOTTY;
-		if (call_pkg && nfit_mem->family != call_pkg->nd_family)
-			return -ENOTTY;
 
+		if (cmd == ND_CMD_CALL)
+			call_pkg = buf;
+		func = cmd_to_func(nfit_mem, cmd, call_pkg);
+		if (func < 0)
+			return func;
 		dimm_name = nvdimm_name(nvdimm);
 		cmd_name = nvdimm_cmd_name(cmd);
 		cmd_mask = nvdimm_cmd_mask(nvdimm);
@@ -438,6 +457,7 @@
 	} else {
 		struct acpi_device *adev = to_acpi_dev(acpi_desc);
 
+		func = cmd;
 		cmd_name = nvdimm_bus_cmd_name(cmd);
 		cmd_mask = nd_desc->cmd_mask;
 		dsm_mask = cmd_mask;
@@ -452,7 +472,13 @@
 	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
 		return -ENOTTY;
 
-	if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
+	/*
+	 * Check for a valid command.  For ND_CMD_CALL, we also have to
+	 * make sure that the DSM function is supported.
+	 */
+	if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask))
+		return -ENOTTY;
+	else if (!test_bit(cmd, &cmd_mask))
 		return -ENOTTY;
 
 	in_obj.type = ACPI_TYPE_PACKAGE;
@@ -1764,6 +1790,13 @@
 		return 0;
 	}
 
+	/*
+	 * Function 0 is the command interrogation function; don't
+	 * expose it to potential userspace use, and let it double as
+	 * an error value in acpi_nfit_ctl().
+	 */
+	dsm_mask &= ~1UL;
+
 	guid = to_nfit_uuid(nfit_mem->family);
 	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
 		if (acpi_check_dsm(adev_dimm->handle, guid,
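
cmd_to_func() and the dsm_mask &= ~1UL at probe time form one decision table: a packaged ND_CMD_CALL supplies its own function number (after family and reserved-field validation), a plain command translates 1:1 only for NVDIMM_FAMILY_INTEL, and everything else collapses to function 0, which can never pass the dsm_mask test. A compressed userspace sketch of that table, with hypothetical types standing in for the nfit structures:

#include <stdio.h>

enum { FAMILY_INTEL = 0, FAMILY_OTHER = 1 };

struct call_pkg { int family; int command; int reserved[3]; };

/* Mirrors the shape of cmd_to_func(): negative is an error, 0 can
 * never match dsm_mask because its bit 0 is cleared at probe time. */
static int cmd_to_func(int dimm_family, int cmd, const struct call_pkg *pkg)
{
	int i;

	if (pkg) {
		if (pkg->family != dimm_family)
			return -1;		/* -ENOTTY */
		for (i = 0; i < 3; i++)
			if (pkg->reserved[i])
				return -2;	/* -EINVAL */
		return pkg->command;
	}
	if (dimm_family == FAMILY_INTEL)
		return cmd;	/* ND commands == Intel function numbers */
	return 0;		/* forced to fail the dsm_mask lookup */
}

int main(void)
{
	unsigned long dsm_mask = 0x3e;	/* functions 1-5 supported */
	struct call_pkg pkg = { FAMILY_OTHER, 3, { 0, 0, 0 } };
	int func, allowed;

	dsm_mask &= ~1UL;		/* never advertise function 0 */

	func = cmd_to_func(FAMILY_OTHER, 0, &pkg);
	allowed = func > 0 && ((dsm_mask >> func) & 1);
	printf("packaged call -> func %d, allowed=%d\n", func, allowed);

	func = cmd_to_func(FAMILY_OTHER, 4, NULL);	/* plain, non-Intel */
	allowed = func > 0 && ((dsm_mask >> func) & 1);
	printf("plain call    -> func %d, allowed=%d\n", func, allowed);
	return 0;
}
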
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
index 316e551..bb5391f 100644
--- a/drivers/acpi/pmic/intel_pmic_xpower.c
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -27,8 +27,11 @@
 #define GPI1_LDO_ON		(3 << 0)
 #define GPI1_LDO_OFF		(4 << 0)
 
-#define AXP288_ADC_TS_PIN_GPADC	0xf2
-#define AXP288_ADC_TS_PIN_ON	0xf3
+#define AXP288_ADC_TS_CURRENT_ON_OFF_MASK		GENMASK(1, 0)
+#define AXP288_ADC_TS_CURRENT_OFF			(0 << 0)
+#define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING		(1 << 0)
+#define AXP288_ADC_TS_CURRENT_ON_ONDEMAND		(2 << 0)
+#define AXP288_ADC_TS_CURRENT_ON			(3 << 0)
 
 static struct pmic_table power_table[] = {
 	{
@@ -211,22 +214,44 @@
  */
 static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg)
 {
+	int ret, adc_ts_pin_ctrl;
 	u8 buf[2];
-	int ret;
 
-	ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL,
-			   AXP288_ADC_TS_PIN_GPADC);
+	/*
+	 * The current-source used for the battery temp-sensor (TS) is shared
+	 * with the GPADC. For proper fuel-gauge and charger operation the TS
+	 * current-source needs to be permanently on. But to read the GPADC we
+	 * need to temporarily switch the TS current-source to on-demand, so that
+	 * the GPADC can use it, otherwise we will always read an all 0 value.
+	 *
+	 * Note that switching from on to on-demand is not necessary
+	 * when the TS current-source is off (this happens on devices which
+	 * do not use the TS-pin).
+	 */
+	ret = regmap_read(regmap, AXP288_ADC_TS_PIN_CTRL, &adc_ts_pin_ctrl);
 	if (ret)
 		return ret;
 
-	/* After switching to the GPADC pin give things some time to settle */
-	usleep_range(6000, 10000);
+	if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) {
+		ret = regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL,
+					 AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
+					 AXP288_ADC_TS_CURRENT_ON_ONDEMAND);
+		if (ret)
+			return ret;
+
+		/* Wait a bit after switching the current-source */
+		usleep_range(6000, 10000);
+	}
 
 	ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2);
 	if (ret == 0)
 		ret = (buf[0] << 4) + ((buf[1] >> 4) & 0x0f);
 
-	regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON);
+	if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) {
+		regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL,
+				   AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
+				   AXP288_ADC_TS_CURRENT_ON);
+	}
 
 	return ret;
 }
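
The old 0xf2/0xf3 magic numbers were whole-register writes; the rewrite updates only the two current-source control bits through regmap_update_bits(), so any other bits in AXP288_ADC_TS_PIN_CTRL survive. A minimal sketch of the masked read-modify-write, with a plain variable standing in for the register (note how 0xf3 becomes 0xf2 when just the field changes):

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))

#define TS_CURRENT_MASK		GENMASK(1, 0)
#define TS_CURRENT_ON_ONDEMAND	(2 << 0)
#define TS_CURRENT_ON		(3 << 0)

/* Stand-in for regmap_update_bits(): change only the masked field. */
static void update_bits(uint8_t *reg, uint8_t mask, uint8_t val)
{
	*reg = (*reg & ~mask) | (val & mask);
}

int main(void)
{
	uint8_t reg = 0xf3;	/* upper bits set, TS current-source on */

	if (reg & TS_CURRENT_MASK) {	/* only touch it if TS is in use */
		update_bits(&reg, TS_CURRENT_MASK, TS_CURRENT_ON_ONDEMAND);
		printf("sampling GPADC: reg=0x%02x\n", reg);	/* 0xf2 */
		update_bits(&reg, TS_CURRENT_MASK, TS_CURRENT_ON);
	}
	printf("restored:       reg=0x%02x\n", reg);		/* 0xf3 */
	return 0;
}
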
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 1b475bc..665e93c 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -131,6 +131,23 @@
 	}
 }
 
+static bool acpi_power_resource_is_dup(union acpi_object *package,
+				       unsigned int start, unsigned int i)
+{
+	acpi_handle rhandle, dup;
+	unsigned int j;
+
+	/* The caller is expected to check the package element types */
+	rhandle = package->package.elements[i].reference.handle;
+	for (j = start; j < i; j++) {
+		dup = package->package.elements[j].reference.handle;
+		if (dup == rhandle)
+			return true;
+	}
+
+	return false;
+}
+
 int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
 				 struct list_head *list)
 {
@@ -150,6 +167,11 @@
 			err = -ENODEV;
 			break;
 		}
+
+		/* Some ACPI tables contain duplicate power resource references */
+		if (acpi_power_resource_is_dup(package, start, i))
+			continue;
+
 		err = acpi_add_power_resource(rhandle);
 		if (err)
 			break;
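
acpi_power_resource_is_dup() is a plain prefix scan: before element i is added, its handle is compared with every earlier element in [start, i), so the first occurrence wins and later duplicates are skipped. The same dedup-while-extracting loop over ordinary pointers:

#include <stdbool.h>
#include <stdio.h>

static bool is_dup(void *const *elems, unsigned int start, unsigned int i)
{
	unsigned int j;

	for (j = start; j < i; j++)
		if (elems[j] == elems[i])
			return true;
	return false;
}

int main(void)
{
	int a, b;	/* two distinct "power resources" */
	void *package[] = { &a, &b, &a, &b };	/* both repeated once */
	unsigned int i;

	for (i = 0; i < 4; i++) {
		if (is_dup(package, 0, i)) {
			printf("element %u: duplicate, skipped\n", i);
			continue;
		}
		printf("element %u: added\n", i);
	}
	return 0;
}
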
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 7690884..6495abf 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -5474,6 +5474,9 @@
 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
 		struct binder_node *node = rb_entry(n, struct binder_node,
 						    rb_node);
+		if (!print_all && !node->has_async_transaction)
+			continue;
+
 		/*
 		 * take a temporary reference on the node so it
 		 * survives and isn't removed from the tree
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index 81c22d2..60e0b77 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -538,6 +538,9 @@
 	}
 	case 'x':	/* gotoxy : LxXXX[yYYY]; */
 	case 'y':	/* gotoxy : LyYYY[xXXX]; */
+		if (priv->esc_seq.buf[priv->esc_seq.len - 1] != ';')
+			break;
+
 		/* If the command is valid, move to the new address */
 		if (parse_xy(esc, &priv->addr.x, &priv->addr.y))
 			charlcd_gotoxy(lcd);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 8bfd27e..585e2e1 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -31,6 +31,9 @@
 
 #define to_drv_attr(_attr) container_of(_attr, struct driver_attribute, attr)
 
+#define DRIVER_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
+	struct driver_attribute driver_attr_##_name =		\
+		__ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
 
 static int __must_check bus_rescan_devices_helper(struct device *dev,
 						void *data);
@@ -195,7 +198,7 @@
 	bus_put(bus);
 	return err;
 }
-static DRIVER_ATTR_WO(unbind);
+static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, S_IWUSR, NULL, unbind_store);
 
 /*
  * Manually attach a device to a driver.
@@ -231,7 +234,7 @@
 	bus_put(bus);
 	return err;
 }
-static DRIVER_ATTR_WO(bind);
+static DRIVER_ATTR_IGNORE_LOCKDEP(bind, S_IWUSR, NULL, bind_store);
 
 static ssize_t show_drivers_autoprobe(struct bus_type *bus, char *buf)
 {
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 0bf3f75..bb666f6 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -952,11 +952,11 @@
 
 		while (device_links_busy(dev)) {
 			device_unlock(dev);
-			if (parent)
+			if (parent && dev->bus->need_parent_lock)
 				device_unlock(parent);
 
 			device_links_unbind_consumers(dev);
-			if (parent)
+			if (parent && dev->bus->need_parent_lock)
 				device_lock(parent);
 
 			device_lock(dev);
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index a2e59a9..b4a1e88 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -498,12 +498,12 @@
 }
 #endif
 
-static DEVICE_ATTR_RO(phys_index, 0444, phys_index_show, NULL);
+static DEVICE_ATTR(phys_index, 0444, phys_index_show, NULL);
 static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state);
 static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL);
 static DEVICE_ATTR(removable, 0444, show_mem_removable, NULL);
 #ifdef CONFIG_MEMORY_HOTPLUG
-static DEVICE_ATTR_RO(allocated_bytes, 0444, allocated_bytes_show, NULL);
+static DEVICE_ATTR(allocated_bytes, 0444, allocated_bytes_show, NULL);
 #endif
 
 /*
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 60d6cc6..6d54905 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -366,14 +366,16 @@
 			      unsigned int nvec)
 {
 	struct platform_msi_priv_data *data = domain->host_data;
-	struct msi_desc *desc;
-	for_each_msi_entry(desc, data->dev) {
+	struct msi_desc *desc, *tmp;
+
+	for_each_msi_entry_safe(desc, tmp, data->dev) {
 		if (WARN_ON(!desc->irq || desc->nvec_used != 1))
 			return;
 		if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
 			continue;
 
 		irq_domain_free_irqs_common(domain, desc->irq, 1);
+		list_del(&desc->list);
+		free_msi_entry(desc);
 	}
 }
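
The switch to for_each_msi_entry_safe() matters because the loop body now unlinks and frees the descriptor it is standing on; the _safe variant caches the next pointer before the body runs, whereas the plain iterator would have to read desc->list.next after free_msi_entry(desc), a use-after-free. A self-contained sketch of the same idiom on a minimal singly linked list:

#include <stdio.h>
#include <stdlib.h>

struct node { int irq; struct node *next; };

int main(void)
{
	struct node *head = NULL, *cur, *tmp, **prev;
	int i;

	/* Build irq 1 -> 2 -> 3. */
	for (i = 3; i >= 1; i--) {
		struct node *n = malloc(sizeof(*n));

		n->irq = i;
		n->next = head;
		head = n;
	}

	/* "Safe" traversal: save the next pointer before possibly
	 * freeing cur, as for_each_msi_entry_safe(desc, tmp, dev) does. */
	prev = &head;
	for (cur = head; cur; cur = tmp) {
		tmp = cur->next;	/* cached link */
		if (cur->irq == 2) {	/* "in the virq range" */
			*prev = tmp;	/* list_del() */
			free(cur);	/* free_msi_entry() */
			continue;
		}
		prev = &cur->next;
	}

	for (cur = head; cur; cur = cur->next)
		printf("irq %d still queued\n", cur->irq);
	return 0;
}
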
 
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ea9debf..c9c2bcc 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -83,7 +83,7 @@
 #include <linux/uaccess.h>
 
 static DEFINE_IDR(loop_index_idr);
-static DEFINE_MUTEX(loop_index_mutex);
+static DEFINE_MUTEX(loop_ctl_mutex);
 
 static int max_part;
 static int part_shift;
@@ -631,18 +631,7 @@
 {
 	int rc;
 
-	/*
-	 * bd_mutex has been held already in release path, so don't
-	 * acquire it if this function is called in such case.
-	 *
-	 * If the reread partition isn't from release path, lo_refcnt
-	 * must be at least one and it can only become zero when the
-	 * current holder is released.
-	 */
-	if (!atomic_read(&lo->lo_refcnt))
-		rc = __blkdev_reread_part(bdev);
-	else
-		rc = blkdev_reread_part(bdev);
+	rc = blkdev_reread_part(bdev);
 	if (rc)
 		pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n",
 			__func__, lo->lo_number, lo->lo_file_name, rc);
@@ -689,26 +678,30 @@
 static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
 			  unsigned int arg)
 {
-	struct file	*file, *old_file;
+	struct file	*file = NULL, *old_file;
 	int		error;
+	bool		partscan;
 
+	error = mutex_lock_killable(&loop_ctl_mutex);
+	if (error)
+		return error;
 	error = -ENXIO;
 	if (lo->lo_state != Lo_bound)
-		goto out;
+		goto out_err;
 
 	/* the loop device has to be read-only */
 	error = -EINVAL;
 	if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
-		goto out;
+		goto out_err;
 
 	error = -EBADF;
 	file = fget(arg);
 	if (!file)
-		goto out;
+		goto out_err;
 
 	error = loop_validate_file(file, bdev);
 	if (error)
-		goto out_putf;
+		goto out_err;
 
 	old_file = lo->lo_backing_file;
 
@@ -716,7 +709,7 @@
 
 	/* size of the new backing store needs to be the same */
 	if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
-		goto out_putf;
+		goto out_err;
 
 	/* and ... switch */
 	blk_mq_freeze_queue(lo->lo_queue);
@@ -727,15 +720,22 @@
 			     lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
 	loop_update_dio(lo);
 	blk_mq_unfreeze_queue(lo->lo_queue);
-
+	partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
+	mutex_unlock(&loop_ctl_mutex);
+	/*
+	 * We must drop file reference outside of loop_ctl_mutex as dropping
+	 * the file ref can take bd_mutex which creates circular locking
+	 * dependency.
+	 */
 	fput(old_file);
-	if (lo->lo_flags & LO_FLAGS_PARTSCAN)
+	if (partscan)
 		loop_reread_partitions(lo, bdev);
 	return 0;
 
- out_putf:
-	fput(file);
- out:
+out_err:
+	mutex_unlock(&loop_ctl_mutex);
+	if (file)
+		fput(file);
 	return error;
 }
 
@@ -910,6 +910,7 @@
 	int		lo_flags = 0;
 	int		error;
 	loff_t		size;
+	bool		partscan;
 
 	/* This is safe, since we have a reference from open(). */
 	__module_get(THIS_MODULE);
@@ -919,13 +920,17 @@
 	if (!file)
 		goto out;
 
+	error = mutex_lock_killable(&loop_ctl_mutex);
+	if (error)
+		goto out_putf;
+
 	error = -EBUSY;
 	if (lo->lo_state != Lo_unbound)
-		goto out_putf;
+		goto out_unlock;
 
 	error = loop_validate_file(file, bdev);
 	if (error)
-		goto out_putf;
+		goto out_unlock;
 
 	mapping = file->f_mapping;
 	inode = mapping->host;
@@ -937,10 +942,10 @@
 	error = -EFBIG;
 	size = get_loop_size(lo, file);
 	if ((loff_t)(sector_t)size != size)
-		goto out_putf;
+		goto out_unlock;
 	error = loop_prepare_queue(lo);
 	if (error)
-		goto out_putf;
+		goto out_unlock;
 
 	error = 0;
 
@@ -972,18 +977,22 @@
 	lo->lo_state = Lo_bound;
 	if (part_shift)
 		lo->lo_flags |= LO_FLAGS_PARTSCAN;
-	if (lo->lo_flags & LO_FLAGS_PARTSCAN)
-		loop_reread_partitions(lo, bdev);
+	partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
 
 	/* Grab the block_device to prevent its destruction after we
-	 * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev).
+	 * put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev).
 	 */
 	bdgrab(bdev);
+	mutex_unlock(&loop_ctl_mutex);
+	if (partscan)
+		loop_reread_partitions(lo, bdev);
 	return 0;
 
- out_putf:
+out_unlock:
+	mutex_unlock(&loop_ctl_mutex);
+out_putf:
 	fput(file);
- out:
+out:
 	/* This is safe: open() is still holding a reference. */
 	module_put(THIS_MODULE);
 	return error;
@@ -1026,39 +1035,31 @@
 	return err;
 }
 
-static int loop_clr_fd(struct loop_device *lo)
+static int __loop_clr_fd(struct loop_device *lo, bool release)
 {
-	struct file *filp = lo->lo_backing_file;
+	struct file *filp = NULL;
 	gfp_t gfp = lo->old_gfp_mask;
 	struct block_device *bdev = lo->lo_device;
+	int err = 0;
+	bool partscan = false;
+	int lo_number;
 
-	if (lo->lo_state != Lo_bound)
-		return -ENXIO;
-
-	/*
-	 * If we've explicitly asked to tear down the loop device,
-	 * and it has an elevated reference count, set it for auto-teardown when
-	 * the last reference goes away. This stops $!~#$@ udev from
-	 * preventing teardown because it decided that it needs to run blkid on
-	 * the loopback device whenever they appear. xfstests is notorious for
-	 * failing tests because blkid via udev races with a losetup
-	 * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
-	 * command to fail with EBUSY.
-	 */
-	if (atomic_read(&lo->lo_refcnt) > 1) {
-		lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
-		mutex_unlock(&lo->lo_ctl_mutex);
-		return 0;
+	mutex_lock(&loop_ctl_mutex);
+	if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) {
+		err = -ENXIO;
+		goto out_unlock;
 	}
 
-	if (filp == NULL)
-		return -EINVAL;
+	filp = lo->lo_backing_file;
+	if (filp == NULL) {
+		err = -EINVAL;
+		goto out_unlock;
+	}
 
 	/* freeze request queue during the transition */
 	blk_mq_freeze_queue(lo->lo_queue);
 
 	spin_lock_irq(&lo->lo_lock);
-	lo->lo_state = Lo_rundown;
 	lo->lo_backing_file = NULL;
 	spin_unlock_irq(&lo->lo_lock);
 
@@ -1094,21 +1095,73 @@
 	module_put(THIS_MODULE);
 	blk_mq_unfreeze_queue(lo->lo_queue);
 
-	if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev)
-		loop_reread_partitions(lo, bdev);
+	partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev;
+	lo_number = lo->lo_number;
 	lo->lo_flags = 0;
 	if (!part_shift)
 		lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
 	loop_unprepare_queue(lo);
-	mutex_unlock(&lo->lo_ctl_mutex);
+out_unlock:
+	mutex_unlock(&loop_ctl_mutex);
+	if (partscan) {
+		/*
+		 * bd_mutex has been held already in release path, so don't
+		 * acquire it if this function is called in such case.
+		 *
+		 * If the reread partition isn't from release path, lo_refcnt
+		 * must be at least one and it can only become zero when the
+		 * current holder is released.
+		 */
+		if (release)
+			err = __blkdev_reread_part(bdev);
+		else
+			err = blkdev_reread_part(bdev);
+		pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
+			__func__, lo_number, err);
+		/* Device is gone, no point in returning error */
+		err = 0;
+	}
 	/*
-	 * Need not hold lo_ctl_mutex to fput backing file.
-	 * Calling fput holding lo_ctl_mutex triggers a circular
+	 * Need not hold loop_ctl_mutex to fput backing file.
+	 * Calling fput holding loop_ctl_mutex triggers a circular
 	 * lock dependency possibility warning as fput can take
-	 * bd_mutex which is usually taken before lo_ctl_mutex.
+	 * bd_mutex which is usually taken before loop_ctl_mutex.
 	 */
-	fput(filp);
-	return 0;
+	if (filp)
+		fput(filp);
+	return err;
+}
+
+static int loop_clr_fd(struct loop_device *lo)
+{
+	int err;
+
+	err = mutex_lock_killable(&loop_ctl_mutex);
+	if (err)
+		return err;
+	if (lo->lo_state != Lo_bound) {
+		mutex_unlock(&loop_ctl_mutex);
+		return -ENXIO;
+	}
+	/*
+	 * If we've explicitly asked to tear down the loop device,
+	 * and it has an elevated reference count, set it for auto-teardown when
+	 * the last reference goes away. This stops $!~#$@ udev from
+	 * preventing teardown because it decided that it needs to run blkid on
+	 * the loopback device whenever they appear. xfstests is notorious for
+	 * failing tests because blkid via udev races with a losetup
+	 * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
+	 * command to fail with EBUSY.
+	 */
+	if (atomic_read(&lo->lo_refcnt) > 1) {
+		lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
+		mutex_unlock(&loop_ctl_mutex);
+		return 0;
+	}
+	lo->lo_state = Lo_rundown;
+	mutex_unlock(&loop_ctl_mutex);
+
+	return __loop_clr_fd(lo, false);
 }
 
 static int
@@ -1117,47 +1170,72 @@
 	int err;
 	struct loop_func_table *xfer;
 	kuid_t uid = current_uid();
+	struct block_device *bdev;
+	bool partscan = false;
 
+	err = mutex_lock_killable(&loop_ctl_mutex);
+	if (err)
+		return err;
 	if (lo->lo_encrypt_key_size &&
 	    !uid_eq(lo->lo_key_owner, uid) &&
-	    !capable(CAP_SYS_ADMIN))
-		return -EPERM;
-	if (lo->lo_state != Lo_bound)
-		return -ENXIO;
-	if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
-		return -EINVAL;
+	    !capable(CAP_SYS_ADMIN)) {
+		err = -EPERM;
+		goto out_unlock;
+	}
+	if (lo->lo_state != Lo_bound) {
+		err = -ENXIO;
+		goto out_unlock;
+	}
+	if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) {
+		err = -EINVAL;
+		goto out_unlock;
+	}
+
+	if (lo->lo_offset != info->lo_offset ||
+	    lo->lo_sizelimit != info->lo_sizelimit) {
+		sync_blockdev(lo->lo_device);
+		kill_bdev(lo->lo_device);
+	}
 
 	/* I/O need to be drained during transfer transition */
 	blk_mq_freeze_queue(lo->lo_queue);
 
 	err = loop_release_xfer(lo);
 	if (err)
-		goto exit;
+		goto out_unfreeze;
 
 	if (info->lo_encrypt_type) {
 		unsigned int type = info->lo_encrypt_type;
 
 		if (type >= MAX_LO_CRYPT) {
 			err = -EINVAL;
-			goto exit;
+			goto out_unfreeze;
 		}
 		xfer = xfer_funcs[type];
 		if (xfer == NULL) {
 			err = -EINVAL;
-			goto exit;
+			goto out_unfreeze;
 		}
 	} else
 		xfer = NULL;
 
 	err = loop_init_xfer(lo, xfer, info);
 	if (err)
-		goto exit;
+		goto out_unfreeze;
 
 	if (lo->lo_offset != info->lo_offset ||
 	    lo->lo_sizelimit != info->lo_sizelimit) {
+		/* kill_bdev should have truncated all the pages */
+		if (lo->lo_device->bd_inode->i_mapping->nrpages) {
+			err = -EAGAIN;
+			pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
+				__func__, lo->lo_number, lo->lo_file_name,
+				lo->lo_device->bd_inode->i_mapping->nrpages);
+			goto out_unfreeze;
+		}
 		if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
 			err = -EFBIG;
-			goto exit;
+			goto out_unfreeze;
 		}
 	}
 
@@ -1189,15 +1267,20 @@
 	/* update dio if lo_offset or transfer is changed */
 	__loop_update_dio(lo, lo->use_dio);
 
- exit:
+out_unfreeze:
 	blk_mq_unfreeze_queue(lo->lo_queue);
 
 	if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) &&
 	     !(lo->lo_flags & LO_FLAGS_PARTSCAN)) {
 		lo->lo_flags |= LO_FLAGS_PARTSCAN;
 		lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
-		loop_reread_partitions(lo, lo->lo_device);
+		bdev = lo->lo_device;
+		partscan = true;
 	}
+out_unlock:
+	mutex_unlock(&loop_ctl_mutex);
+	if (partscan)
+		loop_reread_partitions(lo, bdev);
 
 	return err;
 }
@@ -1205,12 +1288,15 @@
 static int
 loop_get_status(struct loop_device *lo, struct loop_info64 *info)
 {
-	struct file *file;
+	struct path path;
 	struct kstat stat;
 	int ret;
 
+	ret = mutex_lock_killable(&loop_ctl_mutex);
+	if (ret)
+		return ret;
 	if (lo->lo_state != Lo_bound) {
-		mutex_unlock(&lo->lo_ctl_mutex);
+		mutex_unlock(&loop_ctl_mutex);
 		return -ENXIO;
 	}
 
@@ -1229,17 +1315,17 @@
 		       lo->lo_encrypt_key_size);
 	}
 
-	/* Drop lo_ctl_mutex while we call into the filesystem. */
-	file = get_file(lo->lo_backing_file);
-	mutex_unlock(&lo->lo_ctl_mutex);
-	ret = vfs_getattr(&file->f_path, &stat, STATX_INO,
-			  AT_STATX_SYNC_AS_STAT);
+	/* Drop loop_ctl_mutex while we call into the filesystem. */
+	path = lo->lo_backing_file->f_path;
+	path_get(&path);
+	mutex_unlock(&loop_ctl_mutex);
+	ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
 	if (!ret) {
 		info->lo_device = huge_encode_dev(stat.dev);
 		info->lo_inode = stat.ino;
 		info->lo_rdevice = huge_encode_dev(stat.rdev);
 	}
-	fput(file);
+	path_put(&path);
 	return ret;
 }
 
@@ -1323,10 +1409,8 @@
 	struct loop_info64 info64;
 	int err;
 
-	if (!arg) {
-		mutex_unlock(&lo->lo_ctl_mutex);
+	if (!arg)
 		return -EINVAL;
-	}
 	err = loop_get_status(lo, &info64);
 	if (!err)
 		err = loop_info64_to_old(&info64, &info);
@@ -1341,10 +1425,8 @@
 	struct loop_info64 info64;
 	int err;
 
-	if (!arg) {
-		mutex_unlock(&lo->lo_ctl_mutex);
+	if (!arg)
 		return -EINVAL;
-	}
 	err = loop_get_status(lo, &info64);
 	if (!err && copy_to_user(arg, &info64, sizeof(info64)))
 		err = -EFAULT;
@@ -1376,22 +1458,64 @@
 
 static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
 {
+	int err = 0;
+
 	if (lo->lo_state != Lo_bound)
 		return -ENXIO;
 
 	if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg))
 		return -EINVAL;
 
+	if (lo->lo_queue->limits.logical_block_size != arg) {
+		sync_blockdev(lo->lo_device);
+		kill_bdev(lo->lo_device);
+	}
+
 	blk_mq_freeze_queue(lo->lo_queue);
 
+	/* kill_bdev should have truncated all the pages */
+	if (lo->lo_queue->limits.logical_block_size != arg &&
+			lo->lo_device->bd_inode->i_mapping->nrpages) {
+		err = -EAGAIN;
+		pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
+			__func__, lo->lo_number, lo->lo_file_name,
+			lo->lo_device->bd_inode->i_mapping->nrpages);
+		goto out_unfreeze;
+	}
+
 	blk_queue_logical_block_size(lo->lo_queue, arg);
 	blk_queue_physical_block_size(lo->lo_queue, arg);
 	blk_queue_io_min(lo->lo_queue, arg);
 	loop_update_dio(lo);
-
+out_unfreeze:
 	blk_mq_unfreeze_queue(lo->lo_queue);
 
-	return 0;
+	return err;
+}
+
+static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
+			   unsigned long arg)
+{
+	int err;
+
+	err = mutex_lock_killable(&loop_ctl_mutex);
+	if (err)
+		return err;
+	switch (cmd) {
+	case LOOP_SET_CAPACITY:
+		err = loop_set_capacity(lo);
+		break;
+	case LOOP_SET_DIRECT_IO:
+		err = loop_set_dio(lo, arg);
+		break;
+	case LOOP_SET_BLOCK_SIZE:
+		err = loop_set_block_size(lo, arg);
+		break;
+	default:
+		err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
+	}
+	mutex_unlock(&loop_ctl_mutex);
+	return err;
 }
 
 static int lo_ioctl(struct block_device *bdev, fmode_t mode,
@@ -1400,64 +1524,42 @@
 	struct loop_device *lo = bdev->bd_disk->private_data;
 	int err;
 
-	err = mutex_lock_killable_nested(&lo->lo_ctl_mutex, 1);
-	if (err)
-		goto out_unlocked;
-
 	switch (cmd) {
 	case LOOP_SET_FD:
-		err = loop_set_fd(lo, mode, bdev, arg);
-		break;
+		return loop_set_fd(lo, mode, bdev, arg);
 	case LOOP_CHANGE_FD:
-		err = loop_change_fd(lo, bdev, arg);
-		break;
+		return loop_change_fd(lo, bdev, arg);
 	case LOOP_CLR_FD:
-		/* loop_clr_fd would have unlocked lo_ctl_mutex on success */
-		err = loop_clr_fd(lo);
-		if (!err)
-			goto out_unlocked;
-		break;
+		return loop_clr_fd(lo);
 	case LOOP_SET_STATUS:
 		err = -EPERM;
-		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
+		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
 			err = loop_set_status_old(lo,
 					(struct loop_info __user *)arg);
+		}
 		break;
 	case LOOP_GET_STATUS:
-		err = loop_get_status_old(lo, (struct loop_info __user *) arg);
-		/* loop_get_status() unlocks lo_ctl_mutex */
-		goto out_unlocked;
+		return loop_get_status_old(lo, (struct loop_info __user *) arg);
 	case LOOP_SET_STATUS64:
 		err = -EPERM;
-		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
+		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
 			err = loop_set_status64(lo,
 					(struct loop_info64 __user *) arg);
+		}
 		break;
 	case LOOP_GET_STATUS64:
-		err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
-		/* loop_get_status() unlocks lo_ctl_mutex */
-		goto out_unlocked;
+		return loop_get_status64(lo, (struct loop_info64 __user *) arg);
 	case LOOP_SET_CAPACITY:
-		err = -EPERM;
-		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
-			err = loop_set_capacity(lo);
-		break;
 	case LOOP_SET_DIRECT_IO:
-		err = -EPERM;
-		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
-			err = loop_set_dio(lo, arg);
-		break;
 	case LOOP_SET_BLOCK_SIZE:
-		err = -EPERM;
-		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
-			err = loop_set_block_size(lo, arg);
-		break;
+		if (!(mode & FMODE_WRITE) && !capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		/* Fall through */
 	default:
-		err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
+		err = lo_simple_ioctl(lo, cmd, arg);
+		break;
 	}
-	mutex_unlock(&lo->lo_ctl_mutex);
 
-out_unlocked:
 	return err;
 }
 
@@ -1571,10 +1673,8 @@
 	struct loop_info64 info64;
 	int err;
 
-	if (!arg) {
-		mutex_unlock(&lo->lo_ctl_mutex);
+	if (!arg)
 		return -EINVAL;
-	}
 	err = loop_get_status(lo, &info64);
 	if (!err)
 		err = loop_info64_to_compat(&info64, arg);
@@ -1589,20 +1689,12 @@
 
 	switch(cmd) {
 	case LOOP_SET_STATUS:
-		err = mutex_lock_killable(&lo->lo_ctl_mutex);
-		if (!err) {
-			err = loop_set_status_compat(lo,
-						     (const struct compat_loop_info __user *)arg);
-			mutex_unlock(&lo->lo_ctl_mutex);
-		}
+		err = loop_set_status_compat(lo,
+			     (const struct compat_loop_info __user *)arg);
 		break;
 	case LOOP_GET_STATUS:
-		err = mutex_lock_killable(&lo->lo_ctl_mutex);
-		if (!err) {
-			err = loop_get_status_compat(lo,
-						     (struct compat_loop_info __user *)arg);
-			/* loop_get_status() unlocks lo_ctl_mutex */
-		}
+		err = loop_get_status_compat(lo,
+				     (struct compat_loop_info __user *)arg);
 		break;
 	case LOOP_SET_CAPACITY:
 	case LOOP_CLR_FD:
@@ -1626,9 +1718,11 @@
 static int lo_open(struct block_device *bdev, fmode_t mode)
 {
 	struct loop_device *lo;
-	int err = 0;
+	int err;
 
-	mutex_lock(&loop_index_mutex);
+	err = mutex_lock_killable(&loop_ctl_mutex);
+	if (err)
+		return err;
 	lo = bdev->bd_disk->private_data;
 	if (!lo) {
 		err = -ENXIO;
@@ -1637,26 +1731,30 @@
 
 	atomic_inc(&lo->lo_refcnt);
 out:
-	mutex_unlock(&loop_index_mutex);
+	mutex_unlock(&loop_ctl_mutex);
 	return err;
 }
 
-static void __lo_release(struct loop_device *lo)
+static void lo_release(struct gendisk *disk, fmode_t mode)
 {
-	int err;
+	struct loop_device *lo;
 
+	mutex_lock(&loop_ctl_mutex);
+	lo = disk->private_data;
 	if (atomic_dec_return(&lo->lo_refcnt))
-		return;
+		goto out_unlock;
 
-	mutex_lock(&lo->lo_ctl_mutex);
 	if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
+		if (lo->lo_state != Lo_bound)
+			goto out_unlock;
+		lo->lo_state = Lo_rundown;
+		mutex_unlock(&loop_ctl_mutex);
 		/*
 		 * In autoclear mode, stop the loop thread
 		 * and remove configuration after last close.
 		 */
-		err = loop_clr_fd(lo);
-		if (!err)
-			return;
+		__loop_clr_fd(lo, true);
+		return;
 	} else if (lo->lo_state == Lo_bound) {
 		/*
 		 * Otherwise keep thread (if running) and config,
@@ -1666,14 +1764,8 @@
 		blk_mq_unfreeze_queue(lo->lo_queue);
 	}
 
-	mutex_unlock(&lo->lo_ctl_mutex);
-}
-
-static void lo_release(struct gendisk *disk, fmode_t mode)
-{
-	mutex_lock(&loop_index_mutex);
-	__lo_release(disk->private_data);
-	mutex_unlock(&loop_index_mutex);
+out_unlock:
+	mutex_unlock(&loop_ctl_mutex);
 }
 
 static const struct block_device_operations lo_fops = {
@@ -1712,10 +1804,10 @@
 	struct loop_device *lo = ptr;
 	struct loop_func_table *xfer = data;
 
-	mutex_lock(&lo->lo_ctl_mutex);
+	mutex_lock(&loop_ctl_mutex);
 	if (lo->lo_encryption == xfer)
 		loop_release_xfer(lo);
-	mutex_unlock(&lo->lo_ctl_mutex);
+	mutex_unlock(&loop_ctl_mutex);
 	return 0;
 }
 
@@ -1896,7 +1988,6 @@
 	if (!part_shift)
 		disk->flags |= GENHD_FL_NO_PART_SCAN;
 	disk->flags |= GENHD_FL_EXT_DEVT;
-	mutex_init(&lo->lo_ctl_mutex);
 	atomic_set(&lo->lo_refcnt, 0);
 	lo->lo_number		= i;
 	spin_lock_init(&lo->lo_lock);
@@ -1975,7 +2066,7 @@
 	struct kobject *kobj;
 	int err;
 
-	mutex_lock(&loop_index_mutex);
+	mutex_lock(&loop_ctl_mutex);
 	err = loop_lookup(&lo, MINOR(dev) >> part_shift);
 	if (err < 0)
 		err = loop_add(&lo, MINOR(dev) >> part_shift);
@@ -1983,7 +2074,7 @@
 		kobj = NULL;
 	else
 		kobj = get_disk_and_module(lo->lo_disk);
-	mutex_unlock(&loop_index_mutex);
+	mutex_unlock(&loop_ctl_mutex);
 
 	*part = 0;
 	return kobj;
@@ -1993,9 +2084,13 @@
 			       unsigned long parm)
 {
 	struct loop_device *lo;
-	int ret = -ENOSYS;
+	int ret;
 
-	mutex_lock(&loop_index_mutex);
+	ret = mutex_lock_killable(&loop_ctl_mutex);
+	if (ret)
+		return ret;
+
+	ret = -ENOSYS;
 	switch (cmd) {
 	case LOOP_CTL_ADD:
 		ret = loop_lookup(&lo, parm);
@@ -2009,21 +2104,15 @@
 		ret = loop_lookup(&lo, parm);
 		if (ret < 0)
 			break;
-		ret = mutex_lock_killable(&lo->lo_ctl_mutex);
-		if (ret)
-			break;
 		if (lo->lo_state != Lo_unbound) {
 			ret = -EBUSY;
-			mutex_unlock(&lo->lo_ctl_mutex);
 			break;
 		}
 		if (atomic_read(&lo->lo_refcnt) > 0) {
 			ret = -EBUSY;
-			mutex_unlock(&lo->lo_ctl_mutex);
 			break;
 		}
 		lo->lo_disk->private_data = NULL;
-		mutex_unlock(&lo->lo_ctl_mutex);
 		idr_remove(&loop_index_idr, lo->lo_number);
 		loop_remove(lo);
 		break;
@@ -2033,7 +2122,7 @@
 			break;
 		ret = loop_add(&lo, -1);
 	}
-	mutex_unlock(&loop_index_mutex);
+	mutex_unlock(&loop_ctl_mutex);
 
 	return ret;
 }
@@ -2117,10 +2206,10 @@
 				  THIS_MODULE, loop_probe, NULL, NULL);
 
 	/* pre-create number of devices given by config or max_loop */
-	mutex_lock(&loop_index_mutex);
+	mutex_lock(&loop_ctl_mutex);
 	for (i = 0; i < nr; i++)
 		loop_add(&lo, i);
-	mutex_unlock(&loop_index_mutex);
+	mutex_unlock(&loop_ctl_mutex);
 
 	printk(KERN_INFO "loop: module loaded\n");
 	return 0;
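
Most of the conversions above share one shape: decide everything while holding loop_ctl_mutex, snapshot the decisions into locals (partscan, bdev, the file reference to drop), unlock, and only then run the operations that may themselves take bd_mutex; that ordering is what breaks the reported lock inversion. A pthread sketch of the snapshot-then-unlock pattern, with placeholder work functions:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t ctl_mutex = PTHREAD_MUTEX_INITIALIZER;

struct dev { bool partscan_flag; int number; };

/* Placeholder for blkdev_reread_part()/fput(): must NOT be called
 * with ctl_mutex held, since it takes other locks of its own. */
static void reread_partitions(int number)
{
	printf("rescanning loop%d outside the lock\n", number);
}

static int change_fd(struct dev *d)
{
	bool partscan;
	int number;

	pthread_mutex_lock(&ctl_mutex);
	/* ... validate state and swap the backing file here ... */
	partscan = d->partscan_flag;	/* snapshot under the lock */
	number = d->number;
	pthread_mutex_unlock(&ctl_mutex);

	if (partscan)			/* heavy work after unlock */
		reread_partitions(number);
	return 0;
}

int main(void)
{
	struct dev d = { true, 0 };

	return change_fd(&d);
}
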
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index 4d42c7a..af75a5e 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -54,7 +54,6 @@
 
 	spinlock_t		lo_lock;
 	int			lo_state;
-	struct mutex		lo_ctl_mutex;
 	struct kthread_worker	worker;
 	struct task_struct	*worker_task;
 	bool			use_dio;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 14a5125..c13a6d1 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -288,9 +288,10 @@
 	blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
 	set_capacity(nbd->disk, config->bytesize >> 9);
 	if (bdev) {
-		if (bdev->bd_disk)
+		if (bdev->bd_disk) {
 			bd_set_size(bdev, config->bytesize);
-		else
+			set_blocksize(bdev, config->blksize);
+		} else
 			bdev->bd_invalidated = 1;
 		bdput(bdev);
 	}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 73ed5f3..585378b 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -5982,7 +5982,6 @@
 	struct list_head *tmp;
 	int dev_id;
 	char opt_buf[6];
-	bool already = false;
 	bool force = false;
 	int ret;
 
@@ -6015,13 +6014,13 @@
 		spin_lock_irq(&rbd_dev->lock);
 		if (rbd_dev->open_count && !force)
 			ret = -EBUSY;
-		else
-			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
-							&rbd_dev->flags);
+		else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
+					  &rbd_dev->flags))
+			ret = -EINPROGRESS;
 		spin_unlock_irq(&rbd_dev->lock);
 	}
 	spin_unlock(&rbd_dev_list_lock);
-	if (ret < 0 || already)
+	if (ret)
 		return ret;
 
 	if (force) {
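
Folding the removal check into test_and_set_bit() and returning -EINPROGRESS makes RBD_DEV_FLAG_REMOVING a single-winner latch: exactly one caller proceeds with the teardown, and every racer gets a definite error instead of a silent success. The same idiom with a C11 atomic flag, using hypothetical names:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag removing = ATOMIC_FLAG_INIT;

/* 0 for the single winner; -1 (think -EINPROGRESS) for everyone
 * who races in after the flag is already set. */
static int start_remove(void)
{
	if (atomic_flag_test_and_set(&removing))
		return -1;
	return 0;
}

int main(void)
{
	printf("first caller:  %d\n", start_remove());	/* 0 */
	printf("second caller: %d\n", start_remove());	/* -1 */
	return 0;
}
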
diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig
index 6352357..99a2c60 100644
--- a/drivers/block/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -16,7 +16,7 @@
 	  See Documentation/blockdev/zram.txt for more information.
 
 config ZRAM_WRITEBACK
-       bool "Write back incompressible page to backing device"
+       bool "Write back incompressible or idle page to backing device"
        depends on ZRAM
        default n
        help
@@ -25,6 +25,9 @@
 	 For this feature, admin should set up backing device via
 	 /sys/block/zramX/backing_dev.
 
+	 With /sys/block/zramX/{idle,writeback}, an application can ask
+	 for idle pages to be written back to the backing device to
+	 save memory.
+
 	 See Documentation/blockdev/zram.txt for more information.
 
 config ZRAM_MEMORY_TRACKING
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 586992f..50045f0 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -52,15 +52,23 @@
 static size_t huge_class_size;
 
 static void zram_free_page(struct zram *zram, size_t index);
+static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
+				u32 index, int offset, struct bio *bio);
+
+
+{
+	return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
+}
 
 static void zram_slot_lock(struct zram *zram, u32 index)
 {
-	bit_spin_lock(ZRAM_LOCK, &zram->table[index].value);
+	bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags);
 }
 
 static void zram_slot_unlock(struct zram *zram, u32 index)
 {
-	bit_spin_unlock(ZRAM_LOCK, &zram->table[index].value);
+	bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
 }
 
 static inline bool init_done(struct zram *zram)
@@ -68,13 +76,6 @@
 	return zram->disksize;
 }
 
-static inline bool zram_allocated(struct zram *zram, u32 index)
-{
-
-	return (zram->table[index].value >> (ZRAM_FLAG_SHIFT + 1)) ||
-					zram->table[index].handle;
-}
-
 static inline struct zram *dev_to_zram(struct device *dev)
 {
 	return (struct zram *)dev_to_disk(dev)->private_data;
@@ -94,19 +95,19 @@
 static bool zram_test_flag(struct zram *zram, u32 index,
 			enum zram_pageflags flag)
 {
-	return zram->table[index].value & BIT(flag);
+	return zram->table[index].flags & BIT(flag);
 }
 
 static void zram_set_flag(struct zram *zram, u32 index,
 			enum zram_pageflags flag)
 {
-	zram->table[index].value |= BIT(flag);
+	zram->table[index].flags |= BIT(flag);
 }
 
 static void zram_clear_flag(struct zram *zram, u32 index,
 			enum zram_pageflags flag)
 {
-	zram->table[index].value &= ~BIT(flag);
+	zram->table[index].flags &= ~BIT(flag);
 }
 
 static inline void zram_set_element(struct zram *zram, u32 index,
@@ -122,15 +123,22 @@
 
 static size_t zram_get_obj_size(struct zram *zram, u32 index)
 {
-	return zram->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
+	return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
 }
 
 static void zram_set_obj_size(struct zram *zram,
 					u32 index, size_t size)
 {
-	unsigned long flags = zram->table[index].value >> ZRAM_FLAG_SHIFT;
+	unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT;
 
-	zram->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
+	zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size;
+}
+
+static inline bool zram_allocated(struct zram *zram, u32 index)
+{
+	return zram_get_obj_size(zram, index) ||
+			zram_test_flag(zram, index, ZRAM_SAME) ||
+			zram_test_flag(zram, index, ZRAM_WB);
 }
 
 #if PAGE_SIZE != 4096
@@ -276,17 +284,125 @@
 	return len;
 }
 
-#ifdef CONFIG_ZRAM_WRITEBACK
-static bool zram_wb_enabled(struct zram *zram)
+static ssize_t idle_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
 {
-	return zram->backing_dev;
+	struct zram *zram = dev_to_zram(dev);
+	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
+	int index;
+	char mode_buf[8];
+	ssize_t sz;
+
+	sz = strscpy(mode_buf, buf, sizeof(mode_buf));
+	if (sz <= 0)
+		return -EINVAL;
+
+	/* ignore trailing new line */
+	if (mode_buf[sz - 1] == '\n')
+		mode_buf[sz - 1] = 0x00;
+
+	if (strcmp(mode_buf, "all"))
+		return -EINVAL;
+
+	down_read(&zram->init_lock);
+	if (!init_done(zram)) {
+		up_read(&zram->init_lock);
+		return -EINVAL;
+	}
+
+	for (index = 0; index < nr_pages; index++) {
+		/*
+		 * Do not mark a ZRAM_UNDER_WB slot as ZRAM_IDLE, to close a race.
+		 * See the comment in writeback_store.
+		 */
+		zram_slot_lock(zram, index);
+		if (zram_allocated(zram, index) &&
+				!zram_test_flag(zram, index, ZRAM_UNDER_WB))
+			zram_set_flag(zram, index, ZRAM_IDLE);
+		zram_slot_unlock(zram, index);
+	}
+
+	up_read(&zram->init_lock);
+
+	return len;
+}
+
+#ifdef CONFIG_ZRAM_WRITEBACK
+static ssize_t writeback_limit_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct zram *zram = dev_to_zram(dev);
+	u64 val;
+	ssize_t ret = -EINVAL;
+
+	if (kstrtoull(buf, 10, &val))
+		return ret;
+
+	down_read(&zram->init_lock);
+	spin_lock(&zram->wb_limit_lock);
+	zram->wb_limit_enable = val;
+	spin_unlock(&zram->wb_limit_lock);
+	up_read(&zram->init_lock);
+	ret = len;
+
+	return ret;
+}
+
+static ssize_t writeback_limit_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	bool val;
+	struct zram *zram = dev_to_zram(dev);
+
+	down_read(&zram->init_lock);
+	spin_lock(&zram->wb_limit_lock);
+	val = zram->wb_limit_enable;
+	spin_unlock(&zram->wb_limit_lock);
+	up_read(&zram->init_lock);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t writeback_limit_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct zram *zram = dev_to_zram(dev);
+	u64 val;
+	ssize_t ret = -EINVAL;
+
+	if (kstrtoull(buf, 10, &val))
+		return ret;
+
+	down_read(&zram->init_lock);
+	spin_lock(&zram->wb_limit_lock);
+	zram->bd_wb_limit = val;
+	spin_unlock(&zram->wb_limit_lock);
+	up_read(&zram->init_lock);
+	ret = len;
+
+	return ret;
+}
+
+static ssize_t writeback_limit_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	u64 val;
+	struct zram *zram = dev_to_zram(dev);
+
+	down_read(&zram->init_lock);
+	spin_lock(&zram->wb_limit_lock);
+	val = zram->bd_wb_limit;
+	spin_unlock(&zram->wb_limit_lock);
+	up_read(&zram->init_lock);
+
+	return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
 }
 
 static void reset_bdev(struct zram *zram)
 {
 	struct block_device *bdev;
 
-	if (!zram_wb_enabled(zram))
+	if (!zram->backing_dev)
 		return;
 
 	bdev = zram->bdev;
@@ -313,7 +429,7 @@
 	ssize_t ret;
 
 	down_read(&zram->init_lock);
-	if (!zram_wb_enabled(zram)) {
+	if (!zram->backing_dev) {
 		memcpy(buf, "none\n", 5);
 		up_read(&zram->init_lock);
 		return 5;
@@ -382,8 +498,10 @@
 
 	bdev = bdgrab(I_BDEV(inode));
 	err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
-	if (err < 0)
+	if (err < 0) {
+		bdev = NULL;
 		goto out;
+	}
 
 	nr_pages = i_size_read(inode) >> PAGE_SHIFT;
 	bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
@@ -399,7 +517,6 @@
 		goto out;
 
 	reset_bdev(zram);
-	spin_lock_init(&zram->bitmap_lock);
 
 	zram->old_block_size = old_block_size;
 	zram->bdev = bdev;
@@ -441,32 +558,29 @@
 	return err;
 }
 
-static unsigned long get_entry_bdev(struct zram *zram)
+static unsigned long alloc_block_bdev(struct zram *zram)
 {
-	unsigned long entry;
-
-	spin_lock(&zram->bitmap_lock);
+	unsigned long blk_idx = 1;
+retry:
 	/* skip bit 0 to avoid confusion with zram.handle == 0 */
-	entry = find_next_zero_bit(zram->bitmap, zram->nr_pages, 1);
-	if (entry == zram->nr_pages) {
-		spin_unlock(&zram->bitmap_lock);
+	blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx);
+	if (blk_idx == zram->nr_pages)
 		return 0;
-	}
 
-	set_bit(entry, zram->bitmap);
-	spin_unlock(&zram->bitmap_lock);
+	if (test_and_set_bit(blk_idx, zram->bitmap))
+		goto retry;
 
-	return entry;
+	atomic64_inc(&zram->stats.bd_count);
+	return blk_idx;
 }
 
-static void put_entry_bdev(struct zram *zram, unsigned long entry)
+static void free_block_bdev(struct zram *zram, unsigned long blk_idx)
 {
 	int was_set;
 
-	spin_lock(&zram->bitmap_lock);
-	was_set = test_and_clear_bit(entry, zram->bitmap);
-	spin_unlock(&zram->bitmap_lock);
+	was_set = test_and_clear_bit(blk_idx, zram->bitmap);
 	WARN_ON_ONCE(!was_set);
+	atomic64_dec(&zram->stats.bd_count);
 }
 
 static void zram_page_end_io(struct bio *bio)
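
alloc_block_bdev() above drops the old bitmap_lock in favour of a find-then-claim retry loop: find_next_zero_bit() only suggests a candidate, the atomic test_and_set_bit() arbitrates the race, and a loser simply rescans from the same index. A userspace sketch of that lock-free bitmap claim, using GCC/Clang atomic builtins on a single word:

#include <stdio.h>

#define NR_BLOCKS 64

static unsigned long bitmap;	/* bit i set => block i in use */

/* Returns a claimed block in [1, NR_BLOCKS), or 0 when full.
 * Bit 0 is skipped so a stored handle of 0 stays unambiguous. */
static unsigned long alloc_block(void)
{
	unsigned long blk = 1;

retry:
	while (blk < NR_BLOCKS && ((bitmap >> blk) & 1))
		blk++;			/* find_next_zero_bit() hint */
	if (blk == NR_BLOCKS)
		return 0;
	/* Atomically claim the bit; if another thread won the race,
	 * the fetched value already has it set and we look again. */
	if (__atomic_fetch_or(&bitmap, 1UL << blk, __ATOMIC_SEQ_CST) &
	    (1UL << blk))
		goto retry;
	return blk;
}

static void free_block(unsigned long blk)
{
	__atomic_fetch_and(&bitmap, ~(1UL << blk), __ATOMIC_SEQ_CST);
}

int main(void)
{
	unsigned long a = alloc_block(), b = alloc_block();

	printf("claimed %lu and %lu\n", a, b);		/* 1 and 2 */
	free_block(a);
	printf("reclaimed %lu\n", alloc_block());	/* 1 again */
	return 0;
}
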
@@ -509,6 +623,172 @@
 	return 1;
 }
 
+#define HUGE_WRITEBACK 1
+#define IDLE_WRITEBACK 2
+
+static ssize_t writeback_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct zram *zram = dev_to_zram(dev);
+	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
+	unsigned long index;
+	struct bio bio;
+	struct bio_vec bio_vec;
+	struct page *page;
+	ssize_t ret, sz;
+	char mode_buf[8];
+	int mode = -1;
+	unsigned long blk_idx = 0;
+
+	sz = strscpy(mode_buf, buf, sizeof(mode_buf));
+	if (sz <= 0)
+		return -EINVAL;
+
+	/* ignore trailing newline */
+	if (mode_buf[sz - 1] == '\n')
+		mode_buf[sz - 1] = 0x00;
+
+	if (!strcmp(mode_buf, "idle"))
+		mode = IDLE_WRITEBACK;
+	else if (!strcmp(mode_buf, "huge"))
+		mode = HUGE_WRITEBACK;
+
+	if (mode == -1)
+		return -EINVAL;
+
+	down_read(&zram->init_lock);
+	if (!init_done(zram)) {
+		ret = -EINVAL;
+		goto release_init_lock;
+	}
+
+	if (!zram->backing_dev) {
+		ret = -ENODEV;
+		goto release_init_lock;
+	}
+
+	page = alloc_page(GFP_KERNEL);
+	if (!page) {
+		ret = -ENOMEM;
+		goto release_init_lock;
+	}
+
+	for (index = 0; index < nr_pages; index++) {
+		struct bio_vec bvec;
+
+		bvec.bv_page = page;
+		bvec.bv_len = PAGE_SIZE;
+		bvec.bv_offset = 0;
+
+		spin_lock(&zram->wb_limit_lock);
+		if (zram->wb_limit_enable && !zram->bd_wb_limit) {
+			spin_unlock(&zram->wb_limit_lock);
+			ret = -EIO;
+			break;
+		}
+		spin_unlock(&zram->wb_limit_lock);
+
+		if (!blk_idx) {
+			blk_idx = alloc_block_bdev(zram);
+			if (!blk_idx) {
+				ret = -ENOSPC;
+				break;
+			}
+		}
+
+		zram_slot_lock(zram, index);
+		if (!zram_allocated(zram, index))
+			goto next;
+
+		if (zram_test_flag(zram, index, ZRAM_WB) ||
+				zram_test_flag(zram, index, ZRAM_SAME) ||
+				zram_test_flag(zram, index, ZRAM_UNDER_WB))
+			goto next;
+
+		if (mode == IDLE_WRITEBACK &&
+			  !zram_test_flag(zram, index, ZRAM_IDLE))
+			goto next;
+		if (mode == HUGE_WRITEBACK &&
+			  !zram_test_flag(zram, index, ZRAM_HUGE))
+			goto next;
+		/*
+		 * Clearing ZRAM_UNDER_WB is the caller's duty;
+		 * zram_free_page() never clears it.
+		 */
+		zram_set_flag(zram, index, ZRAM_UNDER_WB);
+		/* Needed to handle the hugepage writeback race */
+		zram_set_flag(zram, index, ZRAM_IDLE);
+		zram_slot_unlock(zram, index);
+		if (zram_bvec_read(zram, &bvec, index, 0, NULL)) {
+			zram_slot_lock(zram, index);
+			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
+			zram_clear_flag(zram, index, ZRAM_IDLE);
+			zram_slot_unlock(zram, index);
+			continue;
+		}
+
+		bio_init(&bio, &bio_vec, 1);
+		bio_set_dev(&bio, zram->bdev);
+		bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
+		bio.bi_opf = REQ_OP_WRITE | REQ_SYNC;
+
+		bio_add_page(&bio, bvec.bv_page, bvec.bv_len,
+				bvec.bv_offset);
+		/*
+		 * XXX: A single page I/O is inefficient for writes, but
+		 * it is acceptable as a starting point.
+		 */
+		ret = submit_bio_wait(&bio);
+		if (ret) {
+			zram_slot_lock(zram, index);
+			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
+			zram_clear_flag(zram, index, ZRAM_IDLE);
+			zram_slot_unlock(zram, index);
+			continue;
+		}
+
+		atomic64_inc(&zram->stats.bd_writes);
+		/*
+		 * We released zram_slot_lock, so we need to check whether
+		 * the slot has changed. If the slot was freed,
+		 * zram_allocated() catches that easily.
+		 * The subtle case is the slot being freed, reallocated and
+		 * marked ZRAM_IDLE again. To close that race, idle_store
+		 * doesn't mark a slot ZRAM_IDLE while it is ZRAM_UNDER_WB,
+		 * so checking the ZRAM_IDLE bit here suffices.
+		 */
+		zram_slot_lock(zram, index);
+		if (!zram_allocated(zram, index) ||
+			  !zram_test_flag(zram, index, ZRAM_IDLE)) {
+			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
+			zram_clear_flag(zram, index, ZRAM_IDLE);
+			goto next;
+		}
+
+		zram_free_page(zram, index);
+		zram_clear_flag(zram, index, ZRAM_UNDER_WB);
+		zram_set_flag(zram, index, ZRAM_WB);
+		zram_set_element(zram, index, blk_idx);
+		blk_idx = 0;
+		atomic64_inc(&zram->stats.pages_stored);
+		spin_lock(&zram->wb_limit_lock);
+		if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
+			zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
+		spin_unlock(&zram->wb_limit_lock);
+next:
+		zram_slot_unlock(zram, index);
+	}
+
+	if (blk_idx)
+		free_block_bdev(zram, blk_idx);
+	ret = len;
+	__free_page(page);
+release_init_lock:
+	up_read(&zram->init_lock);
+
+	return ret;
+}
+
 struct zram_work {
 	struct work_struct work;
 	struct zram *zram;
@@ -561,79 +841,21 @@
 static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
 			unsigned long entry, struct bio *parent, bool sync)
 {
+	atomic64_inc(&zram->stats.bd_reads);
 	if (sync)
 		return read_from_bdev_sync(zram, bvec, entry, parent);
 	else
 		return read_from_bdev_async(zram, bvec, entry, parent);
 }
-
-static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
-					u32 index, struct bio *parent,
-					unsigned long *pentry)
-{
-	struct bio *bio;
-	unsigned long entry;
-
-	bio = bio_alloc(GFP_ATOMIC, 1);
-	if (!bio)
-		return -ENOMEM;
-
-	entry = get_entry_bdev(zram);
-	if (!entry) {
-		bio_put(bio);
-		return -ENOSPC;
-	}
-
-	bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
-	bio_set_dev(bio, zram->bdev);
-	if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len,
-					bvec->bv_offset)) {
-		bio_put(bio);
-		put_entry_bdev(zram, entry);
-		return -EIO;
-	}
-
-	if (!parent) {
-		bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
-		bio->bi_end_io = zram_page_end_io;
-	} else {
-		bio->bi_opf = parent->bi_opf;
-		bio_chain(bio, parent);
-	}
-
-	submit_bio(bio);
-	*pentry = entry;
-
-	return 0;
-}
-
-static void zram_wb_clear(struct zram *zram, u32 index)
-{
-	unsigned long entry;
-
-	zram_clear_flag(zram, index, ZRAM_WB);
-	entry = zram_get_element(zram, index);
-	zram_set_element(zram, index, 0);
-	put_entry_bdev(zram, entry);
-}
-
 #else
-static bool zram_wb_enabled(struct zram *zram) { return false; }
 static inline void reset_bdev(struct zram *zram) {};
-static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
-					u32 index, struct bio *parent,
-					unsigned long *pentry)
-
-{
-	return -EIO;
-}
-
 static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
 			unsigned long entry, struct bio *parent, bool sync)
 {
 	return -EIO;
 }
-static void zram_wb_clear(struct zram *zram, u32 index) {}
+
+static void free_block_bdev(struct zram *zram, unsigned long blk_idx) {};
 #endif
 
 #ifdef CONFIG_ZRAM_MEMORY_TRACKING
@@ -652,14 +874,10 @@
 
 static void zram_accessed(struct zram *zram, u32 index)
 {
+	zram_clear_flag(zram, index, ZRAM_IDLE);
 	zram->table[index].ac_time = ktime_get_boottime();
 }
 
-static void zram_reset_access(struct zram *zram, u32 index)
-{
-	zram->table[index].ac_time = 0;
-}
-
 static ssize_t read_block_state(struct file *file, char __user *buf,
 				size_t count, loff_t *ppos)
 {
@@ -689,12 +907,13 @@
 
 		ts = ktime_to_timespec64(zram->table[index].ac_time);
 		copied = snprintf(kbuf + written, count,
-			"%12zd %12lld.%06lu %c%c%c\n",
+			"%12zd %12lld.%06lu %c%c%c%c\n",
 			index, (s64)ts.tv_sec,
 			ts.tv_nsec / NSEC_PER_USEC,
 			zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.',
 			zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.',
-			zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.');
+			zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.',
+			zram_test_flag(zram, index, ZRAM_IDLE) ? 'i' : '.');
 
 		if (count < copied) {
 			zram_slot_unlock(zram, index);
@@ -739,8 +958,10 @@
 #else
 static void zram_debugfs_create(void) {};
 static void zram_debugfs_destroy(void) {};
-static void zram_accessed(struct zram *zram, u32 index) {};
-static void zram_reset_access(struct zram *zram, u32 index) {};
+static void zram_accessed(struct zram *zram, u32 index)
+{
+	zram_clear_flag(zram, index, ZRAM_IDLE);
+};
 static void zram_debugfs_register(struct zram *zram) {};
 static void zram_debugfs_unregister(struct zram *zram) {};
 #endif
@@ -877,6 +1098,26 @@
 	return ret;
 }
 
+#ifdef CONFIG_ZRAM_WRITEBACK
+#define FOUR_K(x) ((x) * (1 << (PAGE_SHIFT - 12)))
+static ssize_t bd_stat_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct zram *zram = dev_to_zram(dev);
+	ssize_t ret;
+
+	down_read(&zram->init_lock);
+	ret = scnprintf(buf, PAGE_SIZE,
+		"%8llu %8llu %8llu\n",
+			FOUR_K((u64)atomic64_read(&zram->stats.bd_count)),
+			FOUR_K((u64)atomic64_read(&zram->stats.bd_reads)),
+			FOUR_K((u64)atomic64_read(&zram->stats.bd_writes)));
+	up_read(&zram->init_lock);
+
+	return ret;
+}
+#endif
+
 static ssize_t debug_stat_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -886,9 +1127,10 @@
 
 	down_read(&zram->init_lock);
 	ret = scnprintf(buf, PAGE_SIZE,
-			"version: %d\n%8llu\n",
+			"version: %d\n%8llu %8llu\n",
 			version,
-			(u64)atomic64_read(&zram->stats.writestall));
+			(u64)atomic64_read(&zram->stats.writestall),
+			(u64)atomic64_read(&zram->stats.miss_free));
 	up_read(&zram->init_lock);
 
 	return ret;
@@ -896,6 +1138,9 @@
 
 static DEVICE_ATTR_RO(io_stat);
 static DEVICE_ATTR_RO(mm_stat);
+#ifdef CONFIG_ZRAM_WRITEBACK
+static DEVICE_ATTR_RO(bd_stat);
+#endif
 static DEVICE_ATTR_RO(debug_stat);
 
 static void zram_meta_free(struct zram *zram, u64 disksize)
@@ -940,17 +1185,21 @@
 {
 	unsigned long handle;
 
-	zram_reset_access(zram, index);
+#ifdef CONFIG_ZRAM_MEMORY_TRACKING
+	zram->table[index].ac_time = 0;
+#endif
+	if (zram_test_flag(zram, index, ZRAM_IDLE))
+		zram_clear_flag(zram, index, ZRAM_IDLE);
 
 	if (zram_test_flag(zram, index, ZRAM_HUGE)) {
 		zram_clear_flag(zram, index, ZRAM_HUGE);
 		atomic64_dec(&zram->stats.huge_pages);
 	}
 
-	if (zram_wb_enabled(zram) && zram_test_flag(zram, index, ZRAM_WB)) {
-		zram_wb_clear(zram, index);
-		atomic64_dec(&zram->stats.pages_stored);
-		return;
+	if (zram_test_flag(zram, index, ZRAM_WB)) {
+		zram_clear_flag(zram, index, ZRAM_WB);
+		free_block_bdev(zram, zram_get_element(zram, index));
+		goto out;
 	}
 
 	/*
@@ -959,10 +1208,8 @@
 	 */
 	if (zram_test_flag(zram, index, ZRAM_SAME)) {
 		zram_clear_flag(zram, index, ZRAM_SAME);
-		zram_set_element(zram, index, 0);
 		atomic64_dec(&zram->stats.same_pages);
-		atomic64_dec(&zram->stats.pages_stored);
-		return;
+		goto out;
 	}
 
 	handle = zram_get_handle(zram, index);
@@ -973,10 +1220,12 @@
 
 	atomic64_sub(zram_get_obj_size(zram, index),
 			&zram->stats.compr_data_size);
+out:
 	atomic64_dec(&zram->stats.pages_stored);
-
 	zram_set_handle(zram, index, 0);
 	zram_set_obj_size(zram, index, 0);
+	WARN_ON_ONCE(zram->table[index].flags &
+		~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB));
 }
 
 static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
@@ -987,24 +1236,20 @@
 	unsigned int size;
 	void *src, *dst;
 
-	if (zram_wb_enabled(zram)) {
-		zram_slot_lock(zram, index);
-		if (zram_test_flag(zram, index, ZRAM_WB)) {
-			struct bio_vec bvec;
+	zram_slot_lock(zram, index);
+	if (zram_test_flag(zram, index, ZRAM_WB)) {
+		struct bio_vec bvec;
 
-			zram_slot_unlock(zram, index);
-
-			bvec.bv_page = page;
-			bvec.bv_len = PAGE_SIZE;
-			bvec.bv_offset = 0;
-			return read_from_bdev(zram, &bvec,
-					zram_get_element(zram, index),
-					bio, partial_io);
-		}
 		zram_slot_unlock(zram, index);
+
+		bvec.bv_page = page;
+		bvec.bv_len = PAGE_SIZE;
+		bvec.bv_offset = 0;
+		return read_from_bdev(zram, &bvec,
+				zram_get_element(zram, index),
+				bio, partial_io);
 	}
 
-	zram_slot_lock(zram, index);
 	handle = zram_get_handle(zram, index);
 	if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
 		unsigned long value;
@@ -1089,7 +1334,6 @@
 	struct page *page = bvec->bv_page;
 	unsigned long element = 0;
 	enum zram_pageflags flags = 0;
-	bool allow_wb = true;
 
 	mem = kmap_atomic(page);
 	if (page_same_filled(mem, &element)) {
@@ -1114,21 +1358,8 @@
 		return ret;
 	}
 
-	if (unlikely(comp_len >= huge_class_size)) {
+	if (comp_len >= huge_class_size)
 		comp_len = PAGE_SIZE;
-		if (zram_wb_enabled(zram) && allow_wb) {
-			zcomp_stream_put(zram->comp);
-			ret = write_to_bdev(zram, bvec, index, bio, &element);
-			if (!ret) {
-				flags = ZRAM_WB;
-				ret = 1;
-				goto out;
-			}
-			allow_wb = false;
-			goto compress_again;
-		}
-	}
-
 	/*
 	 * handle allocation has 2 paths:
 	 * a) fast path is executed with preemption disabled (for
@@ -1401,10 +1632,14 @@
 
 	zram = bdev->bd_disk->private_data;
 
-	zram_slot_lock(zram, index);
+	atomic64_inc(&zram->stats.notify_free);
+	if (!zram_slot_trylock(zram, index)) {
+		atomic64_inc(&zram->stats.miss_free);
+		return;
+	}
+
 	zram_free_page(zram, index);
 	zram_slot_unlock(zram, index);
-	atomic64_inc(&zram->stats.notify_free);
 }
 
 static int zram_rw_page(struct block_device *bdev, sector_t sector,
@@ -1609,10 +1844,14 @@
 static DEVICE_ATTR_WO(reset);
 static DEVICE_ATTR_WO(mem_limit);
 static DEVICE_ATTR_WO(mem_used_max);
+static DEVICE_ATTR_WO(idle);
 static DEVICE_ATTR_RW(max_comp_streams);
 static DEVICE_ATTR_RW(comp_algorithm);
 #ifdef CONFIG_ZRAM_WRITEBACK
 static DEVICE_ATTR_RW(backing_dev);
+static DEVICE_ATTR_WO(writeback);
+static DEVICE_ATTR_RW(writeback_limit);
+static DEVICE_ATTR_RW(writeback_limit_enable);
 #endif
 
 static struct attribute *zram_disk_attrs[] = {
@@ -1622,13 +1861,20 @@
 	&dev_attr_compact.attr,
 	&dev_attr_mem_limit.attr,
 	&dev_attr_mem_used_max.attr,
+	&dev_attr_idle.attr,
 	&dev_attr_max_comp_streams.attr,
 	&dev_attr_comp_algorithm.attr,
 #ifdef CONFIG_ZRAM_WRITEBACK
 	&dev_attr_backing_dev.attr,
+	&dev_attr_writeback.attr,
+	&dev_attr_writeback_limit.attr,
+	&dev_attr_writeback_limit_enable.attr,
 #endif
 	&dev_attr_io_stat.attr,
 	&dev_attr_mm_stat.attr,
+#ifdef CONFIG_ZRAM_WRITEBACK
+	&dev_attr_bd_stat.attr,
+#endif
 	&dev_attr_debug_stat.attr,
 	NULL,
 };
@@ -1662,7 +1908,9 @@
 	device_id = ret;
 
 	init_rwsem(&zram->init_lock);
-
+#ifdef CONFIG_ZRAM_WRITEBACK
+	spin_lock_init(&zram->wb_limit_lock);
+#endif
 	queue = blk_alloc_queue(GFP_KERNEL);
 	if (!queue) {
 		pr_err("Error allocating disk queue for device %d\n",
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 72c8584..f2fd46d 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -30,7 +30,7 @@
 
 
 /*
- * The lower ZRAM_FLAG_SHIFT bits of table.value is for
+ * The lower ZRAM_FLAG_SHIFT bits of table.flags are for
  * object size (excluding header), the higher bits are for
  * zram_pageflags.
  *
@@ -41,13 +41,15 @@
  */
 #define ZRAM_FLAG_SHIFT 24
 
-/* Flags for zram pages (table[page_no].value) */
+/* Flags for zram pages (table[page_no].flags) */
 enum zram_pageflags {
 	/* zram slot is locked */
 	ZRAM_LOCK = ZRAM_FLAG_SHIFT,
 	ZRAM_SAME,	/* Page consists of the same element */
 	ZRAM_WB,	/* page is stored on backing_device */
+	ZRAM_UNDER_WB,	/* page is under writeback */
 	ZRAM_HUGE,	/* Incompressible page */
+	ZRAM_IDLE,	/* page not accessed since the last idle marking */
 
 	__NR_ZRAM_PAGEFLAGS,
 };
@@ -60,7 +62,7 @@
 		unsigned long handle;
 		unsigned long element;
 	};
-	unsigned long value;
+	unsigned long flags;
 #ifdef CONFIG_ZRAM_MEMORY_TRACKING
 	ktime_t ac_time;
 #endif
@@ -79,6 +81,12 @@
 	atomic64_t pages_stored;	/* no. of pages currently stored */
 	atomic_long_t max_used_pages;	/* no. of maximum pages stored */
 	atomic64_t writestall;		/* no. of write slow paths */
+	atomic64_t miss_free;		/* no. of missed free */
+#ifdef	CONFIG_ZRAM_WRITEBACK
+	atomic64_t bd_count;		/* no. of pages in backing device */
+	atomic64_t bd_reads;		/* no. of reads from backing device */
+	atomic64_t bd_writes;		/* no. of writes to backing device */
+#endif
 };
 
 struct zram {
@@ -104,13 +112,15 @@
 	 * zram is claimed so open request will be failed
 	 */
 	bool claim; /* Protected by bdev->bd_mutex */
-#ifdef CONFIG_ZRAM_WRITEBACK
 	struct file *backing_dev;
+#ifdef CONFIG_ZRAM_WRITEBACK
+	spinlock_t wb_limit_lock;
+	bool wb_limit_enable;
+	u64 bd_wb_limit;
 	struct block_device *bdev;
 	unsigned int old_block_size;
 	unsigned long *bitmap;
 	unsigned long nr_pages;
-	spinlock_t bitmap_lock;
 #endif
 #ifdef CONFIG_ZRAM_MEMORY_TRACKING
 	struct dentry *debugfs_dir;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index cd2e5cf..77b67a5 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -343,6 +343,7 @@
 	/* Intel Bluetooth devices */
 	{ USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_NEW },
 	{ USB_DEVICE(0x8087, 0x0026), .driver_info = BTUSB_INTEL_NEW },
+	{ USB_DEVICE(0x8087, 0x0029), .driver_info = BTUSB_INTEL_NEW },
 	{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
 	{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
 	{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
@@ -2054,6 +2055,35 @@
 	return -EILSEQ;
 }
 
+static bool btusb_setup_intel_new_get_fw_name(struct intel_version *ver,
+					     struct intel_boot_params *params,
+					     char *fw_name, size_t len,
+					     const char *suffix)
+{
+	switch (ver->hw_variant) {
+	case 0x0b:	/* SfP */
+	case 0x0c:	/* WsP */
+		snprintf(fw_name, len, "intel/ibt-%u-%u.%s",
+			le16_to_cpu(ver->hw_variant),
+			le16_to_cpu(params->dev_revid),
+			suffix);
+		break;
+	case 0x11:	/* JfP */
+	case 0x12:	/* ThP */
+	case 0x13:	/* HrP */
+	case 0x14:	/* CcP */
+		snprintf(fw_name, len, "intel/ibt-%u-%u-%u.%s",
+			le16_to_cpu(ver->hw_variant),
+			le16_to_cpu(ver->hw_revision),
+			le16_to_cpu(ver->fw_revision),
+			suffix);
+		break;
+	default:
+		return false;
+	}
+	return true;
+}
+
 static int btusb_setup_intel_new(struct hci_dev *hdev)
 {
 	struct btusb_data *data = hci_get_drvdata(hdev);
@@ -2105,7 +2135,7 @@
 	case 0x11:	/* JfP */
 	case 0x12:	/* ThP */
 	case 0x13:	/* HrP */
-	case 0x14:	/* QnJ, IcP */
+	case 0x14:	/* CcP */
 		break;
 	default:
 		bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
@@ -2189,23 +2219,9 @@
 	 * ibt-<hw_variant>-<hw_revision>-<fw_revision>.sfi.
 	 *
 	 */
-	switch (ver.hw_variant) {
-	case 0x0b:	/* SfP */
-	case 0x0c:	/* WsP */
-		snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi",
-			 le16_to_cpu(ver.hw_variant),
-			 le16_to_cpu(params.dev_revid));
-		break;
-	case 0x11:	/* JfP */
-	case 0x12:	/* ThP */
-	case 0x13:	/* HrP */
-	case 0x14:	/* QnJ, IcP */
-		snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.sfi",
-			 le16_to_cpu(ver.hw_variant),
-			 le16_to_cpu(ver.hw_revision),
-			 le16_to_cpu(ver.fw_revision));
-		break;
-	default:
+	err = btusb_setup_intel_new_get_fw_name(&ver, &params, fwname,
+						sizeof(fwname), "sfi");
+	if (!err) {
 		bt_dev_err(hdev, "Unsupported Intel firmware naming");
 		return -EINVAL;
 	}
@@ -2221,23 +2237,9 @@
 	/* Save the DDC file name for later use to apply once the firmware
 	 * downloading is done.
 	 */
-	switch (ver.hw_variant) {
-	case 0x0b:	/* SfP */
-	case 0x0c:	/* WsP */
-		snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc",
-			 le16_to_cpu(ver.hw_variant),
-			 le16_to_cpu(params.dev_revid));
-		break;
-	case 0x11:	/* JfP */
-	case 0x12:	/* ThP */
-	case 0x13:	/* HrP */
-	case 0x14:	/* QnJ, IcP */
-		snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.ddc",
-			 le16_to_cpu(ver.hw_variant),
-			 le16_to_cpu(ver.hw_revision),
-			 le16_to_cpu(ver.fw_revision));
-		break;
-	default:
+	err = btusb_setup_intel_new_get_fw_name(&ver, &params, fwname,
+						sizeof(fwname), "ddc");
+	if (!err) {
 		bt_dev_err(hdev, "Unsupported Intel firmware naming");
 		return -EINVAL;
 	}
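
The btusb refactor above folds two duplicated switch statements into
btusb_setup_intel_new_get_fw_name(), which builds a two-part name for SfP/WsP
and a three-part name for JfP/ThP/HrP/CcP. A standalone sketch of the scheme
with made-up sample values:

#include <stdio.h>

int main(void)
{
	char name[64];
	unsigned int hw_variant = 0x12;	/* ThP; sample value only */
	unsigned int hw_revision = 16, fw_revision = 1;

	/* 0x0b/0x0c:  intel/ibt-<hw_variant>-<dev_revid>.<suffix>
	 * 0x11..0x14: intel/ibt-<hw_variant>-<hw_revision>-<fw_revision>.<suffix>
	 */
	snprintf(name, sizeof(name), "intel/ibt-%u-%u-%u.%s",
		 hw_variant, hw_revision, fw_revision, "sfi");
	printf("%s\n", name);	/* prints intel/ibt-18-16-1.sfi */
	return 0;
}
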
diff --git a/drivers/bus/mhi/devices/mhi_netdev.c b/drivers/bus/mhi/devices/mhi_netdev.c
index eebbf4d..f249911 100644
--- a/drivers/bus/mhi/devices/mhi_netdev.c
+++ b/drivers/bus/mhi/devices/mhi_netdev.c
@@ -550,7 +550,7 @@
 	struct device_node *of_node = mhi_dev->dev.of_node;
 	struct mhi_netdev_priv *mhi_netdev_priv;
 
-	mhi_netdev->alias = of_alias_get_id(of_node, "mhi_netdev");
+	mhi_netdev->alias = of_alias_get_id(of_node, "mhi-netdev");
 	if (mhi_netdev->alias < 0)
 		return -ENODEV;
 
diff --git a/drivers/bus/mhi/devices/mhi_satellite.c b/drivers/bus/mhi/devices/mhi_satellite.c
index fd6b7db..6a785ec 100644
--- a/drivers/bus/mhi/devices/mhi_satellite.c
+++ b/drivers/bus/mhi/devices/mhi_satellite.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (c) 2019, The Linux Foundation. All rights reserved.*/
 
+#include <linux/debugfs.h>
 #include <linux/device.h>
 #include <linux/dma-direction.h>
 #include <linux/dma-mapping.h>
@@ -18,6 +19,8 @@
 
 #define MHI_SAT_DRIVER_NAME "mhi_satellite"
 
+static bool mhi_sat_defer_init = true; /* defer registration by default */
+
 /* logging macros */
 #define IPC_LOG_PAGES (10)
 #define IPC_LOG_LVL (MHI_MSG_LVL_INFO)
@@ -282,6 +285,9 @@
 
 	struct mhi_sat_subsys *subsys; /* pointer to subsystem array */
 	unsigned int num_subsys;
+
+	struct dentry *dentry; /* debugfs directory */
+	bool deferred_init_done; /* flag for deferred init protection */
 };
 
 static struct mhi_sat_driver mhi_sat_driver;
@@ -1041,6 +1047,44 @@
 	},
 };
 
+int mhi_sat_trigger_init(void *data, u64 val)
+{
+	struct mhi_sat_subsys *subsys;
+	int i, ret;
+
+	if (mhi_sat_driver.deferred_init_done)
+		return -EIO;
+
+	ret = register_rpmsg_driver(&mhi_sat_rpmsg_driver);
+	if (ret)
+		goto error_sat_trigger_init;
+
+	ret = mhi_driver_register(&mhi_sat_dev_driver);
+	if (ret)
+		goto error_sat_trigger_register;
+
+	mhi_sat_driver.deferred_init_done = true;
+
+	return 0;
+
+error_sat_trigger_register:
+	unregister_rpmsg_driver(&mhi_sat_rpmsg_driver);
+
+error_sat_trigger_init:
+	subsys = mhi_sat_driver.subsys;
+	for (i = 0; i < mhi_sat_driver.num_subsys; i++, subsys++) {
+		ipc_log_context_destroy(subsys->ipc_log);
+		mutex_destroy(&subsys->cntrl_mutex);
+	}
+	kfree(mhi_sat_driver.subsys);
+	mhi_sat_driver.subsys = NULL;
+
+	return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(mhi_sat_debugfs_fops, NULL,
+			mhi_sat_trigger_init, "%llu\n");
+
 static int mhi_sat_init(void)
 {
 	struct mhi_sat_subsys *subsys;
@@ -1066,6 +1110,20 @@
 		subsys->ipc_log = ipc_log_context_create(IPC_LOG_PAGES, log, 0);
 	}
 
+	/* create debugfs entry if defer_init is enabled */
+	if (mhi_sat_defer_init) {
+		mhi_sat_driver.dentry = debugfs_create_dir("mhi_sat", NULL);
+		if (IS_ERR_OR_NULL(mhi_sat_driver.dentry)) {
+			ret = -ENODEV;
+			goto error_sat_init;
+		}
+
+		debugfs_create_file("debug", 0444, mhi_sat_driver.dentry, NULL,
+				    &mhi_sat_debugfs_fops);
+
+		return 0;
+	}
+
 	ret = register_rpmsg_driver(&mhi_sat_rpmsg_driver);
 	if (ret)
 		goto error_sat_init;
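
With mhi_sat_defer_init set, mhi_sat_init() above only creates the debugfs
node and defers rpmsg/MHI driver registration until user space writes to it;
mhi_sat_trigger_init() then runs exactly once. A sketch of the trigger (the
standard debugfs mount point is assumed and root is required):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/mhi_sat/debug", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Any value kicks the deferred init; a second write fails with
	 * -EIO because deferred_init_done only allows one trigger.
	 */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}
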
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 9a49e9e..35c50ac 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -58,9 +58,11 @@
 
 #define AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME   "audio_pdr_adsprpc"
 #define AUDIO_PDR_ADSP_SERVICE_NAME              "avs/audio"
+#define ADSP_AUDIOPD_NAME                        "msm/adsp/audio_pd"
 
-#define SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME   "sensors_pdr_adsprpc"
-#define SENSORS_PDR_ADSP_SERVICE_NAME              "tms/servreg"
+#define SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME "sensors_pdr_adsprpc"
+#define SENSORS_PDR_SLPI_SERVICE_NAME            "tms/servreg"
+#define SLPI_SENSORPD_NAME                       "msm/slpi/sensor_pd"
 
 #define RPC_TIMEOUT	(5 * HZ)
 #define BALIGN		128
@@ -88,6 +90,8 @@
 #define SDSP_DOMAIN_ID (2)
 #define CDSP_DOMAIN_ID (3)
 
+#define RH_CID ADSP_DOMAIN_ID
+
 #define PERF_KEYS \
 	"count:flush:map:copy:rpmsg:getargs:putargs:invalidate:invoke:tid:ptr"
 #define FASTRPC_STATIC_HANDLE_KERNEL (1)
@@ -244,6 +248,7 @@
 };
 
 struct fastrpc_static_pd {
+	char *servloc_name;
 	char *spdname;
 	struct notifier_block pdrnb;
 	struct notifier_block get_service_nb;
@@ -274,7 +279,7 @@
 	int vmid;
 	struct secure_vm rhvm;
 	int ramdumpenabled;
-	void *remoteheap_ramdump_dev;
+	void *rh_dump_dev;
 	/* Indicates, if channel is restricted to secure node only */
 	int secure;
 };
@@ -363,7 +368,7 @@
 	int cid;
 	int ssrcount;
 	int pd;
-	char *spdname;
+	char *servloc_name;
 	int file_close;
 	struct fastrpc_apps *apps;
 	struct hlist_head perf;
@@ -386,19 +391,13 @@
 		.subsys = "adsp",
 		.spd = {
 			{
-				.spdname =
+				.servloc_name =
 					AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
+				.spdname = ADSP_AUDIOPD_NAME,
 				.pdrnb.notifier_call =
 						fastrpc_pdr_notifier_cb,
 				.cid = ADSP_DOMAIN_ID,
 			},
-			{
-				.spdname =
-				SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME,
-				.pdrnb.notifier_call =
-						fastrpc_pdr_notifier_cb,
-				.cid = ADSP_DOMAIN_ID,
-			}
 		},
 	},
 	{
@@ -415,6 +414,11 @@
 		.subsys = "slpi",
 		.spd = {
 			{
+				.servloc_name =
+				SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME,
+				.spdname = SLPI_SENSORPD_NAME,
+				.pdrnb.notifier_call =
+						fastrpc_pdr_notifier_cb,
 				.cid = SDSP_DOMAIN_ID,
 			}
 		},
@@ -1261,7 +1265,6 @@
 		complete(&ictx->work);
 	}
 	spin_unlock(&me->hlock);
-
 }
 
 
@@ -1294,21 +1297,20 @@
 			fastrpc_notify_users(fl);
 	}
 	spin_unlock(&me->hlock);
-
 }
 
-static void fastrpc_notify_pdr_drivers(struct fastrpc_apps *me, char *spdname)
+static void fastrpc_notify_pdr_drivers(struct fastrpc_apps *me,
+		char *servloc_name)
 {
 	struct fastrpc_file *fl;
 	struct hlist_node *n;
 
 	spin_lock(&me->hlock);
 	hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
-		if (fl->spdname && !strcmp(spdname, fl->spdname))
+		if (fl->servloc_name && !strcmp(servloc_name, fl->servloc_name))
 			fastrpc_notify_users_staticpd_pdr(fl);
 	}
 	spin_unlock(&me->hlock);
-
 }
 
 static void context_list_ctor(struct fastrpc_ctx_lst *me)
@@ -1931,21 +1933,28 @@
 	return err;
 }
 
-static int fastrpc_get_spd_session(char *name, int *session)
+static int fastrpc_get_spd_session(char *name, int *session, int *cid)
 {
 	struct fastrpc_apps *me = &gfa;
-	int err = 0, i;
+	int err = 0, i, j, match = 0;
 
-	for (i = 0; i < NUM_SESSIONS; i++) {
-		if (!me->channel[0].spd[i].spdname)
-			continue;
-		if (!strcmp(name, me->channel[0].spd[i].spdname))
+	for (i = 0; i < NUM_CHANNELS; i++) {
+		for (j = 0; j < NUM_SESSIONS; j++) {
+			if (!me->channel[i].spd[j].servloc_name)
+				continue;
+			if (!strcmp(name, me->channel[i].spd[j].servloc_name)) {
+				match = 1;
+				break;
+			}
+		}
+		if (match)
 			break;
 	}
-	VERIFY(err, i < NUM_SESSIONS);
+	VERIFY(err, i < NUM_CHANNELS && j < NUM_SESSIONS);
 	if (err)
 		goto bail;
-	*session = i;
+	*cid = i;
+	*session = j;
 bail:
 	return err;
 }
@@ -1985,7 +1994,8 @@
 		if (init->flags == FASTRPC_INIT_ATTACH)
 			fl->pd = 0;
 		else if (init->flags == FASTRPC_INIT_ATTACH_SENSORS) {
-			fl->spdname = SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME;
+			fl->servloc_name =
+				SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME;
 			fl->pd = 2;
 		}
 		VERIFY(err, !(err = fastrpc_internal_invoke(fl,
@@ -2114,8 +2124,9 @@
 		inbuf.pageslen = 0;
 
 		if (!strcmp(proc_name, "audiopd")) {
-			fl->spdname = AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME;
-			VERIFY(err, !fastrpc_mmap_remove_pdr(fl));
+			fl->servloc_name =
+				AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME;
+			err = fastrpc_mmap_remove_pdr(fl);
 			if (err)
 				goto bail;
 		}
@@ -2123,9 +2134,9 @@
 		if (!me->staticpd_flags && !(me->legacy_remote_heap)) {
 			inbuf.pageslen = 1;
 			mutex_lock(&fl->map_mutex);
-			VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem,
+			err = fastrpc_mmap_create(fl, -1, 0, init->mem,
 				 init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR,
-				 &mem));
+				 &mem);
 			mutex_unlock(&fl->map_mutex);
 			if (err)
 				goto bail;
@@ -2406,6 +2417,9 @@
 	struct fastrpc_apps *me = &gfa;
 	struct ramdump_segment *ramdump_segments_rh = NULL;
 
+	VERIFY(err, fl->cid == RH_CID);
+	if (err)
+		goto bail;
 	do {
 		match = NULL;
 		spin_lock(&me->hlock);
@@ -2421,7 +2435,7 @@
 						match->size, match->flags);
 			if (err)
 				goto bail;
-			if (me->channel[0].ramdumpenabled) {
+			if (me->channel[RH_CID].ramdumpenabled) {
 				ramdump_segments_rh = kcalloc(1,
 				sizeof(struct ramdump_segment), GFP_KERNEL);
 				if (ramdump_segments_rh) {
@@ -2429,7 +2443,7 @@
 					match->phys;
 					ramdump_segments_rh->size = match->size;
 					ret = do_elf_ramdump(
-					 me->channel[0].remoteheap_ramdump_dev,
+					 me->channel[RH_CID].rh_dump_dev,
 					 ramdump_segments_rh, 1);
 					if (ret < 0)
 						pr_err("adsprpc: %s: unable to dump heap (err %d)\n",
@@ -2449,10 +2463,13 @@
 static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl)
 {
 	struct fastrpc_apps *me = &gfa;
-	int session = 0, err = 0;
+	int session = 0, err = 0, cid = -1;
 
-	err = fastrpc_get_spd_session(AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
-			&session);
+	err = fastrpc_get_spd_session(fl->servloc_name,
+			&session, &cid);
+	if (err)
+		goto bail;
+	VERIFY(err, cid == fl->cid);
 	if (err)
 		goto bail;
 	if (me->channel[fl->cid].spd[session].pdrcount !=
@@ -2464,12 +2481,10 @@
 		me->channel[fl->cid].spd[session].prevpdrcount =
 				me->channel[fl->cid].spd[session].pdrcount;
 	}
-	if (!me->channel[fl->cid].spd[session].ispdup) {
-		VERIFY(err, 0);
-		if (err) {
-			err = -ENOTCONN;
-			goto bail;
-		}
+	if (!me->channel[fl->cid].spd[session].ispdup &&
+		me->channel[fl->cid].spd[session].pdrhandle) {
+		err = -ENOTCONN;
+		goto bail;
 	}
 bail:
 	return err;
@@ -2969,7 +2984,7 @@
 		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
 			"%s %14s %d\n", "pd", ":", fl->pd);
 		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
-			"%s %9s %s\n", "spdname", ":", fl->spdname);
+			"%s %9s %s\n", "servloc_name", ":", fl->servloc_name);
 		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
 			"%s %6s %d\n", "file_close", ":", fl->file_close);
 		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
@@ -3101,11 +3116,11 @@
 
 	VERIFY(err, fl && fl->sctx);
 	if (err)
-		return err;
+		goto bail;
 	cid = fl->cid;
 	VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
 	if (err)
-		return err;
+		goto bail;
 
 	mutex_lock(&me->channel[cid].rpmsg_mutex);
 	VERIFY(err, NULL != me->channel[cid].rpdev);
@@ -3120,12 +3135,9 @@
 	if (me->channel[cid].ssrcount !=
 				 me->channel[cid].prevssrcount) {
 		if (!me->channel[cid].issubsystemup) {
-			VERIFY(err, 0);
-			if (err) {
-				err = -ENOTCONN;
-				mutex_unlock(&me->channel[cid].smd_mutex);
-				goto bail;
-			}
+			err = -ENOTCONN;
+			mutex_unlock(&me->channel[cid].smd_mutex);
+			goto bail;
 		}
 	}
 	fl->ssrcount = me->channel[cid].ssrcount;
@@ -3519,27 +3531,32 @@
 {
 	struct fastrpc_apps *me = &gfa;
 	struct fastrpc_channel_ctx *ctx;
-	struct notif_data *notifdata = data;
-	int cid;
+	struct notif_data *notifdata = (struct notif_data *)data;
+	int cid = -1;
 
 	ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
 	cid = ctx - &me->channel[0];
 	if (code == SUBSYS_BEFORE_SHUTDOWN) {
+		pr_debug("adsprpc: %s: %s subsystem is restarting\n",
+			__func__, gcinfo[cid].subsys);
 		mutex_lock(&me->channel[cid].smd_mutex);
 		ctx->ssrcount++;
 		ctx->issubsystemup = 0;
 		mutex_unlock(&me->channel[cid].smd_mutex);
-		if (cid == 0)
+		if (cid == RH_CID)
 			me->staticpd_flags = 0;
 	} else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
-		if (me->channel[0].remoteheap_ramdump_dev &&
-				notifdata->enable_ramdump) {
-			me->channel[0].ramdumpenabled = 1;
+		if (cid == RH_CID) {
+			if (me->channel[RH_CID].rh_dump_dev &&
+					notifdata->enable_ramdump) {
+				me->channel[RH_CID].ramdumpenabled = 1;
+			}
 		}
 	} else if (code == SUBSYS_AFTER_POWERUP) {
+		pr_debug("adsprpc: %s: %s subsystem is up\n",
+			__func__, gcinfo[cid].subsys);
 		ctx->issubsystemup = 1;
 	}
-
 	return NOTIFY_DONE;
 }
 
@@ -3549,26 +3566,30 @@
 {
 	struct fastrpc_apps *me = &gfa;
 	struct fastrpc_static_pd *spd;
-	struct notif_data *notifdata = data;
+	struct notif_data *notifdata = (struct notif_data *)data;
 
 	spd = container_of(pdrnb, struct fastrpc_static_pd, pdrnb);
 	if (code == SERVREG_NOTIF_SERVICE_STATE_DOWN_V01) {
+		pr_debug("adsprpc: %s: %s (%s) is down for PDR\n",
+			__func__, spd->spdname, spd->servloc_name);
 		mutex_lock(&me->channel[spd->cid].smd_mutex);
 		spd->pdrcount++;
 		spd->ispdup = 0;
 		mutex_unlock(&me->channel[spd->cid].smd_mutex);
-		pr_info("adsprpc: %s called for %s (dev %d)\n",
-				__func__, spd->spdname, MAJOR(me->dev_no));
-		if (!strcmp(spd->spdname,
+		if (!strcmp(spd->servloc_name,
 				AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME))
 			me->staticpd_flags = 0;
-		fastrpc_notify_pdr_drivers(me, spd->spdname);
+		fastrpc_notify_pdr_drivers(me, spd->servloc_name);
 	} else if (code == SUBSYS_RAMDUMP_NOTIFICATION) {
-		if (me->channel[0].remoteheap_ramdump_dev &&
-				notifdata->enable_ramdump) {
-			me->channel[0].ramdumpenabled = 1;
+		if (spd->cid == RH_CID) {
+			if (me->channel[RH_CID].rh_dump_dev &&
+					notifdata->enable_ramdump) {
+				me->channel[RH_CID].ramdumpenabled = 1;
+			}
 		}
 	} else if (code == SERVREG_NOTIF_SERVICE_STATE_UP_V01) {
+		pr_debug("adsprpc: %s: %s (%s) is up\n",
+			__func__, spd->spdname, spd->servloc_name);
 		spd->ispdup = 1;
 	}
 
@@ -3584,18 +3605,20 @@
 
 	spd = container_of(nb, struct fastrpc_static_pd, get_service_nb);
 	if (opcode == LOCATOR_DOWN) {
-		pr_err("adsprpc: %s: PD restart notifier locator down\n",
-				__func__);
+		pr_warn("adsprpc: %s: PDR notifier locator is down for %s\n",
+				__func__, spd->servloc_name);
 		return NOTIFY_DONE;
 	}
 	for (i = 0; i < pdr->total_domains; i++) {
-		if ((!strcmp(spd->spdname, "audio_pdr_adsprpc"))
-					&& (!strcmp(pdr->domain_list[i].name,
-						"msm/adsp/audio_pd"))) {
+		if ((!strcmp(spd->servloc_name,
+				AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME))
+				&& (!strcmp(pdr->domain_list[i].name,
+				ADSP_AUDIOPD_NAME))) {
 			goto pdr_register;
-		} else if ((!strcmp(spd->spdname, "sensors_pdr_adsprpc"))
-					&& (!strcmp(pdr->domain_list[i].name,
-						"msm/adsp/sensor_pd"))) {
+		} else if ((!strcmp(spd->servloc_name,
+				SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME))
+				&& (!strcmp(pdr->domain_list[i].name,
+				SLPI_SENSORPD_NAME))) {
 			goto pdr_register;
 		}
 	}
@@ -3608,19 +3631,24 @@
 			pdr->domain_list[i].name,
 			pdr->domain_list[i].instance_id,
 			&spd->pdrnb, &curr_state);
+		if (IS_ERR_OR_NULL(spd->pdrhandle))
+			pr_warn("adsprpc: %s: PDR notifier register failed for %s (%s) with err %d\n",
+				__func__, pdr->domain_list[i].name,
+				spd->servloc_name, PTR_ERR(spd->pdrhandle));
+		else
+			pr_info("adsprpc: %s: PDR notifier registered for %s (%s)\n",
+			__func__, pdr->domain_list[i].name, spd->servloc_name);
 	} else {
-		pr_err("adsprpc: %s is already registered\n", spd->spdname);
+		pr_warn("adsprpc: %s: %s (%s) notifier is already registered\n",
+			__func__, pdr->domain_list[i].name, spd->servloc_name);
 	}
 
-	if (IS_ERR(spd->pdrhandle))
-		pr_err("adsprpc: Unable to register notifier\n");
-
 	if (curr_state == SERVREG_NOTIF_SERVICE_STATE_UP_V01) {
-		pr_info("adsprpc: %s: %s is up\n", __func__, spd->spdname);
+		pr_debug("adsprpc: %s: %s (%s) PDR service is up\n",
+			__func__, spd->servloc_name, pdr->domain_list[i].name);
 		spd->ispdup = 1;
 	} else if (curr_state == SERVREG_NOTIF_SERVICE_STATE_UNINIT_V01) {
-		pr_info("adsprpc: %s: %s is uninitialzed\n",
-			__func__, spd->spdname);
+		spd->ispdup = 0;
 	}
 	return NOTIFY_DONE;
 }
@@ -3791,6 +3819,7 @@
 	uint32_t val;
 	int ret = 0;
 	uint32_t secure_domains;
+	int session = -1, cid = -1;
 
 	if (of_device_is_compatible(dev->of_node,
 					"qcom,msm-fastrpc-compute")) {
@@ -3861,45 +3890,50 @@
 					"qcom,fastrpc-legacy-remote-heap");
 	if (of_property_read_bool(dev->of_node,
 					"qcom,fastrpc-adsp-audio-pdr")) {
-		int session;
-
-		VERIFY(err, !fastrpc_get_spd_session(
-			AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
+		err = fastrpc_get_spd_session(
+			AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session, &cid);
 		if (err)
 			goto spdbail;
-		me->channel[0].spd[session].get_service_nb.notifier_call =
+		me->channel[cid].spd[session].get_service_nb.notifier_call =
 					fastrpc_get_service_location_notify;
 		ret = get_service_location(
 				AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
 				AUDIO_PDR_ADSP_SERVICE_NAME,
-				&me->channel[0].spd[session].get_service_nb);
+				&me->channel[cid].spd[session].get_service_nb);
 		if (ret)
-			pr_err("adsprpc: %s: getting ADSP service location failed with %d\n",
-					__func__, ret);
+			pr_warn("adsprpc: %s: get service location failed with %d for %s (%s)\n",
+				__func__, ret, AUDIO_PDR_ADSP_SERVICE_NAME,
+				AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME);
+		else
+			pr_debug("adsprpc: %s: service location enabled for %s (%s)\n",
+				__func__, AUDIO_PDR_ADSP_SERVICE_NAME,
+				AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME);
 	}
 	if (of_property_read_bool(dev->of_node,
 					"qcom,fastrpc-adsp-sensors-pdr")) {
-		int session;
-
-		VERIFY(err, !fastrpc_get_spd_session(
-			SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME, &session));
+		err = fastrpc_get_spd_session(
+		SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME, &session, &cid);
 		if (err)
 			goto spdbail;
-		me->channel[0].spd[session].get_service_nb.notifier_call =
+		me->channel[cid].spd[session].get_service_nb.notifier_call =
 					fastrpc_get_service_location_notify;
 		ret = get_service_location(
 				SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME,
-				SENSORS_PDR_ADSP_SERVICE_NAME,
-				&me->channel[0].spd[session].get_service_nb);
+				SENSORS_PDR_SLPI_SERVICE_NAME,
+				&me->channel[cid].spd[session].get_service_nb);
 		if (ret)
-			pr_err("adsprpc: %s: getting sensors service location failed with %d\n",
-					__func__, ret);
+			pr_warn("adsprpc: %s: get service location failed with %d for %s (%s)\n",
+				__func__, ret, SENSORS_PDR_SLPI_SERVICE_NAME,
+				SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME);
+		else
+			pr_debug("adsprpc: %s: service location enabled for %s (%s)\n",
+				__func__, SENSORS_PDR_SLPI_SERVICE_NAME,
+				SENSORS_PDR_SERVICE_LOCATION_CLIENT_NAME);
 	}
 spdbail:
-	err = 0;
-	VERIFY(err, !of_platform_populate(pdev->dev.of_node,
+	err = of_platform_populate(pdev->dev.of_node,
 					  fastrpc_match_table,
-					  NULL, &pdev->dev));
+					  NULL, &pdev->dev);
 	if (err)
 		goto bail;
 bail:
@@ -4012,11 +4046,18 @@
 		me->channel[i].prevssrcount = 0;
 		me->channel[i].issubsystemup = 1;
 		me->channel[i].ramdumpenabled = 0;
-		me->channel[i].remoteheap_ramdump_dev = NULL;
+		me->channel[i].rh_dump_dev = NULL;
 		me->channel[i].nb.notifier_call = fastrpc_restart_notifier_cb;
 		me->channel[i].handle = subsys_notif_register_notifier(
 							gcinfo[i].subsys,
 							&me->channel[i].nb);
+		if (IS_ERR_OR_NULL(me->channel[i].handle))
+			pr_warn("adsprpc: %s: SSR notifier register failed for %s with err %d\n",
+				__func__, gcinfo[i].subsys,
+				PTR_ERR(me->channel[i].handle));
+		else
+			pr_info("adsprpc: %s: SSR notifier registered for %s\n",
+				__func__, gcinfo[i].subsys);
 	}
 
 	err = register_rpmsg_driver(&fastrpc_rpmsg_client);
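
fastrpc_get_spd_session() above now scans every channel's static-PD slots and
returns both the channel id and the session index instead of assuming channel
0. A simplified standalone sketch of that two-level lookup (the table contents
are illustrative):

#include <stdio.h>
#include <string.h>

#define NUM_CHANNELS	2
#define NUM_SESSIONS	2

static const char *servloc[NUM_CHANNELS][NUM_SESSIONS] = {
	{ "audio_pdr_adsprpc", NULL },
	{ "sensors_pdr_adsprpc", NULL },
};

static int get_spd_session(const char *name, int *session, int *cid)
{
	int i, j;

	for (i = 0; i < NUM_CHANNELS; i++)
		for (j = 0; j < NUM_SESSIONS; j++)
			if (servloc[i][j] && !strcmp(name, servloc[i][j])) {
				*cid = i;	/* channel */
				*session = j;	/* slot within channel */
				return 0;
			}
	return -1;	/* no such service-location name */
}

int main(void)
{
	int session, cid;

	if (!get_spd_session("sensors_pdr_adsprpc", &session, &cid))
		printf("cid=%d session=%d\n", cid, session);
	return 0;
}
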
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 5dd86a7..db67e08 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -73,6 +73,13 @@
 		.send_event_mask = diag_send_dci_event_mask_remote,
 		.peripheral_status = 0,
 		.mempool = POOL_TYPE_MDM_DCI_WRITE,
+	},
+	{
+		.ctx = DIAGFWD_MDM_DCI_2,
+		.send_log_mask = diag_send_dci_log_mask_remote,
+		.send_event_mask = diag_send_dci_event_mask_remote,
+		.peripheral_status = 0,
+		.mempool = POOL_TYPE_MDM2_DCI_WRITE,
 	}
 #endif
 };
@@ -593,7 +600,7 @@
 		 * (1 byte) + version (1 byte) + length (2 bytes)
 		 */
 		err = diag_process_single_dci_pkt(buf + 4, dci_pkt_len,
-						 DCI_REMOTE_DATA, DCI_MDM_PROC);
+						 DCI_REMOTE_DATA, token);
 		if (err)
 			break;
 		read_bytes += header_len + dci_pkt_len;
@@ -2963,6 +2970,7 @@
 		new_entry->num_buffers = NUM_DCI_PERIPHERALS;
 		break;
 	case DCI_MDM_PROC:
+	case DCI_MDM_2_PROC:
 		new_entry->num_buffers = 1;
 		break;
 	}
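
The diag_dci.c change above adds a second remote entry to dci_ops_tbl and
routes packets by token instead of hard-coding DCI_MDM_PROC, so each MDM gets
its own mask handlers and write mempool. A standalone sketch of the
token-indexed dispatch (handler and pool names are stand-ins):

#include <stdio.h>

enum { DCI_LOCAL_PROC, DCI_MDM_PROC, DCI_MDM_2_PROC, NUM_DCI_PROC };

struct dci_ops {
	const char *mempool;
	void (*send_log_mask)(int token);
};

static void send_remote(int token)
{
	printf("log mask -> remote proc %d\n", token);
}

static const struct dci_ops ops_tbl[NUM_DCI_PROC] = {
	[DCI_MDM_PROC]   = { "MDM_DCI_WRITE",  send_remote },
	[DCI_MDM_2_PROC] = { "MDM2_DCI_WRITE", send_remote },
};

int main(void)
{
	int token = DCI_MDM_2_PROC;

	if (ops_tbl[token].send_log_mask) {
		printf("pool=%s\n", ops_tbl[token].mempool);
		ops_tbl[token].send_log_mask(token);
	}
	return 0;
}
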
diff --git a/drivers/char/diag/diag_dci.h b/drivers/char/diag/diag_dci.h
index 3acde54..5817066 100644
--- a/drivers/char/diag/diag_dci.h
+++ b/drivers/char/diag/diag_dci.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019 The Linux Foundation. All rights reserved.
  */
 #ifndef DIAG_DCI_H
 #define DIAG_DCI_H
@@ -58,7 +58,8 @@
 #define DCI_LOCAL_PROC		0
 #define DCI_REMOTE_BASE		1
 #define DCI_MDM_PROC		DCI_REMOTE_BASE
-#define DCI_REMOTE_LAST		(DCI_REMOTE_BASE + 1)
+#define DCI_MDM_2_PROC		(DCI_REMOTE_BASE + 1)
+#define DCI_REMOTE_LAST		(DCI_REMOTE_BASE + 2)
 
 #ifndef CONFIG_DIAGFWD_BRIDGE_CODE
 #define NUM_DCI_PROC		1
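
The new DCI_MDM_2_PROC slot above shifts DCI_REMOTE_LAST up by one. A
compile-time sketch of the resulting numbering:

#define DCI_LOCAL_PROC		0
#define DCI_REMOTE_BASE		1
#define DCI_MDM_PROC		DCI_REMOTE_BASE
#define DCI_MDM_2_PROC		(DCI_REMOTE_BASE + 1)
#define DCI_REMOTE_LAST		(DCI_REMOTE_BASE + 2)

_Static_assert(DCI_MDM_PROC == 1, "first remote proc");
_Static_assert(DCI_MDM_2_PROC == 2, "second remote proc");
_Static_assert(DCI_REMOTE_LAST == 3, "one past the last remote proc");
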
diff --git a/drivers/char/diag/diag_debugfs.c b/drivers/char/diag/diag_debugfs.c
index 8dcab0d..8f5011f 100644
--- a/drivers/char/diag/diag_debugfs.c
+++ b/drivers/char/diag/diag_debugfs.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifdef CONFIG_DEBUG_FS
@@ -789,7 +789,8 @@
 {
 	char *buf = NULL;
 	int ret = 0;
-	int i = 0;
+	int ch_idx = 0;
+	int dev_idx = 0;
 	unsigned int buf_size;
 	unsigned int bytes_remaining = 0;
 	unsigned int bytes_written = 0;
@@ -810,23 +811,27 @@
 
 	buf_size = ksize(buf);
 	bytes_remaining = buf_size;
-	for (i = diag_dbgfs_mhiinfo_index; i < NUM_MHI_DEV; i++) {
-		mhi_info = &diag_mhi[i];
-		bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
-			"id: %d\n"
-			"name: %s\n"
-			"enabled %d\n"
-			"bridge index: %s\n"
-			"mempool: %s\n"
-			"read ch opened: %d\n"
-			"write ch opened: %d\n"
-			"read work pending: %d\n"
-			"read done work pending: %d\n"
-			"open work pending: %d\n"
-			"close work pending: %d\n\n",
-			mhi_info->id,
-			mhi_info->name,
-			mhi_info->enabled,
+	for (dev_idx = diag_dbgfs_mhiinfo_index; dev_idx < NUM_MHI_DEV;
+								dev_idx++) {
+		for (ch_idx = 0; ch_idx < NUM_MHI_CHAN; ch_idx++) {
+			mhi_info = &diag_mhi[dev_idx][ch_idx];
+			bytes_written = scnprintf(buf+bytes_in_buffer,
+						bytes_remaining,
+						"id: %d\n"
+						"name: %s\n"
+						"enabled %d\n"
+						"bridge index: %s\n"
+						"mempool: %s\n"
+						"read ch opened: %d\n"
+						"write ch opened: %d\n"
+						"read work pending: %d\n"
+						"read done work pending: %d\n"
+						"open work pending: %d\n"
+						"close work pending: %d\n\n",
+						mhi_info->id,
+						mhi_info->name,
+						mhi_info->enabled,
 			DIAG_BRIDGE_GET_NAME(mhi_info->dev_id),
 			DIAG_MEMPOOL_GET_NAME(mhi_info->mempool),
 			atomic_read(&mhi_info->read_ch.opened),
@@ -835,15 +840,16 @@
 			work_pending(&mhi_info->read_done_work),
 			work_pending(&mhi_info->open_work),
 			work_pending(&mhi_info->close_work));
-		bytes_in_buffer += bytes_written;
+			bytes_in_buffer += bytes_written;
 
-		/* Check if there is room to add another table entry */
-		bytes_remaining = buf_size - bytes_in_buffer;
+			/* Check if there is room to add another table entry */
+			bytes_remaining = buf_size - bytes_in_buffer;
 
-		if (bytes_remaining < bytes_written)
-			break;
+			if (bytes_remaining < bytes_written)
+				break;
+		}
 	}
-	diag_dbgfs_mhiinfo_index = i+1;
+	diag_dbgfs_mhiinfo_index = dev_idx + 1;
 	*ppos = 0;
 	ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
 
diff --git a/drivers/char/diag/diagfwd_bridge.c b/drivers/char/diag/diagfwd_bridge.c
index 5b0722d..ab396c3 100644
--- a/drivers/char/diag/diagfwd_bridge.c
+++ b/drivers/char/diag/diagfwd_bridge.c
@@ -45,9 +45,9 @@
 		.dci_wq = NULL,
 	},
 	{
-		.id = DIAGFWD_SMUX,
+		.id = DIAGFWD_MDM2,
 		.type = DIAG_DATA_TYPE,
-		.name = "SMUX",
+		.name = "MDM_2",
 		.inited = 0,
 		.ctxt = 0,
 		.dci_read_ptr = NULL,
@@ -68,6 +68,18 @@
 		.dci_read_len = 0,
 		.dci_wq = NULL,
 	},
+	{
+		.id = DIAGFWD_MDM_DCI_2,
+		.type = DIAG_DCI_TYPE,
+		.name = "MDM_DCI_2",
+		.inited = 0,
+		.ctxt = 0,
+		.dci_read_ptr = NULL,
+		.dev_ops = NULL,
+		.dci_read_buf = NULL,
+		.dci_read_len = 0,
+		.dci_wq = NULL,
+	},
 };
 
 static int diagfwd_bridge_mux_connect(int id, int mode)
@@ -75,7 +87,7 @@
 	if (id < 0 || id >= NUM_REMOTE_DEV)
 		return -EINVAL;
 	if (bridge_info[id].dev_ops && bridge_info[id].dev_ops->open)
-		bridge_info[id].dev_ops->open(bridge_info[id].ctxt);
+		bridge_info[id].dev_ops->open(id, bridge_info[id].ctxt);
 	return 0;
 }
 
@@ -101,7 +113,7 @@
 		return -EINVAL;
 	ch = &bridge_info[buf_ctx];
 	if (ch->dev_ops && ch->dev_ops->fwd_complete)
-		ch->dev_ops->fwd_complete(ch->ctxt, buf, len, 0);
+		ch->dev_ops->fwd_complete(ch->id, ch->ctxt, buf, len, 0);
 	return 0;
 }
 
@@ -122,7 +134,7 @@
 	diag_process_remote_dci_read_data(ch->id, ch->dci_read_buf,
 					  ch->dci_read_len);
 	if (ch->dev_ops && ch->dev_ops->fwd_complete) {
-		ch->dev_ops->fwd_complete(ch->ctxt, ch->dci_read_ptr,
+		ch->dev_ops->fwd_complete(ch->id, ch->ctxt, ch->dci_read_ptr,
 					  ch->dci_read_len, 0);
 	}
 }
@@ -134,7 +146,8 @@
 	char wq_name[DIAG_BRIDGE_NAME_SZ + 10];
 
 	if (!ops) {
-		pr_err("diag: Invalid pointers ops: %pK ctxt: %d\n", ops, ctxt);
+		pr_err("diag: Invalid pointers ops: %pK ctxt: %d id: %d\n",
+			ops, ctxt, id);
 		return -EINVAL;
 	}
 
@@ -201,7 +214,7 @@
 	if (ch->type == DIAG_DATA_TYPE) {
 		err = diag_mux_write(BRIDGE_TO_MUX(id), buf, len, id);
 		if (ch->dev_ops && ch->dev_ops->queue_read)
-			ch->dev_ops->queue_read(ch->ctxt);
+			ch->dev_ops->queue_read(id, ch->ctxt);
 		return err;
 	}
 	/*
@@ -277,7 +290,7 @@
 	if (id < 0 || id >= NUM_REMOTE_DEV)
 		return -EINVAL;
 	if (bridge_info[id].dev_ops && bridge_info[id].dev_ops->close)
-		return bridge_info[id].dev_ops->close(bridge_info[id].ctxt);
+		return bridge_info[id].dev_ops->close(id, bridge_info[id].ctxt);
 	return 0;
 }
 
@@ -286,7 +299,7 @@
 	if (id < 0 || id >= NUM_REMOTE_DEV)
 		return -EINVAL;
 	if (bridge_info[id].dev_ops && bridge_info[id].dev_ops->write) {
-		return bridge_info[id].dev_ops->write(bridge_info[id].ctxt,
+		return bridge_info[id].dev_ops->write(id, bridge_info[id].ctxt,
 						      buf, len, 0);
 	}
 	return 0;
@@ -301,7 +314,7 @@
 		if (bridge_info[i].inited &&
 		    bridge_info[i].type == DIAG_DATA_TYPE &&
 		    (bridge_info[i].dev_ops->remote_proc_check &&
-		    bridge_info[i].dev_ops->remote_proc_check())) {
+		    bridge_info[i].dev_ops->remote_proc_check(i))) {
 			remote_dev |= 1 << i;
 		}
 	}
diff --git a/drivers/char/diag/diagfwd_bridge.h b/drivers/char/diag/diagfwd_bridge.h
index 82645fe..8cb0374 100644
--- a/drivers/char/diag/diagfwd_bridge.h
+++ b/drivers/char/diag/diagfwd_bridge.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2012-2014, 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef DIAGFWD_BRIDGE_H
@@ -10,22 +10,25 @@
  * bottom half of this list.
  */
 #define DIAGFWD_MDM		0
-#define DIAGFWD_SMUX		1
+#define DIAGFWD_MDM2		1
 #define NUM_REMOTE_DATA_DEV	2
 #define DIAGFWD_MDM_DCI		NUM_REMOTE_DATA_DEV
-#define NUM_REMOTE_DCI_DEV	(DIAGFWD_MDM_DCI - NUM_REMOTE_DATA_DEV + 1)
+#define DIAGFWD_MDM_DCI_2	(NUM_REMOTE_DATA_DEV + 1)
+#define NUM_REMOTE_DCI_DEV	(DIAGFWD_MDM_DCI_2 - NUM_REMOTE_DATA_DEV + 1)
 #define NUM_REMOTE_DEV		(NUM_REMOTE_DATA_DEV + NUM_REMOTE_DCI_DEV)
 
 #define DIAG_BRIDGE_NAME_SZ	24
 #define DIAG_BRIDGE_GET_NAME(x)	(bridge_info[x].name)
 
 struct diag_remote_dev_ops {
-	int (*open)(int id);
-	int (*close)(int id);
-	int (*queue_read)(int id);
-	int (*write)(int id, unsigned char *buf, int len, int ctxt);
-	int (*fwd_complete)(int id, unsigned char *buf, int len, int ctxt);
-	int (*remote_proc_check)(void);
+	int (*open)(int id, int  ch);
+	int (*close)(int id, int ch);
+	int (*queue_read)(int id, int ch);
+	int (*write)(int id, int ch, unsigned char *buf,
+			int len, int ctxt);
+	int (*fwd_complete)(int id, int ch, unsigned char *buf,
+				int len, int ctxt);
+	int (*remote_proc_check)(int id);
 };
 
 struct diagfwd_bridge_info {
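
Every diag_remote_dev_ops hook above now takes the bridge id plus a
device/channel argument. A standalone sketch of an implementation against the
widened prototypes (the struct is restated locally so the sketch compiles on
its own; the demo names are hypothetical):

#include <stdio.h>

struct diag_remote_dev_ops {
	int (*open)(int id, int ch);
	int (*close)(int id, int ch);
	int (*queue_read)(int id, int ch);
	int (*write)(int id, int ch, unsigned char *buf, int len, int ctxt);
	int (*fwd_complete)(int id, int ch, unsigned char *buf,
			    int len, int ctxt);
	int (*remote_proc_check)(int id);
};

static int demo_open(int id, int ch)
{
	printf("open id=%d ch=%d\n", id, ch);
	return 0;
}

static const struct diag_remote_dev_ops demo_ops = {
	.open = demo_open,
	/* remaining hooks may stay NULL; callers check before invoking */
};

int main(void)
{
	if (demo_ops.open)
		demo_ops.open(0, 0);
	return 0;
}
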
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index 9fef6ac..b9d5051 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/slab.h>
@@ -1131,6 +1131,9 @@
 		if (peripheral > NUM_PERIPHERALS)
 			peripheral = diag_search_peripheral_by_pd(i);
 
+		if (peripheral < 0 || peripheral > NUM_PERIPHERALS)
+			continue;
+
 		if (!driver->feature[peripheral].peripheral_buffering)
 			continue;
 		switch (driver->buffering_mode[i].mode) {
diff --git a/drivers/char/diag/diagfwd_mhi.c b/drivers/char/diag/diagfwd_mhi.c
index be3212c..b2afd89 100644
--- a/drivers/char/diag/diagfwd_mhi.c
+++ b/drivers/char/diag/diagfwd_mhi.c
@@ -36,43 +36,93 @@
 
 #define DIAG_MHI_STRING_SZ		11
 
-struct diag_mhi_info diag_mhi[NUM_MHI_DEV] = {
+struct diag_mhi_info diag_mhi[NUM_MHI_DEV][NUM_MHI_CHAN] = {
 	{
-		.id = MHI_1,
-		.dev_id = DIAGFWD_MDM,
-		.name = "MDM",
-		.enabled = 0,
-		.num_read = 0,
-		.mempool = POOL_TYPE_MDM,
-		.mempool_init = 0,
-		.mhi_wq = NULL,
-		.mhi_dev = NULL,
-		.read_ch = {
-			.type = TYPE_MHI_READ_CH,
+		{
+			.id = MHI_1,
+			.dev_id = DIAGFWD_MDM,
+			.name = "MDM",
+			.enabled = 0,
+			.num_read = 0,
+			.mempool = POOL_TYPE_MDM,
+			.mempool_init = 0,
+			.mhi_wq = NULL,
+			.mhi_dev = NULL,
+			.read_ch = {
+				.type = TYPE_MHI_READ_CH,
+			},
+			.write_ch = {
+				.type = TYPE_MHI_WRITE_CH,
+			}
 		},
-		.write_ch = {
-			.type = TYPE_MHI_WRITE_CH,
+		{
+			.id = MHI_DCI_1,
+			.dev_id = DIAGFWD_MDM_DCI,
+			.name = "MDM_DCI",
+			.enabled = 0,
+			.num_read = 0,
+			.mempool = POOL_TYPE_MDM_DCI,
+			.mempool_init = 0,
+			.mhi_wq = NULL,
+			.mhi_dev = NULL,
+			.read_ch = {
+				.type = TYPE_MHI_READ_CH,
+			},
+			.write_ch = {
+				.type = TYPE_MHI_WRITE_CH,
+			}
 		}
 	},
 	{
-		.id = MHI_DCI_1,
-		.dev_id = DIAGFWD_MDM_DCI,
-		.name = "MDM_DCI",
-		.enabled = 0,
-		.num_read = 0,
-		.mempool = POOL_TYPE_MDM_DCI,
-		.mempool_init = 0,
-		.mhi_wq = NULL,
-		.mhi_dev = NULL,
-		.read_ch = {
-			.type = TYPE_MHI_READ_CH,
+		{
+			.id = MHI_1,
+			.dev_id = DIAGFWD_MDM2,
+			.name = "MDM_2",
+			.enabled = 0,
+			.num_read = 0,
+			.mempool = POOL_TYPE_MDM2,
+			.mempool_init = 0,
+			.mhi_wq = NULL,
+			.mhi_dev = NULL,
+			.read_ch = {
+				.type = TYPE_MHI_READ_CH,
+			},
+			.write_ch = {
+				.type = TYPE_MHI_WRITE_CH,
+			}
 		},
-		.write_ch = {
-			.type = TYPE_MHI_WRITE_CH,
+		{
+			.id = MHI_DCI_1,
+			.dev_id = DIAGFWD_MDM_DCI_2,
+			.name = "MDM_DCI_2",
+			.enabled = 0,
+			.num_read = 0,
+			.mempool = POOL_TYPE_MDM2_DCI,
+			.mempool_init = 0,
+			.mhi_wq = NULL,
+			.mhi_dev = NULL,
+			.read_ch = {
+				.type = TYPE_MHI_READ_CH,
+			},
+			.write_ch = {
+				.type = TYPE_MHI_WRITE_CH,
+			}
 		}
 	}
-};
 
+};
+
+static int get_id_from_token(int token)
+{
+	int ch_idx = 0;
+	int dev_idx = 0;
+
+	for (dev_idx = 0; dev_idx < NUM_MHI_DEV; dev_idx++)
+		for (ch_idx = 0; ch_idx < NUM_MHI_CHAN; ch_idx++)
+			if (diag_mhi[dev_idx][ch_idx].dev_id == token)
+				return dev_idx;
+
+	return -EINVAL;
+}
 static int mhi_buf_tbl_add(struct diag_mhi_info *mhi_info, int type,
 			   void *buf, int len)
 {
@@ -228,21 +278,26 @@
 	return 0;
 }
 
-static int mhi_close(int id)
+static int mhi_close(int token, int ch)
 {
-	if (id < 0 || id >= NUM_MHI_DEV) {
-		pr_err("diag: In %s, invalid index %d\n", __func__, id);
+	int dev_idx = get_id_from_token(token);
+
+	if (dev_idx < 0 || dev_idx >= NUM_MHI_DEV) {
+		pr_err("diag: In %s, invalid index %d\n", __func__, dev_idx);
 		return -EINVAL;
 	}
 
-	if (!diag_mhi[id].enabled)
+	if (ch < 0 || ch >= NUM_MHI_CHAN) {
+		pr_err("diag: In %s, invalid channel %d\n", __func__, ch);
+		return -EINVAL;
+	}
+
+	if (!diag_mhi[dev_idx][ch].enabled)
 		return -ENODEV;
 	/*
 	 * This function is called whenever the channel needs to be closed
 	 * explicitly by Diag. Close both the read and write channels (denoted
 	 * by CLOSE_CHANNELS flag)
 	 */
-	return __mhi_close(&diag_mhi[id], CLOSE_CHANNELS);
+	return __mhi_close(&diag_mhi[dev_idx][ch], CLOSE_CHANNELS);
 }
 
 static void mhi_close_work_fn(struct work_struct *work)
@@ -259,7 +314,7 @@
 		__mhi_close(mhi_info, CHANNELS_CLOSED);
 }
 
-static int __mhi_open(struct diag_mhi_info *mhi_info, int open_flag)
+static int __mhi_open(struct diag_mhi_info *mhi_info, int token, int open_flag)
 {
 	int err = 0;
 
@@ -294,15 +349,21 @@
 	return 0;
 
 fail:
-	pr_err("diag: Failed to open mhi channlels, err: %d\n", err);
-	mhi_close(mhi_info->id);
+	mhi_close(token, mhi_info->id);
 	return err;
 }
 
-static int mhi_open(int id)
+static int mhi_open(int token, int ch)
 {
-	if (id < 0 || id >= NUM_MHI_DEV) {
-		pr_err("diag: In %s, invalid index %d\n", __func__, id);
+	int dev_idx = get_id_from_token(token);
+
+	if (dev_idx < 0 || dev_idx >= NUM_MHI_DEV) {
+		pr_err("diag: In %s, invalid index %d\n", __func__, dev_idx);
+		return -EINVAL;
+	}
+
+	if (ch < 0 || ch >= NUM_MHI_CHAN) {
+		pr_err("diag: In %s, invalid ch %d\n", __func__, ch);
 		return -EINVAL;
 	}
 
@@ -311,9 +372,10 @@
 	 * explicitly by Diag. Open both the read and write channels (denoted by
 	 * OPEN_CHANNELS flag)
 	 */
-	__mhi_open(&diag_mhi[id], OPEN_CHANNELS);
-	diag_remote_dev_open(diag_mhi[id].dev_id);
-	queue_work(diag_mhi[id].mhi_wq, &(diag_mhi[id].read_work));
+	__mhi_open(&diag_mhi[dev_idx][ch], token, OPEN_CHANNELS);
+	diag_remote_dev_open(diag_mhi[dev_idx][ch].dev_id);
+	queue_work(diag_mhi[dev_idx][ch].mhi_wq,
+			&(diag_mhi[dev_idx][ch].read_work));
 
 	return 0;
 }
@@ -441,64 +503,82 @@
 	queue_work(mhi_info->mhi_wq, &mhi_info->read_work);
 }
 
-static int mhi_queue_read(int id)
+static int mhi_queue_read(int token, int ch)
 {
-	if (id < 0 || id >= NUM_MHI_DEV) {
+	int dev_idx = get_id_from_token(token);
+
+	if (dev_idx < 0 || dev_idx >= NUM_MHI_DEV) {
 		pr_err_ratelimited("diag: In %s, invalid index %d\n", __func__,
-				   id);
+				   dev_idx);
 		return -EINVAL;
 	}
-	queue_work(diag_mhi[id].mhi_wq, &(diag_mhi[id].read_work));
+
+	if (ch < 0 || ch >= NUM_MHI_CHAN) {
+		pr_err_ratelimited("diag: In %s, invalid chan %d\n", __func__,
+					ch);
+		return -EINVAL;
+	}
+	queue_work(diag_mhi[dev_idx][ch].mhi_wq,
+			&(diag_mhi[dev_idx][ch].read_work));
 	return 0;
 }
 
-static int mhi_write(int id, unsigned char *buf, int len, int ctxt)
+static int mhi_write(int token, int ch, unsigned char *buf, int len, int ctxt)
 {
 	int err = 0;
 	enum MHI_FLAGS mhi_flags = MHI_EOT;
 	unsigned long flags;
-	struct diag_mhi_ch_t *ch = NULL;
+	struct diag_mhi_ch_t *ch_info = NULL;
+	int dev_idx = get_id_from_token(token);
 
-	if (id < 0 || id >= NUM_MHI_DEV) {
+	if (dev_idx < 0 || dev_idx >= NUM_MHI_DEV) {
 		pr_err_ratelimited("diag: In %s, invalid index %d\n", __func__,
-				   id);
+				   dev_idx);
+		return -EINVAL;
+	}
+
+	if (ch < 0 || ch >= NUM_MHI_CHAN) {
+		pr_err_ratelimited("diag: In %s, invalid chan %d\n", __func__,
+				   ch);
 		return -EINVAL;
 	}
 
 	if (!buf || len <= 0) {
 		pr_err("diag: In %s, ch %d, invalid buf %pK len %d\n",
-			__func__, id, buf, len);
+			__func__, dev_idx, buf, len);
 		return -EINVAL;
 	}
 
-	if (!diag_mhi[id].enabled) {
+	if (!diag_mhi[dev_idx][ch].enabled) {
 		pr_err_ratelimited("diag: In %s, MHI channel %s is not enabled\n",
-				   __func__, diag_mhi[id].name);
+				   __func__, diag_mhi[dev_idx][ch].name);
 		return -EIO;
 	}
 
-	ch = &diag_mhi[id].write_ch;
-	if (!(atomic_read(&(ch->opened)))) {
+	ch_info = &diag_mhi[dev_idx][ch].write_ch;
+	if (!(atomic_read(&(ch_info->opened)))) {
 		pr_err_ratelimited("diag: In %s, MHI write channel %s is not open\n",
-				   __func__, diag_mhi[id].name);
+				   __func__, diag_mhi[dev_idx][ch].name);
 		return -EIO;
 	}
 
-	spin_lock_irqsave(&ch->lock, flags);
-	err = mhi_buf_tbl_add(&diag_mhi[id], TYPE_MHI_WRITE_CH, buf,
+	spin_lock_irqsave(&ch_info->lock, flags);
+	err = mhi_buf_tbl_add(&diag_mhi[dev_idx][ch], TYPE_MHI_WRITE_CH, buf,
 			      len);
 	if (err) {
-		spin_unlock_irqrestore(&ch->lock, flags);
+		spin_unlock_irqrestore(&ch_info->lock, flags);
 		goto fail;
 	}
 
-	err = mhi_queue_transfer(diag_mhi[id].mhi_dev, DMA_TO_DEVICE, buf,
-				len, mhi_flags);
-	spin_unlock_irqrestore(&ch->lock, flags);
+	err = mhi_queue_transfer(diag_mhi[dev_idx][ch].mhi_dev, DMA_TO_DEVICE,
+					buf, len, mhi_flags);
+	spin_unlock_irqrestore(&ch_info->lock, flags);
 	if (err) {
-		pr_err_ratelimited("diag: In %s, cannot write to MHI channel %pK, len %d, err: %d\n",
-				   __func__, diag_mhi[id].name, len, err);
-		mhi_buf_tbl_remove(&diag_mhi[id], TYPE_MHI_WRITE_CH, buf, len);
+		pr_err_ratelimited("diag: In %s, cannot write to MHI channel %s, len %d, err: %d\n",
+					__func__, diag_mhi[dev_idx][ch].name,
+					len, err);
+		mhi_buf_tbl_remove(&diag_mhi[dev_idx][ch], TYPE_MHI_WRITE_CH,
+					buf, len);
 		goto fail;
 	}
 
@@ -507,36 +587,54 @@
 	return err;
 }
 
-static int mhi_fwd_complete(int id, unsigned char *buf, int len, int ctxt)
+static int mhi_fwd_complete(int token, int ch, unsigned char *buf,
+				int len, int ctxt)
 {
-	if (id < 0 || id >= NUM_MHI_DEV) {
+	int dev_idx = get_id_from_token(token);
+
+	if (dev_idx < 0 || dev_idx >= NUM_MHI_DEV) {
 		pr_err_ratelimited("diag: In %s, invalid index %d\n", __func__,
-				   id);
+				   dev_idx);
 		return -EINVAL;
 	}
 
+	if (ch < 0 || ch >= NUM_MHI_CHAN) {
+		pr_err_ratelimited("diag: In %s, invalid chan %d\n", __func__,
+				   ch);
+		return -EINVAL;
+	}
 	if (!buf)
 		return -EINVAL;
 
-	mhi_buf_tbl_remove(&diag_mhi[id], TYPE_MHI_READ_CH, buf, len);
-	queue_work(diag_mhi[id].mhi_wq, &(diag_mhi[id].read_work));
+	mhi_buf_tbl_remove(&diag_mhi[dev_idx][ch], TYPE_MHI_READ_CH,
+				buf, len);
+	queue_work(diag_mhi[dev_idx][ch].mhi_wq,
+			&(diag_mhi[dev_idx][ch].read_work));
 	return 0;
 }
 
-static int mhi_remote_proc_check(void)
+static int mhi_remote_proc_check(int token)
 {
-	return diag_mhi[MHI_1].enabled;
+	int dev_idx = get_id_from_token(token);
+
+	if (dev_idx >= 0 && dev_idx < NUM_MHI_DEV)
+		return diag_mhi[dev_idx][MHI_1].enabled;
+	else
+		return 0;
 }
 
 static struct diag_mhi_info *diag_get_mhi_info(struct mhi_device *mhi_dev)
 {
 	struct diag_mhi_info *mhi_info = NULL;
-	int i;
+	int ch;
+	int dev_idx;
 
-	for (i = 0; i < NUM_MHI_DEV; i++) {
-		mhi_info = &diag_mhi[i];
-		if (mhi_info->mhi_dev == mhi_dev)
-			return mhi_info;
+	for (dev_idx = 0; dev_idx < NUM_MHI_DEV; dev_idx++) {
+		for (ch = 0; ch < NUM_MHI_CHAN; ch++) {
+			mhi_info = &diag_mhi[dev_idx][ch];
+			if (mhi_info->mhi_dev == mhi_dev)
+				return mhi_info;
+		}
 	}
 	return NULL;
 }
@@ -635,22 +733,46 @@
 static int diag_mhi_probe(struct mhi_device *mhi_dev,
 			const struct mhi_device_id *id)
 {
-	int index = id->driver_data;
+	int dev_idx;
+	int ch = id->driver_data;
 	unsigned long flags;
-	struct diag_mhi_info *mhi_info = &diag_mhi[index];
+	struct diag_mhi_info *mhi_info;
 
+	switch (mhi_dev->dev_id) {
+	case MHI_DEV_ID_1:
+		dev_idx = 0;
+		break;
+	case MHI_DEV_ID_2:
+		dev_idx = 1;
+		break;
+	default:
+		return 0;
+	}
+
+	if (dev_idx < 0 || dev_idx >= NUM_MHI_DEV) {
+		pr_err_ratelimited(" In %s invalid dev index %d\n", __func__,
+					dev_idx);
+		return 0;
+	}
+
+	if (ch < 0 || ch >= NUM_MHI_CHAN) {
+		pr_err_ratelimited(" In %s invalid channel %d\n", __func__, ch);
+		return 0;
+	}
+
+	mhi_info = &diag_mhi[dev_idx][ch];
 	DIAG_LOG(DIAG_DEBUG_BRIDGE,
-		"received probe for %d\n",
-		index);
-	diag_mhi[index].mhi_dev = mhi_dev;
+		"received probe for dev:%d ch:%d\n",
+		dev_idx, ch);
+	mhi_info->mhi_dev = mhi_dev;
 	DIAG_LOG(DIAG_DEBUG_BRIDGE,
 		"diag: mhi device is ready to open\n");
 	spin_lock_irqsave(&mhi_info->lock, flags);
 	mhi_info->enabled = 1;
 	spin_unlock_irqrestore(&mhi_info->lock, flags);
-	__mhi_open(&diag_mhi[index], OPEN_CHANNELS);
-	queue_work(diag_mhi[index].mhi_wq,
-			   &(diag_mhi[index].open_work));
+	__mhi_open(mhi_info, mhi_info->dev_id, OPEN_CHANNELS);
+	queue_work(diag_mhi[dev_idx][ch].mhi_wq,
+			   &(diag_mhi[dev_idx][ch].open_work));
 	return 0;
 }
 
@@ -663,70 +785,87 @@
 	.remote_proc_check = mhi_remote_proc_check,
 };
 
-static void diag_mhi_dev_exit(int dev)
+static void diag_mhi_dev_exit(int dev_idx, int ch)
 {
 	struct diag_mhi_info *mhi_info = NULL;
 
-	mhi_info = &diag_mhi[dev];
+	if (dev_idx < 0 || dev_idx >= NUM_MHI_DEV) {
+		pr_err_ratelimited(" In %s invalid dev index %d\n", __func__,
+					dev_idx);
+		return;
+	}
+
+	if (ch < 0 || ch >= NUM_MHI_CHAN) {
+		pr_err_ratelimited(" In %s invalid channel %d\n", __func__, ch);
+		return;
+	}
+
+	mhi_info = &diag_mhi[dev_idx][ch];
 	if (!mhi_info)
 		return;
 	if (mhi_info->mhi_wq)
 		destroy_workqueue(mhi_info->mhi_wq);
-	mhi_close(mhi_info->id);
+	mhi_close(mhi_info->dev_id, mhi_info->id);
 	if (mhi_info->mempool_init)
 		diagmem_exit(driver, mhi_info->mempool);
 }
 
 int diag_mhi_init(void)
 {
-	int i;
-	int err = 0;
+	int ch, dev_idx, err = 0;
 	struct diag_mhi_info *mhi_info = NULL;
 	char wq_name[DIAG_MHI_NAME_SZ + DIAG_MHI_STRING_SZ];
 
-	for (i = 0; i < NUM_MHI_DEV; i++) {
-		mhi_info = &diag_mhi[i];
-		spin_lock_init(&mhi_info->lock);
-		spin_lock_init(&mhi_info->read_ch.lock);
-		spin_lock_init(&mhi_info->write_ch.lock);
-		INIT_LIST_HEAD(&mhi_info->read_ch.buf_tbl);
-		INIT_LIST_HEAD(&mhi_info->write_ch.buf_tbl);
-		atomic_set(&(mhi_info->read_ch.opened), 0);
-		atomic_set(&(mhi_info->write_ch.opened), 0);
-		INIT_WORK(&(mhi_info->read_work), mhi_read_work_fn);
-		INIT_LIST_HEAD(&mhi_info->read_done_list);
-		INIT_WORK(&(mhi_info->read_done_work), mhi_read_done_work_fn);
-		INIT_WORK(&(mhi_info->open_work), mhi_open_work_fn);
-		INIT_WORK(&(mhi_info->close_work), mhi_close_work_fn);
-		strlcpy(wq_name, "diag_mhi_", sizeof(wq_name));
-		strlcat(wq_name, mhi_info->name, sizeof(wq_name));
-		diagmem_init(driver, mhi_info->mempool);
-		mhi_info->mempool_init = 1;
-		mhi_info->mhi_wq = create_singlethread_workqueue(wq_name);
-		if (!mhi_info->mhi_wq)
-			goto fail;
-		err = diagfwd_bridge_register(mhi_info->dev_id, mhi_info->id,
-					      &diag_mhi_fwd_ops);
-		if (err) {
-			pr_err("diag: Unable to register MHI channel %d with bridge, err: %d\n",
-			       i, err);
-			goto fail;
+	for (dev_idx = 0; dev_idx < NUM_MHI_DEV; dev_idx++) {
+		for (ch = 0; ch < NUM_MHI_CHAN; ch++) {
+			mhi_info = &diag_mhi[dev_idx][ch];
+			spin_lock_init(&mhi_info->lock);
+			spin_lock_init(&mhi_info->read_ch.lock);
+			spin_lock_init(&mhi_info->write_ch.lock);
+			INIT_LIST_HEAD(&mhi_info->read_ch.buf_tbl);
+			INIT_LIST_HEAD(&mhi_info->write_ch.buf_tbl);
+			atomic_set(&(mhi_info->read_ch.opened), 0);
+			atomic_set(&(mhi_info->write_ch.opened), 0);
+			INIT_WORK(&(mhi_info->read_work), mhi_read_work_fn);
+			INIT_LIST_HEAD(&mhi_info->read_done_list);
+			INIT_WORK(&(mhi_info->read_done_work),
+					mhi_read_done_work_fn);
+			INIT_WORK(&(mhi_info->open_work), mhi_open_work_fn);
+			INIT_WORK(&(mhi_info->close_work), mhi_close_work_fn);
+			strlcpy(wq_name, "diag_mhi_", sizeof(wq_name));
+			strlcat(wq_name, mhi_info->name, sizeof(wq_name));
+			diagmem_init(driver, mhi_info->mempool);
+			mhi_info->mempool_init = 1;
+			mhi_info->mhi_wq =
+				create_singlethread_workqueue(wq_name);
+			if (!mhi_info->mhi_wq)
+				goto fail;
+			err = diagfwd_bridge_register(mhi_info->dev_id,
+							mhi_info->id,
+							&diag_mhi_fwd_ops);
+			if (err) {
+				pr_err("diag: Unable to register MHI channel %d of dev %d with bridge, err: %d\n",
+					ch, dev_idx, err);
+				goto fail;
+			}
+			DIAG_LOG(DIAG_DEBUG_BRIDGE,
+					"mhi dev %d port %d initialized\n",
+					dev_idx, ch);
 		}
-		DIAG_LOG(DIAG_DEBUG_BRIDGE, "mhi port %d is initailzed\n", i);
 	}
-
 	return 0;
 fail:
-	diag_mhi_dev_exit(i);
+	diag_mhi_dev_exit(dev_idx, ch);
 	return -ENOMEM;
 }
 
 void diag_mhi_exit(void)
 {
-	int i;
+	int ch, dev_idx;
 
-	for (i = 0; i < NUM_MHI_DEV; i++)
-		diag_mhi_dev_exit(i);
+	for (dev_idx = 0; dev_idx < NUM_MHI_DEV; dev_idx++)
+		for (ch = 0; ch < NUM_MHI_CHAN; ch++)
+			diag_mhi_dev_exit(dev_idx, ch);
 }
 
 static const struct mhi_device_id diag_mhi_match_table[] = {
diff --git a/drivers/char/diag/diagfwd_mhi.h b/drivers/char/diag/diagfwd_mhi.h
index 3a94109..0fcc6af 100644
--- a/drivers/char/diag/diagfwd_mhi.h
+++ b/drivers/char/diag/diagfwd_mhi.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef DIAGFWD_MHI_H
@@ -29,12 +29,17 @@
 #define MHI_1			0
 #define MHI_DCI_1		1
 #define NUM_MHI_DEV		2
+#define NUM_MHI_CHAN		2
 
 #define TYPE_MHI_READ_CH	0
 #define TYPE_MHI_WRITE_CH	1
 
 #define DIAG_MHI_NAME_SZ	24
 
+/* The MHI device IDs below come from the MHI controller */
+#define MHI_DEV_ID_1 0x306
+#define MHI_DEV_ID_2 0x1101
+
 struct diag_mhi_buf_tbl_t {
 	struct list_head link;
 	unsigned char *buf;
@@ -69,8 +74,7 @@
 	spinlock_t lock;
 };
 
-extern struct diag_mhi_info diag_mhi[NUM_MHI_DEV];
-
+extern struct diag_mhi_info diag_mhi[NUM_MHI_DEV][NUM_MHI_CHAN];
 int diag_mhi_init(void);
 void diag_mhi_exit(void);
 void diag_register_with_mhi(void);
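The header change above turns the flat diag_mhi[] table into a two-dimensional
one indexed first by device, then by channel, and the .c hunks bounds-check both
indices before touching it. A minimal userspace model of that validate-then-index
pattern (names mirror the driver, the helper is hypothetical):

```c
#include <stdio.h>

#define NUM_MHI_DEV	2
#define NUM_MHI_CHAN	2

struct diag_mhi_info {
	int dev_id;	/* which physical MHI device */
	int id;		/* channel id within that device */
};

/* Model table: [device index][channel index], as in the new header. */
static struct diag_mhi_info diag_mhi[NUM_MHI_DEV][NUM_MHI_CHAN];

/* Validate both indices before indexing, as diag_mhi_dev_exit() now does. */
static struct diag_mhi_info *diag_mhi_get(int dev_idx, int ch)
{
	if (dev_idx < 0 || dev_idx >= NUM_MHI_DEV)
		return NULL;
	if (ch < 0 || ch >= NUM_MHI_CHAN)
		return NULL;
	return &diag_mhi[dev_idx][ch];
}

int main(void)
{
	/* In-range lookup succeeds; out-of-range returns NULL. */
	printf("%p %p\n", (void *)diag_mhi_get(1, 1), (void *)diag_mhi_get(2, 0));
	return 0;
}
```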
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index dac895d..2a7d6c3 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -307,6 +307,20 @@
 
 	  If unsure, say Y.
 
+config HW_RANDOM_MSM_LEGACY
+	tristate "QTI MSM Random Number Generator support (LEGACY)"
+	depends on HW_RANDOM && ARCH_QCOM
+	select CRYPTO_AES
+	select CRYPTO_ECB
+	help
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on QTI MSM SoCs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called msm_rng.
+
+	  If unsure, say Y.
+
 config HW_RANDOM_ST
 	tristate "ST Microelectronics HW Random Number Generator support"
 	depends on HW_RANDOM && ARCH_STI
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index e35ec3c..65eed19 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -29,6 +29,7 @@
 obj-$(CONFIG_HW_RANDOM_HISI)	+= hisi-rng.o
 obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
 obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
+obj-$(CONFIG_HW_RANDOM_MSM_LEGACY) += msm_rng.o
 obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
 obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
 obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o
diff --git a/drivers/char/hw_random/msm_rng.c b/drivers/char/hw_random/msm_rng.c
new file mode 100644
index 0000000..4479b1d
--- /dev/null
+++ b/drivers/char/hw_random/msm_rng.c
@@ -0,0 +1,491 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2011-2013, 2015, 2017-2019 The Linux Foundation. All rights
+ * reserved.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <soc/qcom/socinfo.h>
+#include <linux/msm-bus.h>
+#include <linux/qrng.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <crypto/internal/rng.h>
+
+#include <linux/sched/signal.h>
+
+#define DRIVER_NAME "msm_rng"
+
+/* Device specific register offsets */
+#define PRNG_DATA_OUT_OFFSET    0x0000
+#define PRNG_STATUS_OFFSET	0x0004
+#define PRNG_LFSR_CFG_OFFSET	0x0100
+#define PRNG_CONFIG_OFFSET	0x0104
+
+/* Device specific register masks and config values */
+#define PRNG_LFSR_CFG_MASK	0xFFFF0000
+#define PRNG_LFSR_CFG_CLOCKS	0x0000DDDD
+#define PRNG_CONFIG_MASK	0xFFFFFFFD
+#define PRNG_HW_ENABLE		0x00000002
+
+#define MAX_HW_FIFO_DEPTH 16                     /* FIFO is 16 words deep */
+#define MAX_HW_FIFO_SIZE (MAX_HW_FIFO_DEPTH * 4) /* FIFO is 32 bits wide  */
+
+#define RETRY_MAX_CNT		5	/* max retry times to read register */
+#define RETRY_DELAY_INTERVAL	440	/* retry delay interval in us */
+
+struct msm_rng_device {
+	struct platform_device *pdev;
+	void __iomem *base;
+	struct clk *prng_clk;
+	uint32_t qrng_perf_client;
+	struct mutex rng_lock;
+};
+
+struct msm_rng_device msm_rng_device_info;
+static struct msm_rng_device *msm_rng_dev_cached;
+struct mutex cached_rng_lock;
+static long msm_rng_ioctl(struct file *filp, unsigned int cmd,
+				unsigned long arg)
+{
+	long ret = 0;
+
+	switch (cmd) {
+	case QRNG_IOCTL_RESET_BUS_BANDWIDTH:
+		pr_debug("calling msm_rng_bus_scale(LOW)\n");
+		ret = msm_bus_scale_client_update_request(
+				msm_rng_device_info.qrng_perf_client, 0);
+		if (ret)
+			pr_err("failed qrng_reset_bus_bw, ret = %ld\n", ret);
+		break;
+	default:
+		pr_err("Unsupported IOCTL call\n");
+		break;
+	}
+	return ret;
+}
+
+/*
+ * This function reads from the hardware random bit generator directly and
+ * returns the data to the caller.
+ */
+static int msm_rng_direct_read(struct msm_rng_device *msm_rng_dev,
+					void *data, size_t max)
+{
+	struct platform_device *pdev;
+	void __iomem *base;
+	size_t currsize = 0;
+	u32 val = 0;
+	u32 *retdata = data;
+	int ret;
+	int failed = 0;
+
+	pdev = msm_rng_dev->pdev;
+	base = msm_rng_dev->base;
+
+	/* no room for word data */
+	if (max < 4)
+		return 0;
+
+	mutex_lock(&msm_rng_dev->rng_lock);
+
+	if (msm_rng_dev->qrng_perf_client) {
+		ret = msm_bus_scale_client_update_request(
+				msm_rng_dev->qrng_perf_client, 1);
+		if (ret) {
+			pr_err("bus_scale_client_update_req failed\n");
+			goto bus_err;
+		}
+	}
+	/* enable PRNG clock */
+	if (msm_rng_dev->prng_clk) {
+		ret = clk_prepare_enable(msm_rng_dev->prng_clk);
+		if (ret) {
+			pr_err("failed to enable prng clock\n");
+			goto err;
+		}
+	}
+	/* read random data from h/w */
+	do {
+		/* check status bit if data is available */
+		if (!(readl_relaxed(base + PRNG_STATUS_OFFSET)
+				& 0x00000001)) {
+			if (failed++ == RETRY_MAX_CNT) {
+				if (currsize == 0)
+					pr_err("Data not available\n");
+				break;
+			}
+			udelay(RETRY_DELAY_INTERVAL);
+		} else {
+
+			/* read FIFO */
+			val = readl_relaxed(base + PRNG_DATA_OUT_OFFSET);
+
+			/* write data back to the caller's pointer */
+			*(retdata++) = val;
+			currsize += 4;
+			/* make sure we stay on a 32-bit boundary */
+			if ((max - currsize) < 4)
+				break;
+		}
+
+	} while (currsize < max);
+
+	/* vote to turn off clock */
+	if (msm_rng_dev->prng_clk)
+		clk_disable_unprepare(msm_rng_dev->prng_clk);
+err:
+	if (msm_rng_dev->qrng_perf_client) {
+		ret = msm_bus_scale_client_update_request(
+				msm_rng_dev->qrng_perf_client, 0);
+		if (ret)
+			pr_err("bus_scale_client_update_req failed\n");
+	}
+bus_err:
+	mutex_unlock(&msm_rng_dev->rng_lock);
+
+	val = 0L;
+	return currsize;
+}
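msm_rng_direct_read() above polls a status bit up to RETRY_MAX_CNT times, with a
RETRY_DELAY_INTERVAL microsecond delay between attempts, and copies one 32-bit
FIFO word per successful poll. A compilable sketch of that bounded-retry loop,
with stub functions standing in for the readl_relaxed() register accesses:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define RETRY_MAX_CNT		5	/* as in the driver */

/* Stub: pretend data becomes available on every other poll. */
static int status_ready(void)
{
	static int n;
	return (n++ & 1);
}

static uint32_t read_fifo(void)
{
	return 0x12345678u;	/* stand-in for PRNG_DATA_OUT */
}

/* Copy whole 32-bit words into data until max is reached or retries expire. */
static size_t bounded_read(void *data, size_t max)
{
	uint32_t *out = data;
	size_t currsize = 0;
	int failed = 0;

	if (max < 4)
		return 0;	/* no room for a full word */

	do {
		if (!status_ready()) {
			if (failed++ == RETRY_MAX_CNT)
				break;	/* give up after bounded retries */
			/* udelay(RETRY_DELAY_INTERVAL) in the real driver */
		} else {
			*out++ = read_fifo();
			currsize += 4;
			if (max - currsize < 4)
				break;	/* stay on a 32-bit boundary */
		}
	} while (currsize < max);

	return currsize;
}

int main(void)
{
	unsigned char buf[10];

	printf("read %zu bytes\n", bounded_read(buf, sizeof(buf)));
	return 0;
}
```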
+static int msm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+	struct msm_rng_device *msm_rng_dev;
+	int rv = 0;
+
+	msm_rng_dev = (struct msm_rng_device *)rng->priv;
+	rv = msm_rng_direct_read(msm_rng_dev, data, max);
+
+	return rv;
+}
+
+
+static struct hwrng msm_rng = {
+	.name = DRIVER_NAME,
+	.read = msm_rng_read,
+	.quality = 1024,
+};
+
+static int msm_rng_enable_hw(struct msm_rng_device *msm_rng_dev)
+{
+	unsigned long val = 0;
+	unsigned long reg_val = 0;
+	int ret = 0;
+
+	if (msm_rng_dev->qrng_perf_client) {
+		ret = msm_bus_scale_client_update_request(
+				msm_rng_dev->qrng_perf_client, 1);
+		if (ret)
+			pr_err("bus_scale_client_update_req failed\n");
+	}
+	/* Enable the PRNG CLK */
+	if (msm_rng_dev->prng_clk) {
+		ret = clk_prepare_enable(msm_rng_dev->prng_clk);
+		if (ret) {
+			dev_err(&(msm_rng_dev->pdev)->dev,
+				"failed to enable clock in probe\n");
+			return -EPERM;
+		}
+	}
+
+	/* Enable PRNG h/w only if it is NOT ON */
+	val = readl_relaxed(msm_rng_dev->base + PRNG_CONFIG_OFFSET) &
+					PRNG_HW_ENABLE;
+	/* PRNG H/W is not ON */
+	if (val != PRNG_HW_ENABLE) {
+		val = readl_relaxed(msm_rng_dev->base + PRNG_LFSR_CFG_OFFSET);
+		val &= PRNG_LFSR_CFG_MASK;
+		val |= PRNG_LFSR_CFG_CLOCKS;
+		writel_relaxed(val, msm_rng_dev->base + PRNG_LFSR_CFG_OFFSET);
+
+		/* Ensure the LFSR config lands before the PRNG CONFIG write */
+		mb();
+
+		reg_val = readl_relaxed(msm_rng_dev->base + PRNG_CONFIG_OFFSET)
+						& PRNG_CONFIG_MASK;
+		reg_val |= PRNG_HW_ENABLE;
+		writel_relaxed(reg_val, msm_rng_dev->base + PRNG_CONFIG_OFFSET);
+
+		/* The PRNG clk should be disabled only after we enable the
+		 * PRNG h/w by writing to the PRNG CONFIG register.
+		 */
+		mb();
+	}
+	if (msm_rng_dev->prng_clk)
+		clk_disable_unprepare(msm_rng_dev->prng_clk);
+
+	if (msm_rng_dev->qrng_perf_client) {
+		ret = msm_bus_scale_client_update_request(
+				msm_rng_dev->qrng_perf_client, 0);
+		if (ret)
+			pr_err("bus_scale_client_update_req failed\n");
+	}
+
+	return 0;
+}
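msm_rng_enable_hw() performs two read-modify-write register updates (the LFSR
clock field, then the enable bit in CONFIG) with a full barrier between them so
the device observes the writes in order. A small userspace model of the two
masked updates, using the mask values from the driver:

```c
#include <stdint.h>
#include <stdio.h>

#define PRNG_LFSR_CFG_MASK	0xFFFF0000u
#define PRNG_LFSR_CFG_CLOCKS	0x0000DDDDu
#define PRNG_CONFIG_MASK	0xFFFFFFFDu
#define PRNG_HW_ENABLE		0x00000002u

int main(void)
{
	uint32_t lfsr = 0x1234ABCD, config = 0;

	/* Keep the masked-in bits, replace the clock field. */
	lfsr = (lfsr & PRNG_LFSR_CFG_MASK) | PRNG_LFSR_CFG_CLOCKS;
	/* mb() in the driver: the LFSR write must land before the enable. */
	config = (config & PRNG_CONFIG_MASK) | PRNG_HW_ENABLE;

	printf("lfsr=%#x config=%#x\n", lfsr, config);
	return 0;
}
```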
+
+static const struct file_operations msm_rng_fops = {
+	.unlocked_ioctl = msm_rng_ioctl,
+};
+static struct class *msm_rng_class;
+static struct cdev msm_rng_cdev;
+
+static int msm_rng_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct msm_rng_device *msm_rng_dev = NULL;
+	void __iomem *base = NULL;
+	bool configure_qrng = true;
+	int error = 0;
+	int ret = 0;
+	struct device *dev;
+
+	struct msm_bus_scale_pdata *qrng_platform_support = NULL;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "invalid address\n");
+		error = -EFAULT;
+		goto err_exit;
+	}
+
+	msm_rng_dev = kzalloc(sizeof(struct msm_rng_device), GFP_KERNEL);
+	if (!msm_rng_dev) {
+		error = -ENOMEM;
+		goto err_exit;
+	}
+
+	base = ioremap(res->start, resource_size(res));
+	if (!base) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		error = -ENOMEM;
+		goto err_iomap;
+	}
+	msm_rng_dev->base = base;
+
+	/* create a handle for clock control */
+	if (pdev->dev.of_node) {
+		if (of_property_read_bool(pdev->dev.of_node,
+					"qcom,no-clock-support")) {
+			msm_rng_dev->prng_clk = NULL;
+		} else {
+			if (of_property_read_bool(pdev->dev.of_node,
+					"qcom,msm-rng-iface-clk")) {
+				msm_rng_dev->prng_clk = clk_get(&pdev->dev,
+							"iface_clk");
+			} else {
+				msm_rng_dev->prng_clk = clk_get(&pdev->dev,
+							 "core_clk");
+			}
+		}
+	}
+
+	if (IS_ERR(msm_rng_dev->prng_clk)) {
+		dev_err(&pdev->dev, "failed to register clock source\n");
+		error = -EPERM;
+		goto err_clk_get;
+	}
+
+	/* save away pdev and register driver data */
+	msm_rng_dev->pdev = pdev;
+	platform_set_drvdata(pdev, msm_rng_dev);
+
+	if (pdev->dev.of_node) {
+		/* Register bus client */
+		qrng_platform_support = msm_bus_cl_get_pdata(pdev);
+		msm_rng_dev->qrng_perf_client = msm_bus_scale_register_client(
+						qrng_platform_support);
+		msm_rng_device_info.qrng_perf_client =
+					msm_rng_dev->qrng_perf_client;
+		if (!msm_rng_dev->qrng_perf_client)
+			pr_err("Unable to register bus client\n");
+	}
+
+	/* Enable rng h/w for the targets which can access the entire
+	 * address space of PRNG.
+	 */
+	if ((pdev->dev.of_node) && (of_property_read_bool(pdev->dev.of_node,
+					"qcom,no-qrng-config")))
+		configure_qrng = false;
+	if (configure_qrng) {
+		error = msm_rng_enable_hw(msm_rng_dev);
+		if (error)
+			goto rollback_clk;
+	}
+
+	mutex_init(&msm_rng_dev->rng_lock);
+	mutex_init(&cached_rng_lock);
+
+	/* register with hwrng framework */
+	msm_rng.priv = (unsigned long) msm_rng_dev;
+	error = hwrng_register(&msm_rng);
+	if (error) {
+		dev_err(&pdev->dev, "failed to register hwrng\n");
+		error = -EPERM;
+		goto rollback_clk;
+	}
+	ret = register_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME, &msm_rng_fops);
+
+	msm_rng_class = class_create(THIS_MODULE, "msm-rng");
+	if (IS_ERR(msm_rng_class)) {
+		pr_err("class_create failed\n");
+		return PTR_ERR(msm_rng_class);
+	}
+
+	dev = device_create(msm_rng_class, NULL, MKDEV(QRNG_IOC_MAGIC, 0),
+				NULL, "msm-rng");
+	if (IS_ERR(dev)) {
+		pr_err("Device create failed\n");
+		error = PTR_ERR(dev);
+		goto unregister_chrdev;
+	}
+	cdev_init(&msm_rng_cdev, &msm_rng_fops);
+	msm_rng_dev_cached = msm_rng_dev;
+	return error;
+
+unregister_chrdev:
+	unregister_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME);
+rollback_clk:
+	if (msm_rng_dev->prng_clk)
+		clk_put(msm_rng_dev->prng_clk);
+err_clk_get:
+	iounmap(msm_rng_dev->base);
+err_iomap:
+	kzfree(msm_rng_dev);
+err_exit:
+	return error;
+}
+
+static int msm_rng_remove(struct platform_device *pdev)
+{
+	struct msm_rng_device *msm_rng_dev = platform_get_drvdata(pdev);
+
+	unregister_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME);
+	hwrng_unregister(&msm_rng);
+	if (msm_rng_dev->prng_clk)
+		clk_put(msm_rng_dev->prng_clk);
+	iounmap(msm_rng_dev->base);
+	platform_set_drvdata(pdev, NULL);
+	if (msm_rng_dev->qrng_perf_client)
+		msm_bus_scale_unregister_client(msm_rng_dev->qrng_perf_client);
+
+	kzfree(msm_rng_dev);
+	msm_rng_dev_cached = NULL;
+	return 0;
+}
+
+static int qrng_get_random(struct crypto_rng *tfm, const u8 *src,
+				unsigned int slen, u8 *rdata,
+				unsigned int dlen)
+{
+	int sizeread = 0;
+	int rv = -EFAULT;
+
+	if (!msm_rng_dev_cached) {
+		pr_err("%s: msm_rng_dev is not initialized\n", __func__);
+		rv = -ENODEV;
+		goto err_exit;
+	}
+
+	if (!rdata) {
+		pr_err("%s: data buffer is null\n", __func__);
+		rv = -EINVAL;
+		goto err_exit;
+	}
+
+	if (signal_pending(current) ||
+		mutex_lock_interruptible(&cached_rng_lock)) {
+		pr_err("%s: mutex lock interrupted\n", __func__);
+		rv = -ERESTARTSYS;
+		goto err_exit;
+	}
+	sizeread = msm_rng_direct_read(msm_rng_dev_cached, rdata, dlen);
+
+	if (sizeread == dlen)
+		rv = 0;
+
+	mutex_unlock(&cached_rng_lock);
+err_exit:
+	return rv;
+
+}
+
+static int qrng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
+{
+	return 0;
+}
+
+static struct rng_alg rng_algs[] = { {
+	.generate	= qrng_get_random,
+	.seed		= qrng_reset,
+	.seedsize	= 0,
+	.base		= {
+		.cra_name		= "qrng",
+		.cra_driver_name	= "fips_hw_qrng",
+		.cra_priority		= 300,
+		.cra_ctxsize		= 0,
+		.cra_module		= THIS_MODULE,
+	}
+} };
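rng_algs registers the hardware RNG as a "qrng" crypto_rng transform, so
in-kernel users fetch bytes through the crypto API rather than the hwrng char
device. A hedged sketch of such a consumer (kernel-only code, not standalone;
the calls are the standard crypto_rng API, and qrng_fill is a hypothetical
helper):

```c
#include <crypto/rng.h>
#include <linux/err.h>

/* Sketch: pull dlen random bytes through the "qrng" transform. */
static int qrng_fill(u8 *buf, unsigned int dlen)
{
	struct crypto_rng *rng;
	int rv;

	rng = crypto_alloc_rng("qrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* Ends up in qrng_get_random() via the .generate callback. */
	rv = crypto_rng_get_bytes(rng, buf, dlen);
	crypto_free_rng(rng);
	return rv;
}
```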
+
+static const struct of_device_id qrng_match[] = {
+	{	.compatible = "qcom,msm-rng",
+	},
+	{},
+};
+
+static struct platform_driver rng_driver = {
+	.probe      = msm_rng_probe,
+	.remove     = msm_rng_remove,
+	.driver     = {
+		.name   = DRIVER_NAME,
+		.of_match_table = qrng_match,
+	},
+};
+
+static int __init msm_rng_init(void)
+{
+	int ret;
+
+	msm_rng_dev_cached = NULL;
+	ret = platform_driver_register(&rng_driver);
+	if (ret) {
+		pr_err("%s: platform_driver_register error:%d\n",
+			__func__, ret);
+		goto err_exit;
+	}
+	ret = crypto_register_rngs(rng_algs, ARRAY_SIZE(rng_algs));
+	if (ret) {
+		pr_err("%s: crypto_register_algs error:%d\n",
+			__func__, ret);
+		goto err_exit;
+	}
+
+err_exit:
+	return ret;
+}
+
+module_init(msm_rng_init);
+
+static void __exit msm_rng_exit(void)
+{
+	crypto_unregister_rngs(rng_algs, ARRAY_SIZE(rng_algs));
+	platform_driver_unregister(&rng_driver);
+}
+
+module_exit(msm_rng_exit);
+
+MODULE_DESCRIPTION("QTI MSM Random Number Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 7fc9612..d5f7a12 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -29,6 +29,7 @@
 #include <linux/moduleparam.h>
 #include <linux/workqueue.h>
 #include <linux/uuid.h>
+#include <linux/nospec.h>
 
 #define PFX "IPMI message handler: "
 
@@ -61,7 +62,8 @@
 { }
 #endif
 
-static int initialized;
+static bool initialized;
+static bool drvregistered;
 
 enum ipmi_panic_event_op {
 	IPMI_SEND_PANIC_EVENT_NONE,
@@ -611,7 +613,7 @@
 
 static LIST_HEAD(ipmi_interfaces);
 static DEFINE_MUTEX(ipmi_interfaces_mutex);
-DEFINE_STATIC_SRCU(ipmi_interfaces_srcu);
+struct srcu_struct ipmi_interfaces_srcu;
 
 /*
  * List of watchers that want to know when smi's are added and deleted.
@@ -719,7 +721,15 @@
 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
 {
 	struct ipmi_smi *intf;
-	int index;
+	int index, rv;
+
+	/*
+	 * Make sure the driver is actually initialized, this handles
+	 * problems with initialization order.
+	 */
+	rv = ipmi_init_msghandler();
+	if (rv)
+		return rv;
 
 	mutex_lock(&smi_watchers_mutex);
 
@@ -883,7 +893,7 @@
 
 		if (user) {
 			user->handler->ipmi_recv_hndl(msg, user->handler_data);
-			release_ipmi_user(msg->user, index);
+			release_ipmi_user(user, index);
 		} else {
 			/* User went away, give up. */
 			ipmi_free_recv_msg(msg);
@@ -1075,7 +1085,7 @@
 {
 	unsigned long flags;
 	struct ipmi_user *new_user;
-	int           rv = 0, index;
+	int           rv, index;
 	struct ipmi_smi *intf;
 
 	/*
@@ -1093,18 +1103,9 @@
 	 * Make sure the driver is actually initialized, this handles
 	 * problems with initialization order.
 	 */
-	if (!initialized) {
-		rv = ipmi_init_msghandler();
-		if (rv)
-			return rv;
-
-		/*
-		 * The init code doesn't return an error if it was turned
-		 * off, but it won't initialize.  Check that.
-		 */
-		if (!initialized)
-			return -ENODEV;
-	}
+	rv = ipmi_init_msghandler();
+	if (rv)
+		return rv;
 
 	new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
 	if (!new_user)
@@ -1182,6 +1183,7 @@
 static void free_user(struct kref *ref)
 {
 	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
+	cleanup_srcu_struct(&user->release_barrier);
 	kfree(user);
 }
 
@@ -1258,7 +1260,6 @@
 {
 	_ipmi_destroy_user(user);
 
-	cleanup_srcu_struct(&user->release_barrier);
 	kref_put(&user->refcount, free_user);
 
 	return 0;
@@ -1297,10 +1298,12 @@
 	if (!user)
 		return -ENODEV;
 
-	if (channel >= IPMI_MAX_CHANNELS)
+	if (channel >= IPMI_MAX_CHANNELS) {
 		rv = -EINVAL;
-	else
+	} else {
+		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
 		user->intf->addrinfo[channel].address = address;
+	}
 	release_ipmi_user(user, index);
 
 	return rv;
@@ -1317,10 +1320,12 @@
 	if (!user)
 		return -ENODEV;
 
-	if (channel >= IPMI_MAX_CHANNELS)
+	if (channel >= IPMI_MAX_CHANNELS) {
 		rv = -EINVAL;
-	else
+	} else {
+		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
 		*address = user->intf->addrinfo[channel].address;
+	}
 	release_ipmi_user(user, index);
 
 	return rv;
@@ -1337,10 +1342,12 @@
 	if (!user)
 		return -ENODEV;
 
-	if (channel >= IPMI_MAX_CHANNELS)
+	if (channel >= IPMI_MAX_CHANNELS) {
 		rv = -EINVAL;
-	else
+	} else {
+		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
 		user->intf->addrinfo[channel].lun = LUN & 0x3;
+	}
 	release_ipmi_user(user, index);
 
 	return 0;
@@ -1357,10 +1364,12 @@
 	if (!user)
 		return -ENODEV;
 
-	if (channel >= IPMI_MAX_CHANNELS)
+	if (channel >= IPMI_MAX_CHANNELS) {
 		rv = -EINVAL;
-	else
+	} else {
+		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
 		*address = user->intf->addrinfo[channel].lun;
+	}
 	release_ipmi_user(user, index);
 
 	return rv;
@@ -2184,6 +2193,7 @@
 {
 	if (addr->channel >= IPMI_MAX_CHANNELS)
 		return -EINVAL;
+	addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
 	*lun = intf->addrinfo[addr->channel].lun;
 	*saddr = intf->addrinfo[addr->channel].address;
 	return 0;
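The hunks above clamp the channel with array_index_nospec() after the bounds
check, so a mispredicted branch cannot speculatively index past
IPMI_MAX_CHANNELS (the Spectre-v1 mitigation pattern). A minimal userspace model
of where the clamp sits; the real helper lives in <linux/nospec.h> and clamps
without a branch, which this stand-in does not attempt to reproduce:

```c
#include <stdio.h>

#define IPMI_MAX_CHANNELS 16

static int addrinfo[IPMI_MAX_CHANNELS];

/* Stand-in for array_index_nospec(): the kernel version is branch-free
 * so it stays safe even under speculative execution. */
static unsigned long index_nospec_model(unsigned long idx, unsigned long sz)
{
	return idx < sz ? idx : 0;
}

static int get_addr(unsigned int channel, int *out)
{
	if (channel >= IPMI_MAX_CHANNELS)
		return -1;			/* architectural bounds check */
	channel = index_nospec_model(channel, IPMI_MAX_CHANNELS);
	*out = addrinfo[channel];		/* index only after the clamp */
	return 0;
}

int main(void)
{
	int v;

	printf("%d\n", get_addr(3, &v));	/* 0: in range */
	printf("%d\n", get_addr(99, &v));	/* -1: rejected */
	return 0;
}
```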
@@ -3294,17 +3304,9 @@
 	 * Make sure the driver is actually initialized, this handles
 	 * problems with initialization order.
 	 */
-	if (!initialized) {
-		rv = ipmi_init_msghandler();
-		if (rv)
-			return rv;
-		/*
-		 * The init code doesn't return an error if it was turned
-		 * off, but it won't initialize.  Check that.
-		 */
-		if (!initialized)
-			return -ENODEV;
-	}
+	rv = ipmi_init_msghandler();
+	if (rv)
+		return rv;
 
 	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
 	if (!intf)
@@ -5020,6 +5022,22 @@
 	return NOTIFY_DONE;
 }
 
+/* Must be called with ipmi_interfaces_mutex held. */
+static int ipmi_register_driver(void)
+{
+	int rv;
+
+	if (drvregistered)
+		return 0;
+
+	rv = driver_register(&ipmidriver.driver);
+	if (rv)
+		pr_err("Could not register IPMI driver\n");
+	else
+		drvregistered = true;
+	return rv;
+}
+
 static struct notifier_block panic_block = {
 	.notifier_call	= panic_event,
 	.next		= NULL,
@@ -5030,66 +5048,74 @@
 {
 	int rv;
 
+	mutex_lock(&ipmi_interfaces_mutex);
+	rv = ipmi_register_driver();
+	if (rv)
+		goto out;
 	if (initialized)
-		return 0;
+		goto out;
 
-	rv = driver_register(&ipmidriver.driver);
-	if (rv) {
-		pr_err(PFX "Could not register IPMI driver\n");
-		return rv;
-	}
-
-	pr_info("ipmi message handler version " IPMI_DRIVER_VERSION "\n");
+	init_srcu_struct(&ipmi_interfaces_srcu);
 
 	timer_setup(&ipmi_timer, ipmi_timeout, 0);
 	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
 
 	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
 
-	initialized = 1;
+	initialized = true;
 
-	return 0;
+out:
+	mutex_unlock(&ipmi_interfaces_mutex);
+	return rv;
 }
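After this change ipmi_init_msghandler() is safe to call from every registration
path: it takes the interfaces mutex, registers the driver at most once, and runs
the one-time setup only on the first call. A compilable model of that
idempotent-init shape, with pthreads standing in for the kernel mutex:

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static bool drvregistered, initialized;

static int register_driver(void)	/* stand-in for driver_register() */
{
	if (drvregistered)
		return 0;
	drvregistered = true;
	puts("driver registered");
	return 0;
}

/* Callable from any entry point; the work happens at most once. */
static int init_msghandler(void)
{
	int rv;

	pthread_mutex_lock(&init_lock);
	rv = register_driver();
	if (rv || initialized)
		goto out;
	puts("one-time setup");	/* srcu, timer, panic notifier in the driver */
	initialized = true;
out:
	pthread_mutex_unlock(&init_lock);
	return rv;
}

int main(void)
{
	init_msghandler();
	init_msghandler();	/* second call is a no-op */
	return 0;
}
```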
 
 static int __init ipmi_init_msghandler_mod(void)
 {
-	ipmi_init_msghandler();
-	return 0;
+	int rv;
+
+	pr_info("version " IPMI_DRIVER_VERSION "\n");
+
+	mutex_lock(&ipmi_interfaces_mutex);
+	rv = ipmi_register_driver();
+	mutex_unlock(&ipmi_interfaces_mutex);
+
+	return rv;
 }
 
 static void __exit cleanup_ipmi(void)
 {
 	int count;
 
-	if (!initialized)
-		return;
+	if (initialized) {
+		atomic_notifier_chain_unregister(&panic_notifier_list,
+						 &panic_block);
 
-	atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
+		/*
+		 * This can't be called if any interfaces exist, so no worry
+		 * about shutting down the interfaces.
+		 */
 
-	/*
-	 * This can't be called if any interfaces exist, so no worry
-	 * about shutting down the interfaces.
-	 */
+		/*
+		 * Tell the timer to stop, then wait for it to stop.  This
+		 * avoids problems with race conditions removing the timer
+		 * here.
+		 */
+		atomic_inc(&stop_operation);
+		del_timer_sync(&ipmi_timer);
 
-	/*
-	 * Tell the timer to stop, then wait for it to stop.  This
-	 * avoids problems with race conditions removing the timer
-	 * here.
-	 */
-	atomic_inc(&stop_operation);
-	del_timer_sync(&ipmi_timer);
+		initialized = false;
 
-	driver_unregister(&ipmidriver.driver);
-
-	initialized = 0;
-
-	/* Check for buffer leaks. */
-	count = atomic_read(&smi_msg_inuse_count);
-	if (count != 0)
-		pr_warn(PFX "SMI message count %d at exit\n", count);
-	count = atomic_read(&recv_msg_inuse_count);
-	if (count != 0)
-		pr_warn(PFX "recv message count %d at exit\n", count);
+		/* Check for buffer leaks. */
+		count = atomic_read(&smi_msg_inuse_count);
+		if (count != 0)
+			pr_warn(PFX "SMI message count %d at exit\n", count);
+		count = atomic_read(&recv_msg_inuse_count);
+		if (count != 0)
+			pr_warn(PFX "recv message count %d at exit\n", count);
+		cleanup_srcu_struct(&ipmi_interfaces_srcu);
+	}
+	if (drvregistered)
+		driver_unregister(&ipmidriver.driver);
 }
 module_exit(cleanup_ipmi);
 
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 9b78672..76c2010 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -630,8 +630,9 @@
 
 		/* Remove the multi-part read marker. */
 		len -= 2;
+		data += 2;
 		for (i = 0; i < len; i++)
-			ssif_info->data[i] = data[i+2];
+			ssif_info->data[i] = data[i];
 		ssif_info->multi_len = len;
 		ssif_info->multi_pos = 1;
 
@@ -659,8 +660,19 @@
 		}
 
 		blocknum = data[0];
+		len--;
+		data++;
 
-		if (ssif_info->multi_len + len - 1 > IPMI_MAX_MSG_LENGTH) {
+		if (blocknum != 0xff && len != 31) {
+			/* All blocks but the last must have 31 data bytes. */
+			result = -EIO;
+			if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+				pr_info("Received middle message with bad length\n");
+
+			goto continue_op;
+		}
+
+		if (ssif_info->multi_len + len > IPMI_MAX_MSG_LENGTH) {
 			/* Received message too big, abort the operation. */
 			result = -E2BIG;
 			if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
@@ -669,16 +681,14 @@
 			goto continue_op;
 		}
 
-		/* Remove the blocknum from the data. */
-		len--;
 		for (i = 0; i < len; i++)
-			ssif_info->data[i + ssif_info->multi_len] = data[i + 1];
+			ssif_info->data[i + ssif_info->multi_len] = data[i];
 		ssif_info->multi_len += len;
 		if (blocknum == 0xff) {
 			/* End of read */
 			len = ssif_info->multi_len;
 			data = ssif_info->data;
-		} else if (blocknum + 1 != ssif_info->multi_pos) {
+		} else if (blocknum != ssif_info->multi_pos) {
 			/*
 			 * Out of sequence block, just abort.  Block
 			 * numbers start at zero for the second block,
@@ -706,6 +716,7 @@
 		}
 	}
 
+ continue_op:
 	if (result < 0) {
 		ssif_inc_stat(ssif_info, receive_errors);
 	} else {
@@ -713,8 +724,6 @@
 		ssif_inc_stat(ssif_info, received_message_parts);
 	}
 
-
- continue_op:
 	if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
 		pr_info(PFX "DONE 1: state = %d, result=%d.\n",
 			ssif_info->ssif_state, result);
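The SSIF fix above advances data/len past the framing bytes before any copy,
requires every middle block to carry exactly 31 data bytes, and treats block
number 0xff as the terminator. A simplified userspace model of that reassembly
logic, under those three rules (the block numbering here is a simplification,
not the driver's exact sequence handling):

```c
#include <stdio.h>
#include <string.h>

#define IPMI_MAX_MSG_LENGTH 272

static unsigned char msg[IPMI_MAX_MSG_LENGTH];
static int multi_len, multi_pos = 1;

/* Feed one middle/end block: data[0] is the block number. */
static int feed_block(const unsigned char *data, int len)
{
	unsigned char blocknum = data[0];

	data++;
	len--;					/* strip the block number */
	if (blocknum != 0xff && len != 31)
		return -1;			/* middle blocks are 31 bytes */
	if (multi_len + len > IPMI_MAX_MSG_LENGTH)
		return -1;			/* would overflow the buffer */
	if (blocknum != 0xff && blocknum != multi_pos)
		return -1;			/* out-of-sequence block */
	memcpy(msg + multi_len, data, len);
	multi_len += len;
	if (blocknum == 0xff)
		return 1;			/* end of read */
	multi_pos++;
	return 0;
}

int main(void)
{
	unsigned char block[32] = { 1 };	/* block #1 + 31 data bytes */

	printf("%d\n", feed_block(block, sizeof(block)));	/* 0 */
	block[0] = 0xff;			/* terminator may be short */
	printf("%d\n", feed_block(block, 5));			/* 1 */
	return 0;
}
```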
diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
index b5e3103..e43c876 100644
--- a/drivers/char/mwave/mwavedd.c
+++ b/drivers/char/mwave/mwavedd.c
@@ -59,6 +59,7 @@
 #include <linux/mutex.h>
 #include <linux/delay.h>
 #include <linux/serial_8250.h>
+#include <linux/nospec.h>
 #include "smapi.h"
 #include "mwavedd.h"
 #include "3780i.h"
@@ -289,6 +290,8 @@
 						ipcnum);
 				return -EINVAL;
 			}
+			ipcnum = array_index_nospec(ipcnum,
+						    ARRAY_SIZE(pDrvData->IPCs));
 			PRINTK_3(TRACE_MWAVE,
 				"mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
 				" ipcnum %x entry usIntCount %x\n",
@@ -317,6 +320,8 @@
 						" Invalid ipcnum %x\n", ipcnum);
 				return -EINVAL;
 			}
+			ipcnum = array_index_nospec(ipcnum,
+						    ARRAY_SIZE(pDrvData->IPCs));
 			PRINTK_3(TRACE_MWAVE,
 				"mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
 				" ipcnum %x, usIntCount %x\n",
@@ -383,6 +388,8 @@
 						ipcnum);
 				return -EINVAL;
 			}
+			ipcnum = array_index_nospec(ipcnum,
+						    ARRAY_SIZE(pDrvData->IPCs));
 			mutex_lock(&mwave_mutex);
 			if (pDrvData->IPCs[ipcnum].bIsEnabled == true) {
 				pDrvData->IPCs[ipcnum].bIsEnabled = false;
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 7d958ff..1010cb7 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -477,13 +477,15 @@
 
 	if (need_locality) {
 		rc = tpm_request_locality(chip, flags);
-		if (rc < 0)
-			goto out_no_locality;
+		if (rc < 0) {
+			need_locality = false;
+			goto out_locality;
+		}
 	}
 
 	rc = tpm_cmd_ready(chip, flags);
 	if (rc)
-		goto out;
+		goto out_locality;
 
 	rc = tpm2_prepare_space(chip, space, ordinal, buf);
 	if (rc)
@@ -547,14 +549,13 @@
 		dev_err(&chip->dev, "tpm2_commit_space: error %d\n", rc);
 
 out:
-	rc = tpm_go_idle(chip, flags);
-	if (rc)
-		goto out;
+	/* may fail but do not override previous error value in rc */
+	tpm_go_idle(chip, flags);
 
+out_locality:
 	if (need_locality)
 		tpm_relinquish_locality(chip, flags);
 
-out_no_locality:
 	if (chip->ops->clk_enable != NULL)
 		chip->ops->clk_enable(chip, false);
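The tpm-interface change keeps the first error in rc by deliberately ignoring
the return value of tpm_go_idle(), and routes both failure paths through a
single out_locality label guarded by need_locality. A tiny model of the
underlying rule, that cleanup must not clobber the primary error:

```c
#include <stdio.h>

static int do_work(void) { return -5; }	/* primary operation fails */
static int go_idle(void) { return -1; }	/* cleanup also fails */

static int transmit(void)
{
	int rc = do_work();

	/* may fail, but do not override the previous error value in rc */
	(void)go_idle();
	return rc;
}

int main(void)
{
	printf("rc=%d\n", transmit());	/* prints -5, not -1 */
	return 0;
}
```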
 
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index caa86b1..f74f451 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -369,6 +369,7 @@
 	struct device *dev = chip->dev.parent;
 	struct i2c_client *client = to_i2c_client(dev);
 	u32 ordinal;
+	unsigned long duration;
 	size_t count = 0;
 	int burst_count, bytes2write, retries, rc = -EIO;
 
@@ -455,10 +456,12 @@
 		return rc;
 	}
 	ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
-	rc = i2c_nuvoton_wait_for_data_avail(chip,
-					     tpm_calc_ordinal_duration(chip,
-								       ordinal),
-					     &priv->read_queue);
+	if (chip->flags & TPM_CHIP_FLAG_TPM2)
+		duration = tpm2_calc_ordinal_duration(chip, ordinal);
+	else
+		duration = tpm_calc_ordinal_duration(chip, ordinal);
+
+	rc = i2c_nuvoton_wait_for_data_avail(chip, duration, &priv->read_queue);
 	if (rc) {
 		dev_err(dev, "%s() timeout command duration\n", __func__);
 		i2c_nuvoton_ready(chip);
diff --git a/drivers/clk/imx/clk-busy.c b/drivers/clk/imx/clk-busy.c
index 9903652..e695622 100644
--- a/drivers/clk/imx/clk-busy.c
+++ b/drivers/clk/imx/clk-busy.c
@@ -154,7 +154,7 @@
 
 struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
 			     u8 width, void __iomem *busy_reg, u8 busy_shift,
-			     const char **parent_names, int num_parents)
+			     const char * const *parent_names, int num_parents)
 {
 	struct clk_busy_mux *busy;
 	struct clk *clk;
diff --git a/drivers/clk/imx/clk-fixup-mux.c b/drivers/clk/imx/clk-fixup-mux.c
index c9b327e..44817c1 100644
--- a/drivers/clk/imx/clk-fixup-mux.c
+++ b/drivers/clk/imx/clk-fixup-mux.c
@@ -70,7 +70,7 @@
 };
 
 struct clk *imx_clk_fixup_mux(const char *name, void __iomem *reg,
-			      u8 shift, u8 width, const char **parents,
+			      u8 shift, u8 width, const char * const *parents,
 			      int num_parents, void (*fixup)(u32 *val))
 {
 	struct clk_fixup_mux *fixup_mux;
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index 8c7c2fc..c509324 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -508,8 +508,12 @@
 	 * lvds1_gate and lvds2_gate are pseudo-gates.  Both can be
 	 * independently configured as clock inputs or outputs.  We treat
 	 * the "output_enable" bit as a gate, even though it's really just
-	 * enabling clock output.
+	 * enabling clock output. Initially the gate bits are cleared, as
+	 * otherwise the exclusive configuration gets locked in the setup done
+	 * by software running before the clock driver, with no way to change
+	 * it.
 	 */
+	writel(readl(base + 0x160) & ~0x3c00, base + 0x160);
 	clk[IMX6QDL_CLK_LVDS1_GATE] = imx_clk_gate_exclusive("lvds1_gate", "lvds1_sel", base + 0x160, 10, BIT(12));
 	clk[IMX6QDL_CLK_LVDS2_GATE] = imx_clk_gate_exclusive("lvds2_gate", "lvds2_sel", base + 0x160, 11, BIT(13));
 
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index 8076ec0..e65c111 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -63,14 +63,14 @@
 
 struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
 			     u8 width, void __iomem *busy_reg, u8 busy_shift,
-			     const char **parent_names, int num_parents);
+			     const char * const *parent_names, int num_parents);
 
 struct clk *imx_clk_fixup_divider(const char *name, const char *parent,
 				  void __iomem *reg, u8 shift, u8 width,
 				  void (*fixup)(u32 *val));
 
 struct clk *imx_clk_fixup_mux(const char *name, void __iomem *reg,
-			      u8 shift, u8 width, const char **parents,
+			      u8 shift, u8 width, const char * const *parents,
 			      int num_parents, void (*fixup)(u32 *val));
 
 static inline struct clk *imx_clk_fixed(const char *name, int rate)
@@ -79,7 +79,8 @@
 }
 
 static inline struct clk *imx_clk_mux_ldb(const char *name, void __iomem *reg,
-		u8 shift, u8 width, const char **parents, int num_parents)
+			u8 shift, u8 width, const char * const *parents,
+			int num_parents)
 {
 	return clk_register_mux(NULL, name, parents, num_parents,
 			CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT, reg,
@@ -192,7 +193,8 @@
 }
 
 static inline struct clk *imx_clk_mux(const char *name, void __iomem *reg,
-		u8 shift, u8 width, const char **parents, int num_parents)
+			u8 shift, u8 width, const char * const *parents,
+			int num_parents)
 {
 	return clk_register_mux(NULL, name, parents, num_parents,
 			CLK_SET_RATE_NO_REPARENT, reg, shift,
@@ -200,7 +202,8 @@
 }
 
 static inline struct clk *imx_clk_mux2(const char *name, void __iomem *reg,
-		u8 shift, u8 width, const char **parents, int num_parents)
+			u8 shift, u8 width, const char * const *parents,
+			int num_parents)
 {
 	return clk_register_mux(NULL, name, parents, num_parents,
 			CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE,
@@ -208,8 +211,9 @@
 }
 
 static inline struct clk *imx_clk_mux_flags(const char *name,
-		void __iomem *reg, u8 shift, u8 width, const char **parents,
-		int num_parents, unsigned long flags)
+			void __iomem *reg, u8 shift, u8 width,
+			const char * const *parents, int num_parents,
+			unsigned long flags)
 {
 	return clk_register_mux(NULL, name, parents, num_parents,
 			flags | CLK_SET_RATE_NO_REPARENT, reg, shift, width, 0,
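The imx clk prototypes change `const char **` to `const char * const *` so that
fully-const parent-name tables can be passed without casts, and the extra const
also promises the callee will not swap the pointers. A minimal demonstration of
the stricter type (print_parents is a hypothetical helper):

```c
#include <stdio.h>

/* Accepts fully-const arrays of strings; cannot modify the pointers. */
static void print_parents(const char * const *parents, int n)
{
	int i;

	for (i = 0; i < n; i++)
		printf("%s\n", parents[i]);
	/* parents[0] = "other"; would be a compile error with this type */
}

int main(void)
{
	static const char * const names[] = { "bi_tcxo", "pll0" };

	print_parents(names, 2);	/* no cast needed with the new type */
	return 0;
}
```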
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 74697e1..50060e8 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -568,13 +568,14 @@
 };
 
 static const struct clk_div_table cpu_scale_table[] = {
-	{ .val = 2, .div = 4 },
-	{ .val = 3, .div = 6 },
-	{ .val = 4, .div = 8 },
-	{ .val = 5, .div = 10 },
-	{ .val = 6, .div = 12 },
-	{ .val = 7, .div = 14 },
-	{ .val = 8, .div = 16 },
+	{ .val = 1, .div = 4 },
+	{ .val = 2, .div = 6 },
+	{ .val = 3, .div = 8 },
+	{ .val = 4, .div = 10 },
+	{ .val = 5, .div = 12 },
+	{ .val = 6, .div = 14 },
+	{ .val = 7, .div = 16 },
+	{ .val = 8, .div = 18 },
 	{ /* sentinel */ },
 };
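The corrected meson8b table follows a regular pattern: each register value maps
to div = (val + 1) * 2, so val 1 gives /4 up through val 8 giving /18 (the old
table was off by one and lacked the /18 entry). A quick check of that
relationship:

```c
#include <stdio.h>

int main(void)
{
	unsigned int val;

	/* Matches the corrected cpu_scale_table: val 1..8 -> div 4..18. */
	for (val = 1; val <= 8; val++)
		printf("val=%u div=%u\n", val, (val + 1) * 2);
	return 0;
}
```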
 
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 46e5628..be4abad 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -366,3 +366,12 @@
 	  LITO devices.
 	  Say Y if you want to support video devices and functionality such as
 	  video encode/decode.
+
+config SM_CAMCC_LITO
+	tristate "LITO Camera Clock Controller"
+	depends on COMMON_CLK_QCOM
+	help
+	  Support for the camera clock controller on Qualcomm Technologies, Inc.
+	  LITO devices.
+	  Say Y if you want to support camera devices and functionality such as
+	  capturing pictures.
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index c8d9225..696eadb 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -52,6 +52,7 @@
 obj-$(CONFIG_SDM_DISPCC_845) += dispcc-sdm845.o
 obj-$(CONFIG_SDM_GCC_845) += gcc-sdm845.o
 obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
+obj-$(CONFIG_SM_CAMCC_LITO) += camcc-lito.o
 obj-$(CONFIG_SM_GCC_LITO) += gcc-lito.o
 obj-$(CONFIG_SM_VIDEOCC_LITO) += videocc-lito.o
 obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
diff --git a/drivers/clk/qcom/camcc-kona.c b/drivers/clk/qcom/camcc-kona.c
index c859e3c..912f372 100644
--- a/drivers/clk/qcom/camcc-kona.c
+++ b/drivers/clk/qcom/camcc-kona.c
@@ -446,6 +446,7 @@
 		.name = "cam_cc_sbi_div_clk_src",
 		.parent_names = (const char *[]){ "cam_cc_ife_0_clk_src" },
 		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
 		.ops = &clk_regmap_div_ro_ops,
 	},
 };
diff --git a/drivers/clk/qcom/camcc-lito.c b/drivers/clk/qcom/camcc-lito.c
new file mode 100644
index 0000000..10842bb
--- /dev/null
+++ b/drivers/clk/qcom/camcc-lito.c
@@ -0,0 +1,2377 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,camcc-lito.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "vdd-level.h"
+
+static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
+static DEFINE_VDD_REGULATORS(vdd_mx, VDD_NUM, 1, vdd_corner);
+
+enum {
+	P_BI_TCXO,
+	P_BI_TCXO_MX,
+	P_CAM_CC_PLL0_OUT_EVEN,
+	P_CAM_CC_PLL0_OUT_MAIN,
+	P_CAM_CC_PLL0_OUT_ODD,
+	P_CAM_CC_PLL1_OUT_EVEN,
+	P_CAM_CC_PLL2_OUT_AUX,
+	P_CAM_CC_PLL2_OUT_EARLY,
+	P_CAM_CC_PLL2_OUT_MAIN,
+	P_CAM_CC_PLL3_OUT_EVEN,
+	P_CAM_CC_PLL4_OUT_EVEN,
+	P_CHIP_SLEEP_CLK,
+	P_CORE_BI_PLL_TEST_SE,
+};
+
+static const struct parent_map cam_cc_parent_map_0[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CAM_CC_PLL0_OUT_MAIN, 1 },
+	{ P_CAM_CC_PLL0_OUT_EVEN, 2 },
+	{ P_CAM_CC_PLL0_OUT_ODD, 3 },
+	{ P_CAM_CC_PLL2_OUT_MAIN, 5 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const cam_cc_parent_names_0[] = {
+	"bi_tcxo",
+	"cam_cc_pll0",
+	"cam_cc_pll0_out_even",
+	"cam_cc_pll0_out_odd",
+	"cam_cc_pll2_out_main",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map cam_cc_parent_map_1[] = {
+	{ P_BI_TCXO_MX, 0 },
+	{ P_CAM_CC_PLL2_OUT_AUX, 5 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const cam_cc_parent_names_1[] = {
+	"bi_tcxo",
+	"cam_cc_pll2_out_aux",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map cam_cc_parent_map_2[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CAM_CC_PLL0_OUT_MAIN, 1 },
+	{ P_CAM_CC_PLL0_OUT_EVEN, 2 },
+	{ P_CAM_CC_PLL0_OUT_ODD, 3 },
+	{ P_CAM_CC_PLL1_OUT_EVEN, 4 },
+	{ P_CAM_CC_PLL2_OUT_EARLY, 5 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const cam_cc_parent_names_2[] = {
+	"bi_tcxo",
+	"cam_cc_pll0",
+	"cam_cc_pll0_out_even",
+	"cam_cc_pll0_out_odd",
+	"cam_cc_pll1_out_even",
+	"cam_cc_pll2",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map cam_cc_parent_map_3[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CAM_CC_PLL0_OUT_MAIN, 1 },
+	{ P_CAM_CC_PLL0_OUT_EVEN, 2 },
+	{ P_CAM_CC_PLL0_OUT_ODD, 3 },
+	{ P_CAM_CC_PLL2_OUT_EARLY, 5 },
+	{ P_CAM_CC_PLL4_OUT_EVEN, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const cam_cc_parent_names_3[] = {
+	"bi_tcxo",
+	"cam_cc_pll0",
+	"cam_cc_pll0_out_even",
+	"cam_cc_pll0_out_odd",
+	"cam_cc_pll2",
+	"cam_cc_pll4_out_even",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map cam_cc_parent_map_4[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CAM_CC_PLL3_OUT_EVEN, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const cam_cc_parent_names_4[] = {
+	"bi_tcxo",
+	"cam_cc_pll3_out_even",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map cam_cc_parent_map_5[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CAM_CC_PLL4_OUT_EVEN, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const cam_cc_parent_names_5[] = {
+	"bi_tcxo",
+	"cam_cc_pll4_out_even",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map cam_cc_parent_map_6[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CAM_CC_PLL1_OUT_EVEN, 4 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const cam_cc_parent_names_6[] = {
+	"bi_tcxo",
+	"cam_cc_pll1_out_even",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map cam_cc_parent_map_7[] = {
+	{ P_CHIP_SLEEP_CLK, 0 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const cam_cc_parent_names_7[] = {
+	"chip_sleep_clk",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map cam_cc_parent_map_8[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CAM_CC_PLL0_OUT_ODD, 3 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const cam_cc_parent_names_8[] = {
+	"bi_tcxo",
+	"cam_cc_pll0_out_odd",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map cam_cc_parent_map_9[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const cam_cc_parent_names_9[] = {
+	"bi_tcxo",
+	"core_bi_pll_test_se",
+};
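Each cam_cc_parent_map_N above pairs a logical parent (a P_* enum value) with
the hardware mux select value, and the matching cam_cc_parent_names_N lists the
parent clocks in the same order, so entry i of both arrays describes one mux
input. A small model of resolving a select value back to a parent name, based on
cam_cc_parent_map_8 (name_for_cfg is a hypothetical helper):

```c
#include <stdio.h>

enum { P_BI_TCXO, P_CAM_CC_PLL0_OUT_ODD, P_CORE_BI_PLL_TEST_SE };

struct parent_map {
	int src;	/* logical parent */
	int cfg;	/* hardware mux select value */
};

/* Mirrors cam_cc_parent_map_8 / cam_cc_parent_names_8 above. */
static const struct parent_map map[] = {
	{ P_BI_TCXO, 0 },
	{ P_CAM_CC_PLL0_OUT_ODD, 3 },
	{ P_CORE_BI_PLL_TEST_SE, 7 },
};
static const char * const names[] = {
	"bi_tcxo", "cam_cc_pll0_out_odd", "core_bi_pll_test_se",
};

static const char *name_for_cfg(int cfg)
{
	unsigned int i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (map[i].cfg == cfg)
			return names[i];	/* arrays share ordering */
	return "unknown";
}

int main(void)
{
	printf("%s\n", name_for_cfg(3));	/* cam_cc_pll0_out_odd */
	return 0;
}
```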
+
+static struct pll_vco lucid_vco[] = {
+	{ 249600000, 2000000000, 0 },
+};
+
+static struct pll_vco zonda_vco[] = {
+	{ 595200000, 3600000000, 0 },
+};
+
+static const struct alpha_pll_config cam_cc_pll0_config = {
+	.l = 0x3E,
+	.cal_l = 0x44,
+	.alpha = 0x8000,
+	.config_ctl_val = 0x20485699,
+	.config_ctl_hi_val = 0x00002261,
+	.config_ctl_hi1_val = 0x029A699C,
+	.user_ctl_val = 0x00003101,
+	.user_ctl_hi_val = 0x00000805,
+	.user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll cam_cc_pll0 = {
+	.offset = 0x0,
+	.vco_table = lucid_vco,
+	.num_vco = ARRAY_SIZE(lucid_vco),
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_pll0",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_lucid_ops,
+			.vdd_class = &vdd_mx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 615000000,
+				[VDD_LOW] = 1066000000,
+				[VDD_LOW_L1] = 1600000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_even[] = {
+	{ 0x1, 2 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
+	.offset = 0x0,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_cam_cc_pll0_out_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_even),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_pll0_out_even",
+		.parent_names = (const char *[]){ "cam_cc_pll0" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_alpha_pll_postdiv_lucid_ops,
+	},
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_odd[] = {
+	{ 0x3, 3 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = {
+	.offset = 0x0,
+	.post_div_shift = 12,
+	.post_div_table = post_div_table_cam_cc_pll0_out_odd,
+	.num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_odd),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_pll0_out_odd",
+		.parent_names = (const char *[]){ "cam_cc_pll0" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_alpha_pll_postdiv_lucid_ops,
+	},
+};
+
+static const struct alpha_pll_config cam_cc_pll1_config = {
+	.l = 0x1F,
+	.cal_l = 0x44,
+	.alpha = 0x4000,
+	.config_ctl_val = 0x20485699,
+	.config_ctl_hi_val = 0x00002261,
+	.config_ctl_hi1_val = 0x029A699C,
+	.user_ctl_val = 0x00000101,
+	.user_ctl_hi_val = 0x00000805,
+	.user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll cam_cc_pll1 = {
+	.offset = 0x1000,
+	.vco_table = lucid_vco,
+	.num_vco = ARRAY_SIZE(lucid_vco),
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_pll1",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_lucid_ops,
+			.vdd_class = &vdd_mx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 615000000,
+				[VDD_LOW] = 1066000000,
+				[VDD_LOW_L1] = 1600000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll1_out_even[] = {
+	{ 0x1, 2 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = {
+	.offset = 0x1000,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_cam_cc_pll1_out_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll1_out_even),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_pll1_out_even",
+		.parent_names = (const char *[]){ "cam_cc_pll1" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_alpha_pll_postdiv_lucid_ops,
+	},
+};
+
+static const struct alpha_pll_config cam_cc_pll2_config = {
+	.l = 0x32,
+	.cal_l = 0x32,
+	.alpha = 0x0,
+	.config_ctl_val = 0x08200920,
+	.config_ctl_hi_val = 0x05008001,
+	.config_ctl_hi1_val = 0x00000000,
+	.user_ctl_val = 0x00000108,
+};
+
+static struct clk_alpha_pll cam_cc_pll2 = {
+	.offset = 0x2000,
+	.vco_table = zonda_vco,
+	.num_vco = ARRAY_SIZE(zonda_vco),
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_ZONDA],
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_pll2",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_zonda_ops,
+			.vdd_class = &vdd_mx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_LOWER] = 1600000000,
+				[VDD_LOW] = 2000000000,
+				[VDD_NOMINAL] = 2900000000,
+				[VDD_HIGH] = 3600000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll2_out_aux[] = {
+	{ 0x1, 2 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll2_out_aux = {
+	.offset = 0x2000,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_cam_cc_pll2_out_aux,
+	.num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll2_out_aux),
+	.width = 2,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_ZONDA],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_pll2_out_aux",
+		.parent_names = (const char *[]){ "cam_cc_pll2" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_alpha_pll_postdiv_zonda_ops,
+	},
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll2_out_main[] = {
+	{ 0x1, 2 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll2_out_main = {
+	.offset = 0x2000,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_cam_cc_pll2_out_main,
+	.num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll2_out_main),
+	.width = 2,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_ZONDA],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_pll2_out_main",
+		.parent_names = (const char *[]){ "cam_cc_pll2" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_alpha_pll_postdiv_zonda_ops,
+	},
+};
+
+static const struct alpha_pll_config cam_cc_pll3_config = {
+	.l = 0x27,
+	.cal_l = 0x44,
+	.alpha = 0x9555,
+	.config_ctl_val = 0x20485699,
+	.config_ctl_hi_val = 0x00002261,
+	.config_ctl_hi1_val = 0x029A699C,
+	.user_ctl_val = 0x00000101,
+	.user_ctl_hi_val = 0x00000805,
+	.user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll cam_cc_pll3 = {
+	.offset = 0x3000,
+	.vco_table = lucid_vco,
+	.num_vco = ARRAY_SIZE(lucid_vco),
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_pll3",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_lucid_ops,
+			.vdd_class = &vdd_mx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 615000000,
+				[VDD_LOW] = 1066000000,
+				[VDD_LOW_L1] = 1600000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll3_out_even[] = {
+	{ 0x1, 2 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
+	.offset = 0x3000,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_cam_cc_pll3_out_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll3_out_even),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_pll3_out_even",
+		.parent_names = (const char *[]){ "cam_cc_pll3" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_alpha_pll_postdiv_lucid_ops,
+	},
+};
+
+static const struct alpha_pll_config cam_cc_pll4_config = {
+	.l = 0x27,
+	.cal_l = 0x44,
+	.alpha = 0x9555,
+	.config_ctl_val = 0x20485699,
+	.config_ctl_hi_val = 0x00002261,
+	.config_ctl_hi1_val = 0x029A699C,
+	.user_ctl_val = 0x00000101,
+	.user_ctl_hi_val = 0x00000805,
+	.user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll cam_cc_pll4 = {
+	.offset = 0x4000,
+	.vco_table = lucid_vco,
+	.num_vco = ARRAY_SIZE(lucid_vco),
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_pll4",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_lucid_ops,
+			.vdd_class = &vdd_mx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 615000000,
+				[VDD_LOW] = 1066000000,
+				[VDD_LOW_L1] = 1600000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll4_out_even[] = {
+	{ 0x1, 2 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll4_out_even = {
+	.offset = 0x4000,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_cam_cc_pll4_out_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll4_out_even),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_pll4_out_even",
+		.parent_names = (const char *[]){ "cam_cc_pll4" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_alpha_pll_postdiv_lucid_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+	F(200000000, P_CAM_CC_PLL0_OUT_ODD, 2, 0, 0),
+	F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+	F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+	F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_bps_clk_src = {
+	.cmd_rcgr = 0x7010,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_bps_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_bps_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 200000000,
+			[VDD_LOW] = 400000000,
+			[VDD_LOW_L1] = 480000000,
+			[VDD_NOMINAL] = 600000000},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_camnoc_axi_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0),
+	F(240000000, P_CAM_CC_PLL2_OUT_MAIN, 2, 0, 0),
+	F(320000000, P_CAM_CC_PLL2_OUT_MAIN, 1.5, 0, 0),
+	F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+	F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+	{ }
+};
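The frequency tables use the qcom F() macro; in the upstream qcom clk driver
that macro stores the pre-divider as 2*div - 1, which is how a half-integer
entry like the 1.5 divider in ftbl_cam_cc_camnoc_axi_clk_src fits in a single
integer field. A sketch of that encoding (the formula follows the upstream
convention; treat the exact field layout as an assumption here):

```c
#include <stdio.h>

/* Upstream qcom clk convention: the pre-divider field stores 2*div - 1,
 * which lets half-integer dividers like 1.5 appear in the F() tables. */
#define PRE_DIV_FIELD(div)	(2 * (div) - 1)

int main(void)
{
	printf("div 1   -> field %d\n", (int)PRE_DIV_FIELD(1));	  /* 1 */
	printf("div 1.5 -> field %d\n", (int)PRE_DIV_FIELD(1.5)); /* 2 */
	printf("div 2   -> field %d\n", (int)PRE_DIV_FIELD(2));	  /* 3 */
	/* Decoding: div = (field + 1) / 2, e.g. field 2 -> 1.5 */
	return 0;
}
```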
+
+static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = {
+	.cmd_rcgr = 0xc12c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_camnoc_axi_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_camnoc_axi_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 150000000,
+			[VDD_LOW] = 240000000,
+			[VDD_LOW_L1] = 320000000,
+			[VDD_NOMINAL] = 400000000,
+			[VDD_HIGH] = 480000000},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_cci_0_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_cci_0_clk_src = {
+	.cmd_rcgr = 0xc0c4,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_cci_0_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 37500000},
+	},
+};
+
+static struct clk_rcg2 cam_cc_cci_1_clk_src = {
+	.cmd_rcgr = 0xc0e0,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_cci_1_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 37500000},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+	F(384000000, P_CAM_CC_PLL2_OUT_EARLY, 2.5, 0, 0),
+	F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+	.cmd_rcgr = 0xa064,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_2,
+	.freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_cphy_rx_clk_src",
+		.parent_names = cam_cc_parent_names_2,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 300000000,
+			[VDD_LOW] = 384000000,
+			[VDD_LOW_L1] = 400000000},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+	.cmd_rcgr = 0x6004,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_csi0phytimer_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 300000000},
+	},
+};
+
+static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+	.cmd_rcgr = 0x6028,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_csi1phytimer_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 300000000},
+	},
+};
+
+static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+	.cmd_rcgr = 0x604c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_csi2phytimer_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 300000000},
+	},
+};
+
+static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
+	.cmd_rcgr = 0x6070,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_csi3phytimer_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 300000000},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0),
+	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+	F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+	F(300000000, P_CAM_CC_PLL0_OUT_MAIN, 4, 0, 0),
+	F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+	.cmd_rcgr = 0x703c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_fast_ahb_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 100000000,
+			[VDD_LOW] = 200000000,
+			[VDD_LOW_L1] = 300000000,
+			[VDD_NOMINAL] = 400000000},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_fd_core_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(380000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+	F(384000000, P_CAM_CC_PLL2_OUT_EARLY, 2.5, 0, 0),
+	F(480000000, P_CAM_CC_PLL2_OUT_EARLY, 2, 0, 0),
+	F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_fd_core_clk_src = {
+	.cmd_rcgr = 0xc09c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_3,
+	.freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_fd_core_clk_src",
+		.parent_names = cam_cc_parent_names_3,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 380000000,
+			[VDD_LOW] = 384000000,
+			[VDD_LOW_L1] = 480000000,
+			[VDD_NOMINAL] = 600000000},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+	F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_icp_clk_src = {
+	.cmd_rcgr = 0xc074,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_icp_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_icp_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 400000000,
+			[VDD_LOW_L1] = 600000000},
+	},
+};
+
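+/*
+ * The IFE image front ends run from dedicated PLLs (PLL3 for IFE0, PLL4
+ * for IFE1) at a fixed divider of 1, so CLK_SET_RATE_PARENT pushes rate
+ * requests straight into the PLL.
+ */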
+static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(380000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	F(510000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	F(637000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	F(760000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_clk_src = {
+	.cmd_rcgr = 0xa010,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_4,
+	.freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ife_0_clk_src",
+		.parent_names = cam_cc_parent_names_4,
+		.num_parents = 3,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 380000000,
+			[VDD_LOW] = 510000000,
+			[VDD_LOW_L1] = 637000000,
+			[VDD_NOMINAL] = 760000000},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_csid_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(75000000, P_CAM_CC_PLL0_OUT_EVEN, 8, 0, 0),
+	F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+	F(384000000, P_CAM_CC_PLL2_OUT_EARLY, 2.5, 0, 0),
+	F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
+	.cmd_rcgr = 0xa03c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_2,
+	.freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ife_0_csid_clk_src",
+		.parent_names = cam_cc_parent_names_2,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 300000000,
+			[VDD_LOW] = 384000000,
+			[VDD_LOW_L1] = 400000000},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_1_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(380000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+	F(510000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+	F(637000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+	F(760000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_ife_1_clk_src = {
+	.cmd_rcgr = 0xb010,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_5,
+	.freq_tbl = ftbl_cam_cc_ife_1_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ife_1_clk_src",
+		.parent_names = cam_cc_parent_names_5,
+		.num_parents = 3,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 380000000,
+			[VDD_LOW] = 510000000,
+			[VDD_LOW_L1] = 637000000,
+			[VDD_NOMINAL] = 760000000},
+	},
+};
+
+static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
+	.cmd_rcgr = 0xb034,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_2,
+	.freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ife_1_csid_clk_src",
+		.parent_names = cam_cc_parent_names_2,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 300000000,
+			[VDD_LOW] = 384000000,
+			[VDD_LOW_L1] = 400000000},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_lite_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(320000000, P_CAM_CC_PLL2_OUT_MAIN, 1.5, 0, 0),
+	F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+	F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+	F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
+	.cmd_rcgr = 0xc004,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_ife_lite_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ife_lite_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 320000000,
+			[VDD_LOW] = 400000000,
+			[VDD_LOW_L1] = 480000000,
+			[VDD_NOMINAL] = 600000000},
+	},
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
+	.cmd_rcgr = 0xc020,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_2,
+	.freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ife_lite_csid_clk_src",
+		.parent_names = cam_cc_parent_names_2,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 300000000,
+			[VDD_LOW] = 384000000,
+			[VDD_LOW_L1] = 400000000},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_ipe_0_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(300000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+	F(430000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+	F(520000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+	F(600000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
+	.cmd_rcgr = 0x8010,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_6,
+	.freq_tbl = ftbl_cam_cc_ipe_0_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ipe_0_clk_src",
+		.parent_names = cam_cc_parent_names_6,
+		.num_parents = 3,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 300000000,
+			[VDD_LOW] = 430000000,
+			[VDD_LOW_L1] = 520000000,
+			[VDD_NOMINAL] = 600000000},
+	},
+};
+
+static struct clk_rcg2 cam_cc_jpeg_clk_src = {
+	.cmd_rcgr = 0xc048,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_bps_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_jpeg_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 200000000,
+			[VDD_LOW] = 400000000,
+			[VDD_LOW_L1] = 480000000,
+			[VDD_NOMINAL] = 600000000},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_lrme_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+	F(240000000, P_CAM_CC_PLL2_OUT_MAIN, 2, 0, 0),
+	F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+	F(320000000, P_CAM_CC_PLL2_OUT_MAIN, 1.5, 0, 0),
+	F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_lrme_clk_src = {
+	.cmd_rcgr = 0xc100,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_lrme_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_lrme_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 240000000,
+			[VDD_LOW] = 300000000,
+			[VDD_LOW_L1] = 320000000,
+			[VDD_NOMINAL] = 400000000},
+	},
+};
+
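+/*
+ * Sensor MCLK RCGs use an 8-bit M/N counter (mnd_width = 8) to derive
+ * fractional rates such as 24 MHz (M/N = 1/20) from the PLL2 auxiliary
+ * output, and vote on the MX rail rather than CX.
+ */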
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+	F(19200000, P_BI_TCXO_MX, 1, 0, 0),
+	F(24000000, P_CAM_CC_PLL2_OUT_AUX, 1, 1, 20),
+	F(34285714, P_CAM_CC_PLL2_OUT_AUX, 14, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+	.cmd_rcgr = 0x5004,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_1,
+	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_mclk0_clk_src",
+		.parent_names = cam_cc_parent_names_1,
+		.num_parents = 3,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_mx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 34285714},
+	},
+};
+
+static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+	.cmd_rcgr = 0x5024,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_1,
+	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_mclk1_clk_src",
+		.parent_names = cam_cc_parent_names_1,
+		.num_parents = 3,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_mx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 34285714},
+	},
+};
+
+static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+	.cmd_rcgr = 0x5044,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_1,
+	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_mclk2_clk_src",
+		.parent_names = cam_cc_parent_names_1,
+		.num_parents = 3,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_mx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 34285714},
+	},
+};
+
+static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+	.cmd_rcgr = 0x5064,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_1,
+	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_mclk3_clk_src",
+		.parent_names = cam_cc_parent_names_1,
+		.num_parents = 3,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_mx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 34285714},
+	},
+};
+
+static struct clk_rcg2 cam_cc_mclk4_clk_src = {
+	.cmd_rcgr = 0x5084,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_1,
+	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_mclk4_clk_src",
+		.parent_names = cam_cc_parent_names_1,
+		.num_parents = 3,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_mx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 34285714},
+	},
+};
+
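+/*
+ * Housekeeping clock sources: the sleep RCG runs at 32 kHz from the chip
+ * sleep clock, and the XO RCG below passes the 19.2 MHz crystal through.
+ */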
+static const struct freq_tbl ftbl_cam_cc_sleep_clk_src[] = {
+	F(32000, P_CHIP_SLEEP_CLK, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_sleep_clk_src = {
+	.cmd_rcgr = 0xc1a4,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_7,
+	.freq_tbl = ftbl_cam_cc_sleep_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_sleep_clk_src",
+		.parent_names = cam_cc_parent_names_7,
+		.num_parents = 2,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 32000},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(80000000, P_CAM_CC_PLL0_OUT_ODD, 5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+	.cmd_rcgr = 0x7058,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_8,
+	.freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_slow_ahb_clk_src",
+		.parent_names = cam_cc_parent_names_8,
+		.num_parents = 3,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 80000000},
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_xo_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_xo_clk_src = {
+	.cmd_rcgr = 0xc188,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_9,
+	.freq_tbl = ftbl_cam_cc_xo_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_xo_clk_src",
+		.parent_names = cam_cc_parent_names_9,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+	},
+};
+
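+/*
+ * Branch (gate) clocks: each one toggles a single enable bit, polls its
+ * halt register on enable/disable (BRANCH_HALT), and forwards rate
+ * requests to its RCG parent via CLK_SET_RATE_PARENT.
+ */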
+static struct clk_branch cam_cc_bps_ahb_clk = {
+	.halt_reg = 0x7070,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x7070,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_bps_ahb_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_slow_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_bps_areg_clk = {
+	.halt_reg = 0x7054,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x7054,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_bps_areg_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_fast_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_bps_axi_clk = {
+	.halt_reg = 0x7038,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x7038,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_bps_axi_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_camnoc_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_bps_clk = {
+	.halt_reg = 0x7028,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x7028,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_bps_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_bps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_camnoc_axi_clk = {
+	.halt_reg = 0xc148,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc148,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_camnoc_axi_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_camnoc_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_camnoc_dcd_xo_clk = {
+	.halt_reg = 0xc150,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc150,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_camnoc_dcd_xo_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_xo_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_cci_0_clk = {
+	.halt_reg = 0xc0dc,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc0dc,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_cci_0_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cci_0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_cci_1_clk = {
+	.halt_reg = 0xc0f8,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc0f8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_cci_1_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cci_1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_core_ahb_clk = {
+	.halt_reg = 0xc184,
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0xc184,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_core_ahb_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_slow_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_cpas_ahb_clk = {
+	.halt_reg = 0xc124,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc124,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_cpas_ahb_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_slow_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csi0phytimer_clk = {
+	.halt_reg = 0x601c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x601c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csi0phytimer_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_csi0phytimer_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csi1phytimer_clk = {
+	.halt_reg = 0x6040,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x6040,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csi1phytimer_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_csi1phytimer_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csi2phytimer_clk = {
+	.halt_reg = 0x6064,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x6064,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csi2phytimer_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_csi2phytimer_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csi3phytimer_clk = {
+	.halt_reg = 0x6088,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x6088,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csi3phytimer_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_csi3phytimer_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csiphy0_clk = {
+	.halt_reg = 0x6020,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x6020,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csiphy0_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csiphy1_clk = {
+	.halt_reg = 0x6044,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x6044,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csiphy1_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csiphy2_clk = {
+	.halt_reg = 0x6068,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x6068,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csiphy2_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csiphy3_clk = {
+	.halt_reg = 0x608c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x608c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csiphy3_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_fd_core_clk = {
+	.halt_reg = 0xc0b4,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc0b4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_fd_core_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_fd_core_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_fd_core_uar_clk = {
+	.halt_reg = 0xc0bc,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc0bc,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_fd_core_uar_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_fd_core_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_gdsc_clk = {
+	.halt_reg = 0xc1a0,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc1a0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_gdsc_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_xo_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_icp_ahb_clk = {
+	.halt_reg = 0xc094,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc094,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_icp_ahb_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_slow_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_icp_clk = {
+	.halt_reg = 0xc08c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc08c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_icp_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_icp_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_0_axi_clk = {
+	.halt_reg = 0xa080,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xa080,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_0_axi_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_camnoc_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_0_clk = {
+	.halt_reg = 0xa028,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xa028,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_0_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_0_cphy_rx_clk = {
+	.halt_reg = 0xa07c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xa07c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_0_cphy_rx_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_0_csid_clk = {
+	.halt_reg = 0xa054,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xa054,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_0_csid_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_0_csid_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_0_dsp_clk = {
+	.halt_reg = 0xa038,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xa038,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_0_dsp_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_1_axi_clk = {
+	.halt_reg = 0xb058,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb058,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_1_axi_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_camnoc_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_1_clk = {
+	.halt_reg = 0xb028,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb028,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_1_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_1_cphy_rx_clk = {
+	.halt_reg = 0xb054,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb054,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_1_cphy_rx_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_1_csid_clk = {
+	.halt_reg = 0xb04c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb04c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_1_csid_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_1_csid_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_1_dsp_clk = {
+	.halt_reg = 0xb030,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb030,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_1_dsp_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_lite_clk = {
+	.halt_reg = 0xc01c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc01c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_lite_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_lite_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_lite_cphy_rx_clk = {
+	.halt_reg = 0xc040,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc040,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_lite_cphy_rx_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_lite_csid_clk = {
+	.halt_reg = 0xc038,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc038,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_lite_csid_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_lite_csid_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_0_ahb_clk = {
+	.halt_reg = 0x8040,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x8040,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_0_ahb_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_slow_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_0_areg_clk = {
+	.halt_reg = 0x803c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x803c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_0_areg_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_fast_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_0_axi_clk = {
+	.halt_reg = 0x8038,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x8038,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_0_axi_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_camnoc_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_0_clk = {
+	.halt_reg = 0x8028,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x8028,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_0_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ipe_0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_1_ahb_clk = {
+	.halt_reg = 0x9028,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x9028,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_1_ahb_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_slow_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_1_areg_clk = {
+	.halt_reg = 0x9024,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x9024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_1_areg_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_fast_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_1_axi_clk = {
+	.halt_reg = 0x9020,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x9020,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_1_axi_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_camnoc_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
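+/* IPE1 has no RCG of its own; it gates the IPE0 clock source. */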
+static struct clk_branch cam_cc_ipe_1_clk = {
+	.halt_reg = 0x9010,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x9010,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_1_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ipe_0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_jpeg_clk = {
+	.halt_reg = 0xc060,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc060,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_jpeg_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_jpeg_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_lrme_clk = {
+	.halt_reg = 0xc118,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc118,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_lrme_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_lrme_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_mclk0_clk = {
+	.halt_reg = 0x501c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x501c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_mclk0_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_mclk0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_mclk1_clk = {
+	.halt_reg = 0x503c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x503c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_mclk1_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_mclk1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_mclk2_clk = {
+	.halt_reg = 0x505c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x505c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_mclk2_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_mclk2_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_mclk3_clk = {
+	.halt_reg = 0x507c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x507c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_mclk3_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_mclk3_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_mclk4_clk = {
+	.halt_reg = 0x509c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x509c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_mclk4_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_mclk4_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_sleep_clk = {
+	.halt_reg = 0xc1bc,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc1bc,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_sleep_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_sleep_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
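+/* Lookup table, indexed by the CAM_CC_* IDs shared with devicetree consumers. */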
+static struct clk_regmap *cam_cc_lito_clocks[] = {
+	[CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr,
+	[CAM_CC_BPS_AREG_CLK] = &cam_cc_bps_areg_clk.clkr,
+	[CAM_CC_BPS_AXI_CLK] = &cam_cc_bps_axi_clk.clkr,
+	[CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr,
+	[CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr,
+	[CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr,
+	[CAM_CC_CAMNOC_AXI_CLK_SRC] = &cam_cc_camnoc_axi_clk_src.clkr,
+	[CAM_CC_CAMNOC_DCD_XO_CLK] = &cam_cc_camnoc_dcd_xo_clk.clkr,
+	[CAM_CC_CCI_0_CLK] = &cam_cc_cci_0_clk.clkr,
+	[CAM_CC_CCI_0_CLK_SRC] = &cam_cc_cci_0_clk_src.clkr,
+	[CAM_CC_CCI_1_CLK] = &cam_cc_cci_1_clk.clkr,
+	[CAM_CC_CCI_1_CLK_SRC] = &cam_cc_cci_1_clk_src.clkr,
+	[CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr,
+	[CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr,
+	[CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr,
+	[CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr,
+	[CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr,
+	[CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr,
+	[CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
+	[CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
+	[CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+	[CAM_CC_CSI3PHYTIMER_CLK] = &cam_cc_csi3phytimer_clk.clkr,
+	[CAM_CC_CSI3PHYTIMER_CLK_SRC] = &cam_cc_csi3phytimer_clk_src.clkr,
+	[CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
+	[CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
+	[CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+	[CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr,
+	[CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
+	[CAM_CC_FD_CORE_CLK] = &cam_cc_fd_core_clk.clkr,
+	[CAM_CC_FD_CORE_CLK_SRC] = &cam_cc_fd_core_clk_src.clkr,
+	[CAM_CC_FD_CORE_UAR_CLK] = &cam_cc_fd_core_uar_clk.clkr,
+	[CAM_CC_GDSC_CLK] = &cam_cc_gdsc_clk.clkr,
+	[CAM_CC_ICP_AHB_CLK] = &cam_cc_icp_ahb_clk.clkr,
+	[CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
+	[CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
+	[CAM_CC_IFE_0_AXI_CLK] = &cam_cc_ife_0_axi_clk.clkr,
+	[CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr,
+	[CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr,
+	[CAM_CC_IFE_0_CPHY_RX_CLK] = &cam_cc_ife_0_cphy_rx_clk.clkr,
+	[CAM_CC_IFE_0_CSID_CLK] = &cam_cc_ife_0_csid_clk.clkr,
+	[CAM_CC_IFE_0_CSID_CLK_SRC] = &cam_cc_ife_0_csid_clk_src.clkr,
+	[CAM_CC_IFE_0_DSP_CLK] = &cam_cc_ife_0_dsp_clk.clkr,
+	[CAM_CC_IFE_1_AXI_CLK] = &cam_cc_ife_1_axi_clk.clkr,
+	[CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr,
+	[CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr,
+	[CAM_CC_IFE_1_CPHY_RX_CLK] = &cam_cc_ife_1_cphy_rx_clk.clkr,
+	[CAM_CC_IFE_1_CSID_CLK] = &cam_cc_ife_1_csid_clk.clkr,
+	[CAM_CC_IFE_1_CSID_CLK_SRC] = &cam_cc_ife_1_csid_clk_src.clkr,
+	[CAM_CC_IFE_1_DSP_CLK] = &cam_cc_ife_1_dsp_clk.clkr,
+	[CAM_CC_IFE_LITE_CLK] = &cam_cc_ife_lite_clk.clkr,
+	[CAM_CC_IFE_LITE_CLK_SRC] = &cam_cc_ife_lite_clk_src.clkr,
+	[CAM_CC_IFE_LITE_CPHY_RX_CLK] = &cam_cc_ife_lite_cphy_rx_clk.clkr,
+	[CAM_CC_IFE_LITE_CSID_CLK] = &cam_cc_ife_lite_csid_clk.clkr,
+	[CAM_CC_IFE_LITE_CSID_CLK_SRC] = &cam_cc_ife_lite_csid_clk_src.clkr,
+	[CAM_CC_IPE_0_AHB_CLK] = &cam_cc_ipe_0_ahb_clk.clkr,
+	[CAM_CC_IPE_0_AREG_CLK] = &cam_cc_ipe_0_areg_clk.clkr,
+	[CAM_CC_IPE_0_AXI_CLK] = &cam_cc_ipe_0_axi_clk.clkr,
+	[CAM_CC_IPE_0_CLK] = &cam_cc_ipe_0_clk.clkr,
+	[CAM_CC_IPE_0_CLK_SRC] = &cam_cc_ipe_0_clk_src.clkr,
+	[CAM_CC_IPE_1_AHB_CLK] = &cam_cc_ipe_1_ahb_clk.clkr,
+	[CAM_CC_IPE_1_AREG_CLK] = &cam_cc_ipe_1_areg_clk.clkr,
+	[CAM_CC_IPE_1_AXI_CLK] = &cam_cc_ipe_1_axi_clk.clkr,
+	[CAM_CC_IPE_1_CLK] = &cam_cc_ipe_1_clk.clkr,
+	[CAM_CC_JPEG_CLK] = &cam_cc_jpeg_clk.clkr,
+	[CAM_CC_JPEG_CLK_SRC] = &cam_cc_jpeg_clk_src.clkr,
+	[CAM_CC_LRME_CLK] = &cam_cc_lrme_clk.clkr,
+	[CAM_CC_LRME_CLK_SRC] = &cam_cc_lrme_clk_src.clkr,
+	[CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr,
+	[CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr,
+	[CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr,
+	[CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr,
+	[CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr,
+	[CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr,
+	[CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr,
+	[CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr,
+	[CAM_CC_MCLK4_CLK] = &cam_cc_mclk4_clk.clkr,
+	[CAM_CC_MCLK4_CLK_SRC] = &cam_cc_mclk4_clk_src.clkr,
+	[CAM_CC_PLL0] = &cam_cc_pll0.clkr,
+	[CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr,
+	[CAM_CC_PLL0_OUT_ODD] = &cam_cc_pll0_out_odd.clkr,
+	[CAM_CC_PLL1] = &cam_cc_pll1.clkr,
+	[CAM_CC_PLL1_OUT_EVEN] = &cam_cc_pll1_out_even.clkr,
+	[CAM_CC_PLL2] = &cam_cc_pll2.clkr,
+	[CAM_CC_PLL2_OUT_AUX] = &cam_cc_pll2_out_aux.clkr,
+	[CAM_CC_PLL2_OUT_MAIN] = &cam_cc_pll2_out_main.clkr,
+	[CAM_CC_PLL3] = &cam_cc_pll3.clkr,
+	[CAM_CC_PLL3_OUT_EVEN] = &cam_cc_pll3_out_even.clkr,
+	[CAM_CC_PLL4] = &cam_cc_pll4.clkr,
+	[CAM_CC_PLL4_OUT_EVEN] = &cam_cc_pll4_out_even.clkr,
+	[CAM_CC_SLEEP_CLK] = &cam_cc_sleep_clk.clkr,
+	[CAM_CC_SLEEP_CLK_SRC] = &cam_cc_sleep_clk_src.clkr,
+	[CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
+	[CAM_CC_XO_CLK_SRC] = &cam_cc_xo_clk_src.clkr,
+};
+
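+/* fast_io selects spinlock-based regmap locking, appropriate for MMIO. */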
+static const struct regmap_config cam_cc_lito_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0xd028,
+	.fast_io = true,
+};
+
+static const struct qcom_cc_desc cam_cc_lito_desc = {
+	.config = &cam_cc_lito_regmap_config,
+	.clks = cam_cc_lito_clocks,
+	.num_clks = ARRAY_SIZE(cam_cc_lito_clocks),
+};
+
+static const struct of_device_id cam_cc_lito_match_table[] = {
+	{ .compatible = "qcom,lito-camcc" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, cam_cc_lito_match_table);
+
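+/*
+ * Probe: verify the CFG AHB clock is available, acquire the MX and CX
+ * rail regulators (deferring until they are ready), map the controller,
+ * statically configure the five PLLs, then register the clocks.
+ */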
+static int cam_cc_lito_probe(struct platform_device *pdev)
+{
+	struct regmap *regmap;
+	struct clk *clk;
+	int ret;
+
+	clk = clk_get(&pdev->dev, "cfg_ahb_clk");
+	if (IS_ERR(clk)) {
+		if (PTR_ERR(clk) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get ahb clock handle\n");
+		return PTR_ERR(clk);
+	}
+	clk_put(clk);
+
+	vdd_mx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_mx");
+	if (IS_ERR(vdd_mx.regulator[0])) {
+		if (PTR_ERR(vdd_mx.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_mx regulator\n");
+		return PTR_ERR(vdd_mx.regulator[0]);
+	}
+
+	vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx");
+	if (IS_ERR(vdd_cx.regulator[0])) {
+		if (PTR_ERR(vdd_cx.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_cx regulator\n");
+		return PTR_ERR(vdd_cx.regulator[0]);
+	}
+
+	regmap = qcom_cc_map(pdev, &cam_cc_lito_desc);
+	if (IS_ERR(regmap)) {
+		dev_err(&pdev->dev, "Failed to map the cam CC registers\n");
+		return PTR_ERR(regmap);
+	}
+
+	clk_lucid_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
+	clk_lucid_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
+	clk_zonda_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
+	clk_lucid_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
+	clk_lucid_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
+
+	ret = qcom_cc_really_probe(pdev, &cam_cc_lito_desc, regmap);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register CAM CC clocks\n");
+		return ret;
+	}
+
+	dev_info(&pdev->dev, "Registered CAM CC clocks\n");
+	return 0;
+}
+
+static struct platform_driver cam_cc_lito_driver = {
+	.probe = cam_cc_lito_probe,
+	.driver = {
+		.name = "lito-camcc",
+		.of_match_table = cam_cc_lito_match_table,
+	},
+};
+
+static int __init cam_cc_lito_init(void)
+{
+	return platform_driver_register(&cam_cc_lito_driver);
+}
+subsys_initcall(cam_cc_lito_init);
+
+static void __exit cam_cc_lito_exit(void)
+{
+	platform_driver_unregister(&cam_cc_lito_driver);
+}
+module_exit(cam_cc_lito_exit);
+
+MODULE_DESCRIPTION("QTI CAM_CC LITO Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:lito-camcc");
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index afc76d1..a0521d1 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -1512,6 +1512,9 @@
 void clk_lucid_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
 				const struct alpha_pll_config *config)
 {
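+	/*
+	 * Skip static reconfiguration if the PLL is already running, e.g.
+	 * left enabled by the bootloader.
+	 */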
+	if (lucid_pll_is_enabled(pll, regmap))
+		return;
+
 	if (config->l)
 		regmap_write(regmap, PLL_L_VAL(pll), config->l);
 
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index a31079c..6e2b645 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013, 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/kernel.h>
@@ -1324,6 +1324,9 @@
 	if (cfg & mask)
 		f->pre_div = cfg & mask;
 
+	mode = cfg & CFG_MODE_MASK;
+	mode >>= CFG_MODE_SHIFT;
+
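+	/* cfg is narrowed to the source-select field below; MODE was read first. */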
 	cfg &= CFG_SRC_SEL_MASK;
 	cfg >>= CFG_SRC_SEL_SHIFT;
 
@@ -1338,8 +1341,6 @@
 		}
 	}
 
-	mode = cfg & CFG_MODE_MASK;
-	mode >>= CFG_MODE_SHIFT;
 	if (mode) {
 		mask = BIT(rcg->mnd_width) - 1;
 		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_M_DFSR(l),
diff --git a/drivers/clk/qcom/clk-regmap-divider.c b/drivers/clk/qcom/clk-regmap-divider.c
index 1ee75a5..a9f4753 100644
--- a/drivers/clk/qcom/clk-regmap-divider.c
+++ b/drivers/clk/qcom/clk-regmap-divider.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/kernel.h>
@@ -43,8 +35,10 @@
 {
 	struct clk_regmap_div *divider = to_clk_regmap_div(hw);
 
-	return divider_round_rate(hw, rate, prate, NULL, divider->width,
-				  CLK_DIVIDER_ROUND_CLOSEST);
+	return divider_round_rate(hw, rate, prate, divider->table,
+				  divider->width,
+				  CLK_DIVIDER_ROUND_CLOSEST |
+				  divider->flags);
 }
 
 static int div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -54,8 +48,9 @@
 	struct clk_regmap *clkr = &divider->clkr;
 	u32 div;
 
-	div = divider_get_val(rate, parent_rate, NULL, divider->width,
-			      CLK_DIVIDER_ROUND_CLOSEST);
+	div = divider_get_val(rate, parent_rate, divider->table,
+			      divider->width, CLK_DIVIDER_ROUND_CLOSEST |
+			      divider->flags);
 
 	return regmap_update_bits(clkr->regmap, divider->reg,
 				  (BIT(divider->width) - 1) << divider->shift,
@@ -73,8 +68,9 @@
 	div >>= divider->shift;
 	div &= BIT(divider->width) - 1;
 
-	return divider_recalc_rate(hw, parent_rate, div, NULL,
-				   CLK_DIVIDER_ROUND_CLOSEST, divider->width);
+	return divider_recalc_rate(hw, parent_rate, div, divider->table,
+				   CLK_DIVIDER_ROUND_CLOSEST | divider->flags,
+				   divider->width);
 }
 
 const struct clk_ops clk_regmap_div_ops = {
diff --git a/drivers/clk/qcom/clk-regmap-divider.h b/drivers/clk/qcom/clk-regmap-divider.h
index f61fdf9..37c9901 100644
--- a/drivers/clk/qcom/clk-regmap-divider.h
+++ b/drivers/clk/qcom/clk-regmap-divider.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2014,2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __QCOM_CLK_REGMAP_DIVIDER_H__
@@ -10,11 +10,12 @@
 #include "clk-regmap.h"
 
 struct clk_regmap_div {
-	u32			reg;
-	u32			shift;
-	u32			width;
-	u32			flags;
-	struct clk_regmap	clkr;
+	u32				reg;
+	u32				shift;
+	u32				width;
+	u32				flags;
+	const struct clk_div_table	*table;
+	struct clk_regmap		clkr;
 };
 
 extern const struct clk_ops clk_regmap_div_ops;
diff --git a/drivers/clk/qcom/dispcc-kona.c b/drivers/clk/qcom/dispcc-kona.c
index 8cd708f..46592a4 100644
--- a/drivers/clk/qcom/dispcc-kona.c
+++ b/drivers/clk/qcom/dispcc-kona.c
@@ -31,6 +31,8 @@
 
 static DEFINE_VDD_REGULATORS(vdd_mm, VDD_NUM, 1, vdd_corner);
 
+#define DISP_CC_MISC_CMD	0x8000
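+/* Bit 4 (0x10) of DISP_CC_MISC_CMD enables MDP clock gating; set in probe. */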
+
 enum {
 	P_BI_TCXO,
 	P_CHIP_SLEEP_CLK,
@@ -306,115 +308,6 @@
 	},
 };
 
-
-static struct clk_regmap_div disp_cc_mdss_spdm_dp_crypto_div_clk_src = {
-	.reg = 0x6034,
-	.shift = 0,
-	.width = 2,
-	.clkr.hw.init = &(struct clk_init_data) {
-		.name = "disp_cc_mdss_spdm_dp_crypto_div_clk_src",
-		.parent_names =
-			(const char *[]){ "disp_cc_mdss_dp_crypto_clk_src" },
-		.num_parents = 1,
-		.ops = &clk_regmap_div_ro_ops,
-	},
-};
-
-
-static struct clk_regmap_div disp_cc_mdss_spdm_dp_pixel1_div_clk_src = {
-	.reg = 0x603c,
-	.shift = 0,
-	.width = 2,
-	.clkr.hw.init = &(struct clk_init_data) {
-		.name = "disp_cc_mdss_spdm_dp_pixel1_div_clk_src",
-		.parent_names =
-			(const char *[]){ "disp_cc_mdss_dp_pixel1_clk_src" },
-		.num_parents = 1,
-		.ops = &clk_regmap_div_ro_ops,
-	},
-};
-
-
-static struct clk_regmap_div disp_cc_mdss_spdm_dp_pixel_div_clk_src = {
-	.reg = 0x6038,
-	.shift = 0,
-	.width = 2,
-	.clkr.hw.init = &(struct clk_init_data) {
-		.name = "disp_cc_mdss_spdm_dp_pixel_div_clk_src",
-		.parent_names =
-			(const char *[]){ "disp_cc_mdss_dp_pixel_clk_src" },
-		.num_parents = 1,
-		.ops = &clk_regmap_div_ro_ops,
-	},
-};
-
-
-static struct clk_regmap_div disp_cc_mdss_spdm_mdp_div_clk_src = {
-	.reg = 0x602c,
-	.shift = 0,
-	.width = 2,
-	.clkr.hw.init = &(struct clk_init_data) {
-		.name = "disp_cc_mdss_spdm_mdp_div_clk_src",
-		.parent_names = (const char *[]){ "disp_cc_mdss_mdp_clk_src" },
-		.num_parents = 1,
-		.ops = &clk_regmap_div_ro_ops,
-	},
-};
-
-
-static struct clk_regmap_div disp_cc_mdss_spdm_pclk0_div_clk_src = {
-	.reg = 0x6024,
-	.shift = 0,
-	.width = 2,
-	.clkr.hw.init = &(struct clk_init_data) {
-		.name = "disp_cc_mdss_spdm_pclk0_div_clk_src",
-		.parent_names =
-			(const char *[]){ "disp_cc_mdss_pclk0_clk_src" },
-		.num_parents = 1,
-		.ops = &clk_regmap_div_ro_ops,
-	},
-};
-
-
-static struct clk_regmap_div disp_cc_mdss_spdm_pclk1_div_clk_src = {
-	.reg = 0x6028,
-	.shift = 0,
-	.width = 2,
-	.clkr.hw.init = &(struct clk_init_data) {
-		.name = "disp_cc_mdss_spdm_pclk1_div_clk_src",
-		.parent_names =
-			(const char *[]){ "disp_cc_mdss_pclk1_clk_src" },
-		.num_parents = 1,
-		.ops = &clk_regmap_div_ro_ops,
-	},
-};
-
-
-static struct clk_regmap_div disp_cc_mdss_spdm_rot_div_clk_src = {
-	.reg = 0x6030,
-	.shift = 0,
-	.width = 2,
-	.clkr.hw.init = &(struct clk_init_data) {
-		.name = "disp_cc_mdss_spdm_rot_div_clk_src",
-		.parent_names = (const char *[]){ "disp_cc_mdss_rot_clk_src" },
-		.num_parents = 1,
-		.ops = &clk_regmap_div_ro_ops,
-	},
-};
-
-
-static struct clk_regmap_div disp_cc_pll_test_div_clk_src = {
-	.reg = 0x5014,
-	.shift = 0,
-	.width = 2,
-	.clkr.hw.init = &(struct clk_init_data) {
-		.name = "disp_cc_pll_test_div_clk_src",
-		.parent_names = (const char *[]){ "disp_cc_pll0" },
-		.num_parents = 1,
-		.ops = &clk_regmap_div_ro_ops,
-	},
-};
-
 static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
 	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(37500000, P_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0),
@@ -978,38 +871,6 @@
 	},
 };
 
-static struct clk_rcg2 disp_cc_xo_clk_src = {
-	.cmd_rcgr = 0x6044,
-	.mnd_width = 0,
-	.hid_width = 5,
-	.parent_map = disp_cc_parent_map_1,
-	.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
-	.clkr.hw.init = &(struct clk_init_data){
-		.name = "disp_cc_xo_clk_src",
-		.parent_names = disp_cc_parent_names_1,
-		.num_parents = 2,
-		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_rcg2_ops,
-		.vdd_class = &vdd_mm,
-		.num_rate_max = VDD_NUM,
-		.rate_max = (unsigned long[VDD_NUM]) {
-			[VDD_MIN] = 19200000},
-	},
-};
-
-static struct clk_branch disp_cc_debug_clk = {
-	.halt_reg = 0x500c,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x500c,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "disp_cc_debug_clk",
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
 static struct clk_branch disp_cc_mdss_ahb_clk = {
 	.halt_reg = 0x2080,
 	.halt_check = BRANCH_HALT,
@@ -1567,145 +1428,6 @@
 	},
 };
 
-static struct clk_branch disp_cc_mdss_spdm_debug_clk = {
-	.halt_reg = 0x6020,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x6020,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "disp_cc_mdss_spdm_debug_clk",
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
-static struct clk_branch disp_cc_mdss_spdm_dp_crypto_clk = {
-	.halt_reg = 0x6014,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x6014,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "disp_cc_mdss_spdm_dp_crypto_clk",
-			.parent_names = (const char *[]){
-				"disp_cc_mdss_spdm_dp_crypto_div_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT,
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
-static struct clk_branch disp_cc_mdss_spdm_dp_pixel1_clk = {
-	.halt_reg = 0x601c,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x601c,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "disp_cc_mdss_spdm_dp_pixel1_clk",
-			.parent_names = (const char *[]){
-				"disp_cc_mdss_spdm_dp_pixel1_div_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT,
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
-static struct clk_branch disp_cc_mdss_spdm_dp_pixel_clk = {
-	.halt_reg = 0x6018,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x6018,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "disp_cc_mdss_spdm_dp_pixel_clk",
-			.parent_names = (const char *[]){
-				"disp_cc_mdss_spdm_dp_pixel_div_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT,
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
-static struct clk_branch disp_cc_mdss_spdm_mdp_clk = {
-	.halt_reg = 0x600c,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x600c,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "disp_cc_mdss_spdm_mdp_clk",
-			.parent_names = (const char *[]){
-				"disp_cc_mdss_spdm_mdp_div_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT,
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
-static struct clk_branch disp_cc_mdss_spdm_pclk0_clk = {
-	.halt_reg = 0x6004,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x6004,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "disp_cc_mdss_spdm_pclk0_clk",
-			.parent_names = (const char *[]){
-				"disp_cc_mdss_spdm_pclk0_div_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT,
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
-static struct clk_branch disp_cc_mdss_spdm_pclk1_clk = {
-	.halt_reg = 0x6008,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x6008,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "disp_cc_mdss_spdm_pclk1_clk",
-			.parent_names = (const char *[]){
-				"disp_cc_mdss_spdm_pclk1_div_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT,
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
-static struct clk_branch disp_cc_mdss_spdm_rot_clk = {
-	.halt_reg = 0x6010,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x6010,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "disp_cc_mdss_spdm_rot_clk",
-			.parent_names = (const char *[]){
-				"disp_cc_mdss_spdm_rot_div_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT,
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
 static struct clk_branch disp_cc_mdss_vsync_clk = {
 	.halt_reg = 0x2024,
 	.halt_check = BRANCH_HALT,
@@ -1724,24 +1446,6 @@
 	},
 };
 
-static struct clk_branch disp_cc_pll_test_clk = {
-	.halt_reg = 0x5018,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x5018,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "disp_cc_pll_test_clk",
-			.parent_names = (const char *[]){
-				"disp_cc_pll_test_div_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT,
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
 static struct clk_branch disp_cc_sleep_clk = {
 	.halt_reg = 0x6078,
 	.halt_check = BRANCH_HALT,
@@ -1768,10 +1472,6 @@
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "disp_cc_xo_clk",
-			.parent_names = (const char *[]){
-				"disp_cc_xo_clk_src",
-			},
-			.num_parents = 1,
 			.flags = CLK_IS_CRITICAL,
 			.ops = &clk_branch2_ops,
 		},
@@ -1779,7 +1479,6 @@
 };
 
 static struct clk_regmap *disp_cc_kona_clocks[] = {
-	[DISP_CC_DEBUG_CLK] = &disp_cc_debug_clk.clkr,
 	[DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
 	[DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
 	[DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
@@ -1842,46 +1541,18 @@
 	[DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
 	[DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
 	[DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
-	[DISP_CC_MDSS_SPDM_DEBUG_CLK] = &disp_cc_mdss_spdm_debug_clk.clkr,
-	[DISP_CC_MDSS_SPDM_DP_CRYPTO_CLK] =
-		&disp_cc_mdss_spdm_dp_crypto_clk.clkr,
-	[DISP_CC_MDSS_SPDM_DP_CRYPTO_DIV_CLK_SRC] =
-		&disp_cc_mdss_spdm_dp_crypto_div_clk_src.clkr,
-	[DISP_CC_MDSS_SPDM_DP_PIXEL1_CLK] =
-		&disp_cc_mdss_spdm_dp_pixel1_clk.clkr,
-	[DISP_CC_MDSS_SPDM_DP_PIXEL1_DIV_CLK_SRC] =
-		&disp_cc_mdss_spdm_dp_pixel1_div_clk_src.clkr,
-	[DISP_CC_MDSS_SPDM_DP_PIXEL_CLK] = &disp_cc_mdss_spdm_dp_pixel_clk.clkr,
-	[DISP_CC_MDSS_SPDM_DP_PIXEL_DIV_CLK_SRC] =
-		&disp_cc_mdss_spdm_dp_pixel_div_clk_src.clkr,
-	[DISP_CC_MDSS_SPDM_MDP_CLK] = &disp_cc_mdss_spdm_mdp_clk.clkr,
-	[DISP_CC_MDSS_SPDM_MDP_DIV_CLK_SRC] =
-		&disp_cc_mdss_spdm_mdp_div_clk_src.clkr,
-	[DISP_CC_MDSS_SPDM_PCLK0_CLK] = &disp_cc_mdss_spdm_pclk0_clk.clkr,
-	[DISP_CC_MDSS_SPDM_PCLK0_DIV_CLK_SRC] =
-		&disp_cc_mdss_spdm_pclk0_div_clk_src.clkr,
-	[DISP_CC_MDSS_SPDM_PCLK1_CLK] = &disp_cc_mdss_spdm_pclk1_clk.clkr,
-	[DISP_CC_MDSS_SPDM_PCLK1_DIV_CLK_SRC] =
-		&disp_cc_mdss_spdm_pclk1_div_clk_src.clkr,
-	[DISP_CC_MDSS_SPDM_ROT_CLK] = &disp_cc_mdss_spdm_rot_clk.clkr,
-	[DISP_CC_MDSS_SPDM_ROT_DIV_CLK_SRC] =
-		&disp_cc_mdss_spdm_rot_div_clk_src.clkr,
 	[DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
 	[DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
 	[DISP_CC_PLL0] = &disp_cc_pll0.clkr,
 	[DISP_CC_PLL1] = &disp_cc_pll1.clkr,
-	[DISP_CC_PLL_TEST_CLK] = &disp_cc_pll_test_clk.clkr,
-	[DISP_CC_PLL_TEST_DIV_CLK_SRC] = &disp_cc_pll_test_div_clk_src.clkr,
 	[DISP_CC_SLEEP_CLK] = &disp_cc_sleep_clk.clkr,
 	[DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr,
 	[DISP_CC_XO_CLK] = &disp_cc_xo_clk.clkr,
-	[DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr,
 };
 
 static const struct qcom_reset_map disp_cc_kona_resets[] = {
 	[DISP_CC_MDSS_CORE_BCR] = { 0x2000 },
 	[DISP_CC_MDSS_RSCC_BCR] = { 0x4000 },
-	[DISP_CC_MDSS_SPDM_BCR] = { 0x6000 },
 };
 
 static const struct regmap_config disp_cc_kona_regmap_config = {
@@ -1938,6 +1609,9 @@
 	clk_lucid_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
 	clk_lucid_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
 
+	/* Enable clock gating for MDP clocks */
+	regmap_update_bits(regmap, DISP_CC_MISC_CMD, 0x10, 0x10);
+
 	ret = qcom_cc_really_probe(pdev, &disp_cc_kona_desc, regmap);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to register Display CC clocks\n");
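
The probe hunk above turns on MDP clock gating with a single read-modify-write: regmap_update_bits(map, reg, mask, val) reads the register, changes only the bits covered by mask, and writes back. A minimal sketch of the same pattern, with a hypothetical register offset (the real DISP_CC_MISC_CMD value is SoC-specific):

#include <linux/bits.h>
#include <linux/regmap.h>

#define MISC_CMD_REG		0x8000		/* hypothetical offset */
#define MDP_CLK_GATE_EN		BIT(4)		/* 0x10, as in the hunk */

static int enable_mdp_clock_gating(struct regmap *regmap)
{
	/* Only bit 4 changes; all other bits keep their current value. */
	return regmap_update_bits(regmap, MISC_CMD_REG,
				  MDP_CLK_GATE_EN, MDP_CLK_GATE_EN);
}
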
diff --git a/drivers/clk/qcom/gcc-kona.c b/drivers/clk/qcom/gcc-kona.c
index c9036ad..7ba76b0 100644
--- a/drivers/clk/qcom/gcc-kona.c
+++ b/drivers/clk/qcom/gcc-kona.c
@@ -219,6 +219,7 @@
 		.name = "gcc_cpuss_ahb_postdiv_clk_src",
 		.parent_names = (const char *[]){ "gcc_cpuss_ahb_clk_src" },
 		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
 		.ops = &clk_regmap_div_ro_ops,
 	},
 };
@@ -232,6 +233,7 @@
 		.parent_names =
 			(const char *[]){ "gcc_usb30_prim_mock_utmi_clk_src" },
 		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
 		.ops = &clk_regmap_div_ro_ops,
 	},
 };
@@ -245,6 +247,7 @@
 		.parent_names =
 			(const char *[]){ "gcc_usb30_sec_mock_utmi_clk_src" },
 		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
 		.ops = &clk_regmap_div_ro_ops,
 	},
 };
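
These gcc-kona hunks (and the matching videocc-kona hunks further down) all add CLK_SET_RATE_PARENT to read-only post-dividers: with the divide ratio fixed in hardware, a clk_set_rate() on the child can only be satisfied by forwarding the request to the parent RCG, which is exactly what the flag does. A generic-framework sketch of such a divider, assuming the common clk_divider_ro_ops from <linux/clk-provider.h> in place of the qcom regmap variant:

#include <linux/clk-provider.h>

/* clk_set_rate() on this read-only divider is forwarded to "parent_clk"
 * because of CLK_SET_RATE_PARENT; without the flag the request would be
 * clamped at whatever the fixed divider happens to produce. */
static const struct clk_init_data postdiv_init = {
	.name		= "example_postdiv_clk_src",
	.parent_names	= (const char *[]){ "parent_clk" },
	.num_parents	= 1,
	.flags		= CLK_SET_RATE_PARENT,
	.ops		= &clk_divider_ro_ops,
};
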
diff --git a/drivers/clk/qcom/gcc-lito.c b/drivers/clk/qcom/gcc-lito.c
index 3ed2867..7ca26b8 100644
--- a/drivers/clk/qcom/gcc-lito.c
+++ b/drivers/clk/qcom/gcc-lito.c
@@ -2690,11 +2690,14 @@
 	[GCC_PRNG_BCR] = { 0x34000 },
 	[GCC_QUPV3_WRAPPER_0_BCR] = { 0x17000 },
 	[GCC_QUPV3_WRAPPER_1_BCR] = { 0x18000 },
+	[GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 },
 	[GCC_SDCC1_BCR] = { 0x26000 },
 	[GCC_SDCC2_BCR] = { 0x14000 },
 	[GCC_SDCC4_BCR] = { 0x16000 },
 	[GCC_UFS_PHY_BCR] = { 0x77000 },
 	[GCC_USB30_PRIM_BCR] = { 0xf000 },
+	[GCC_USB3_DP_PHY_PRIM_BCR] = { 0x50008 },
+	[GCC_USB3_PHY_PRIM_BCR] = { 0x50000 },
 	[GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
 };
 
diff --git a/drivers/clk/qcom/gdsc-regulator.c b/drivers/clk/qcom/gdsc-regulator.c
index 7751839..163073e 100644
--- a/drivers/clk/qcom/gdsc-regulator.c
+++ b/drivers/clk/qcom/gdsc-regulator.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/kernel.h>
@@ -26,6 +26,7 @@
 #define PWR_ON_MASK		BIT(31)
 #define CLK_DIS_WAIT_MASK	(0xF << 12)
 #define CLK_DIS_WAIT_SHIFT	(12)
+#define RETAIN_FF_ENABLE_MASK	BIT(11)
 #define SW_OVERRIDE_MASK	BIT(2)
 #define HW_CONTROL_MASK		BIT(1)
 #define SW_COLLAPSE_MASK	BIT(0)
@@ -57,6 +58,7 @@
 	bool			toggle_mem;
 	bool			toggle_periph;
 	bool			toggle_logic;
+	bool			retain_ff_enable;
 	bool			resets_asserted;
 	bool			root_en;
 	bool			force_root_en;
@@ -311,6 +313,11 @@
 				goto end;
 			}
 		}
+
+		if (sc->retain_ff_enable && !(regval & RETAIN_FF_ENABLE_MASK)) {
+			regval |= RETAIN_FF_ENABLE_MASK;
+			regmap_write(sc->regmap, REG_OFFSET, regval);
+		}
 	} else {
 		for (i = 0; i < sc->reset_count; i++)
 			reset_control_deassert(sc->reset_clocks[i]);
@@ -735,6 +742,8 @@
 	retain_periph = of_property_read_bool(pdev->dev.of_node,
 					    "qcom,retain-periph");
 	sc->toggle_periph = !retain_periph;
+	sc->retain_ff_enable = of_property_read_bool(pdev->dev.of_node,
+						"qcom,retain-regs");
 	sc->toggle_logic = !of_property_read_bool(pdev->dev.of_node,
 						"qcom,skip-logic-collapse");
 	support_hw_trigger = of_property_read_bool(pdev->dev.of_node,
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-7nm-util.c b/drivers/clk/qcom/mdss/mdss-dp-pll-7nm-util.c
index 82ff076..1457715 100644
--- a/drivers/clk/qcom/mdss/mdss-dp-pll-7nm-util.c
+++ b/drivers/clk/qcom/mdss/mdss-dp-pll-7nm-util.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt)	"[dp-pll] %s: " fmt, __func__
@@ -19,6 +19,7 @@
 #define DP_PHY_PD_CTL				0x0018
 #define DP_PHY_MODE				0x001C
 
+#define DP_PHY_AUX_CFG1				0x0024
 #define DP_PHY_AUX_CFG2				0x0028
 
 #define DP_PHY_VCO_DIV				0x0070
@@ -144,6 +145,9 @@
 		return -EINVAL;
 	}
 
+	if (is_gdsc_disabled(dp_res))
+		return 0;
+
 	rc = mdss_pll_resource_enable(dp_res, true);
 	if (rc) {
 		pr_err("Failed to enable dp_res resources\n");
@@ -402,7 +406,8 @@
 	struct dp_pll_db_7nm *pdb = (struct dp_pll_db_7nm *)dp_res->priv;
 	u32 bias_en, drvr_en;
 
-	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_AUX_CFG2, 0x24);
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_AUX_CFG1, 0x13);
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_AUX_CFG2, 0xA4);
 	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x01);
 	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x05);
 	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x01);
@@ -636,6 +641,9 @@
 	vco = to_dp_vco_hw(hw);
 	dp_res = vco->priv;
 
+	if (is_gdsc_disabled(dp_res))
+		return 0;
+
 	rc = mdss_pll_resource_enable(dp_res, true);
 	if (rc) {
 		pr_err("Failed to enable mdss DP pll=%d\n", dp_res->index);
diff --git a/drivers/clk/qcom/videocc-kona.c b/drivers/clk/qcom/videocc-kona.c
index ca90ec4..a85a880 100644
--- a/drivers/clk/qcom/videocc-kona.c
+++ b/drivers/clk/qcom/videocc-kona.c
@@ -162,6 +162,7 @@
 		.name = "video_cc_mvs0_div_clk_src",
 		.parent_names = (const char *[]){ "video_cc_mvs0_clk_src" },
 		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
 		.ops = &clk_regmap_div_ro_ops,
 	},
 };
@@ -174,6 +175,7 @@
 		.name = "video_cc_mvs0c_div2_div_clk_src",
 		.parent_names = (const char *[]){ "video_cc_mvs0_clk_src" },
 		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
 		.ops = &clk_regmap_div_ro_ops,
 	},
 };
@@ -186,6 +188,7 @@
 		.name = "video_cc_mvs1_div_clk_src",
 		.parent_names = (const char *[]){ "video_cc_mvs1_clk_src" },
 		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
 		.ops = &clk_regmap_div_ro_ops,
 	},
 };
@@ -198,6 +201,7 @@
 		.name = "video_cc_mvs1c_div2_div_clk_src",
 		.parent_names = (const char *[]){ "video_cc_mvs1_clk_src" },
 		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
 		.ops = &clk_regmap_div_ro_ops,
 	},
 };
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index 67e73fd..69fb3af 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -382,7 +382,7 @@
 	COMPOSITE_NOMUX(0, "spdif_pre", "i2s_src", 0,
 			RK2928_CLKSEL_CON(5), 0, 7, DFLAGS,
 			RK2928_CLKGATE_CON(0), 13, GFLAGS),
-	COMPOSITE_FRACMUX(0, "spdif_frac", "spdif_pll", CLK_SET_RATE_PARENT,
+	COMPOSITE_FRACMUX(0, "spdif_frac", "spdif_pre", CLK_SET_RATE_PARENT,
 			RK2928_CLKSEL_CON(9), 0,
 			RK2928_CLKGATE_CON(0), 14, GFLAGS,
 			&common_spdif_fracmux),
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
index 2d5d8b4..c4d0b6f 100644
--- a/drivers/clk/socfpga/clk-pll-s10.c
+++ b/drivers/clk/socfpga/clk-pll-s10.c
@@ -43,7 +43,7 @@
 	/* Read mdiv and fdiv from the fdbck register */
 	reg = readl(socfpgaclk->hw.reg + 0x4);
 	mdiv = (reg & SOCFPGA_PLL_MDIV_MASK) >> SOCFPGA_PLL_MDIV_SHIFT;
-	vco_freq = (unsigned long long)parent_rate * (mdiv + 6);
+	vco_freq = (unsigned long long)vco_freq * (mdiv + 6);
 
 	return (unsigned long)vco_freq;
 }
diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
index 5b238fc..8281dfb 100644
--- a/drivers/clk/socfpga/clk-s10.c
+++ b/drivers/clk/socfpga/clk-s10.c
@@ -12,17 +12,17 @@
 
 #include "stratix10-clk.h"
 
-static const char * const pll_mux[] = { "osc1", "cb_intosc_hs_div2_clk",
-					"f2s_free_clk",};
+static const char * const pll_mux[] = { "osc1", "cb-intosc-hs-div2-clk",
+					"f2s-free-clk",};
 static const char * const cntr_mux[] = { "main_pll", "periph_pll",
-					 "osc1", "cb_intosc_hs_div2_clk",
-					 "f2s_free_clk"};
-static const char * const boot_mux[] = { "osc1", "cb_intosc_hs_div2_clk",};
+					 "osc1", "cb-intosc-hs-div2-clk",
+					 "f2s-free-clk"};
+static const char * const boot_mux[] = { "osc1", "cb-intosc-hs-div2-clk",};
 
 static const char * const noc_free_mux[] = {"main_noc_base_clk",
 					    "peri_noc_base_clk",
-					    "osc1", "cb_intosc_hs_div2_clk",
-					    "f2s_free_clk"};
+					    "osc1", "cb-intosc-hs-div2-clk",
+					    "f2s-free-clk"};
 
 static const char * const emaca_free_mux[] = {"peri_emaca_clk", "boot_clk"};
 static const char * const emacb_free_mux[] = {"peri_emacb_clk", "boot_clk"};
@@ -33,14 +33,14 @@
 static const char * const psi_ref_free_mux[] = {"peri_psi_ref_clk", "boot_clk"};
 static const char * const mpu_mux[] = { "mpu_free_clk", "boot_clk",};
 
-static const char * const s2f_usr0_mux[] = {"f2s_free_clk", "boot_clk"};
+static const char * const s2f_usr0_mux[] = {"f2s-free-clk", "boot_clk"};
 static const char * const emac_mux[] = {"emaca_free_clk", "emacb_free_clk"};
 static const char * const noc_mux[] = {"noc_free_clk", "boot_clk"};
 
 static const char * const mpu_free_mux[] = {"main_mpu_base_clk",
 					    "peri_mpu_base_clk",
-					    "osc1", "cb_intosc_hs_div2_clk",
-					    "f2s_free_clk"};
+					    "osc1", "cb-intosc-hs-div2-clk",
+					    "f2s-free-clk"};
 
 /* clocks in AO (always on) controller */
 static const struct stratix10_pll_clock s10_pll_clks[] = {
diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c
index 4e20733..9e3944f 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.c
+++ b/drivers/clk/sunxi-ng/ccu_nm.c
@@ -19,6 +19,17 @@
 	unsigned long	m, min_m, max_m;
 };
 
+static unsigned long ccu_nm_calc_rate(unsigned long parent,
+				      unsigned long n, unsigned long m)
+{
+	u64 rate = parent;
+
+	rate *= n;
+	do_div(rate, m);
+
+	return rate;
+}
+
 static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
 			     struct _ccu_nm *nm)
 {
@@ -28,7 +39,8 @@
 
 	for (_n = nm->min_n; _n <= nm->max_n; _n++) {
 		for (_m = nm->min_m; _m <= nm->max_m; _m++) {
-			unsigned long tmp_rate = parent * _n  / _m;
+			unsigned long tmp_rate = ccu_nm_calc_rate(parent,
+								  _n, _m);
 
 			if (tmp_rate > rate)
 				continue;
@@ -100,7 +112,7 @@
 	if (ccu_sdm_helper_is_enabled(&nm->common, &nm->sdm))
 		rate = ccu_sdm_helper_read_rate(&nm->common, &nm->sdm, m, n);
 	else
-		rate = parent_rate * n / m;
+		rate = ccu_nm_calc_rate(parent_rate, n, m);
 
 	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
 		rate /= nm->fixed_post_div;
@@ -142,7 +154,7 @@
 	_nm.max_m = nm->m.max ?: 1 << nm->m.width;
 
 	ccu_nm_find_best(*parent_rate, rate, &_nm);
-	rate = *parent_rate * _nm.n / _nm.m;
+	rate = ccu_nm_calc_rate(*parent_rate, _nm.n, _nm.m);
 
 	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
 		rate /= nm->fixed_post_div;
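
Each replaced expression computed `parent * n / m` in unsigned long, which wraps on 32-bit sunxi SoCs as soon as the intermediate product passes 2^32 (24 MHz * 200 = 4.8 GHz already overflows). The new helper widens to u64 first and divides with do_div(), the kernel idiom for 64-by-32 division; a standalone sketch:

#include <asm/div64.h>		/* do_div() */
#include <linux/types.h>

static unsigned long nm_calc_rate(unsigned long parent,
				  unsigned long n, unsigned long m)
{
	u64 rate = (u64)parent * n;	/* widen before the product can wrap */

	do_div(rate, m);		/* divides in place, returns the remainder */
	return rate;			/* result fits unsigned long again */
}
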
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index a11f4ba..316d48d 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -290,6 +290,7 @@
 
 config ARC_TIMERS
 	bool "Support for 32-bit TIMERn counters in ARC Cores" if COMPILE_TEST
+	depends on GENERIC_SCHED_CLOCK
 	select TIMER_OF
 	help
 	  These are legacy 32-bit TIMER0 and TIMER1 counters found on all ARC cores
diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c
index 20da9b1..b28970c 100644
--- a/drivers/clocksource/arc_timer.c
+++ b/drivers/clocksource/arc_timer.c
@@ -23,6 +23,7 @@
 #include <linux/cpu.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
+#include <linux/sched_clock.h>
 
 #include <soc/arc/timers.h>
 #include <soc/arc/mcip.h>
@@ -88,6 +89,11 @@
 	return (((u64)h) << 32) | l;
 }
 
+static notrace u64 arc_gfrc_clock_read(void)
+{
+	return arc_read_gfrc(NULL);
+}
+
 static struct clocksource arc_counter_gfrc = {
 	.name   = "ARConnect GFRC",
 	.rating = 400,
@@ -111,6 +117,8 @@
 	if (ret)
 		return ret;
 
+	sched_clock_register(arc_gfrc_clock_read, 64, arc_timer_freq);
+
 	return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
 }
 TIMER_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
@@ -139,6 +147,11 @@
 	return (((u64)h) << 32) | l;
 }
 
+static notrace u64 arc_rtc_clock_read(void)
+{
+	return arc_read_rtc(NULL);
+}
+
 static struct clocksource arc_counter_rtc = {
 	.name   = "ARCv2 RTC",
 	.rating = 350,
@@ -170,6 +183,8 @@
 
 	write_aux_reg(AUX_RTC_CTRL, 1);
 
+	sched_clock_register(arc_rtc_clock_read, 64, arc_timer_freq);
+
 	return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
 }
 TIMER_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
@@ -185,6 +200,11 @@
 	return (u64) read_aux_reg(ARC_REG_TIMER1_CNT);
 }
 
+static notrace u64 arc_timer1_clock_read(void)
+{
+	return arc_read_timer1(NULL);
+}
+
 static struct clocksource arc_counter_timer1 = {
 	.name   = "ARC Timer1",
 	.rating = 300,
@@ -209,6 +229,8 @@
 	write_aux_reg(ARC_REG_TIMER1_CNT, 0);
 	write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
 
+	sched_clock_register(arc_timer1_clock_read, 32, arc_timer_freq);
+
 	return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
 }
 
diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c
index 62d2469..9701107 100644
--- a/drivers/clocksource/timer-integrator-ap.c
+++ b/drivers/clocksource/timer-integrator-ap.c
@@ -181,8 +181,7 @@
 	int irq;
 	struct clk *clk;
 	unsigned long rate;
-	struct device_node *pri_node;
-	struct device_node *sec_node;
+	struct device_node *alias_node;
 
 	base = of_io_request_and_map(node, 0, "integrator-timer");
 	if (IS_ERR(base))
@@ -204,7 +203,18 @@
 		return err;
 	}
 
-	pri_node = of_find_node_by_path(path);
+	alias_node = of_find_node_by_path(path);
+
+	/*
+	 * The pointer is used as an identifier, not as a pointer, so we
+	 * can drop the refcount on the of_node immediately after
+	 * getting it.
+	 */
+	of_node_put(alias_node);
+
+	if (node == alias_node)
+		/* The primary timer lacks IRQ, use as clocksource */
+		return integrator_clocksource_init(rate, base);
 
 	err = of_property_read_string(of_aliases,
 				"arm,timer-secondary", &path);
@@ -213,14 +223,11 @@
 		return err;
 	}
 
+	alias_node = of_find_node_by_path(path);
 
-	sec_node = of_find_node_by_path(path);
+	of_node_put(alias_node);
 
-	if (node == pri_node)
-		/* The primary timer lacks IRQ, use as clocksource */
-		return integrator_clocksource_init(rate, base);
-
-	if (node == sec_node) {
+	if (node == alias_node) {
 		/* The secondary timer will drive the clock event */
 		irq = irq_of_parse_and_map(node, 0);
 		return integrator_clockevent_init(rate, base, irq);
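
The integrator-ap rework also settles the refcounting: of_find_node_by_path() returns the node with an elevated refcount, and since the result is only ever compared against `node`, never dereferenced, the reference can be dropped immediately. Condensed into a helper (hypothetical name):

#include <linux/of.h>

static bool node_matches_alias(struct device_node *node, const char *path)
{
	struct device_node *alias = of_find_node_by_path(path);

	/*
	 * The pointer is only used as an identity token; drop the
	 * reference taken by of_find_node_by_path() right away.
	 */
	of_node_put(alias);

	return node == alias;
}
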
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 2d59e72..4ddbc66 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -26,7 +26,6 @@
 #include <linux/kernel_stat.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
-#include <linux/sched/cpufreq.h>
 #include <linux/slab.h>
 #include <linux/suspend.h>
 #include <linux/syscore_ops.h>
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 9df4413..5e81669 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -11,17 +11,21 @@
 #include <linux/of_platform.h>
 #include <linux/pm_opp.h>
 #include <linux/energy_model.h>
+#include <linux/sched.h>
+#include <linux/cpu_cooling.h>
 
 #define LUT_MAX_ENTRIES			40U
 #define CORE_COUNT_VAL(val)		(((val) & (GENMASK(18, 16))) >> 16)
 #define LUT_ROW_SIZE			4
 #define CLK_HW_DIV			2
+#define CYCLE_CNTR_OFFSET(c, m)		(((c) - cpumask_first(m) + 1) * 4)
 
 enum {
 	REG_ENABLE,
 	REG_FREQ_LUT_TABLE,
 	REG_VOLT_LUT_TABLE,
 	REG_PERF_STATE,
+	REG_CYCLE_CNTR,
 
 	REG_ARRAY_SIZE,
 };
@@ -35,15 +39,56 @@
 	unsigned long cpu_hw_rate;
 };
 
+struct cpufreq_counter {
+	u64 total_cycle_counter;
+	u32 prev_cycle_counter;
+	spinlock_t lock;
+};
+
 static const u16 cpufreq_qcom_std_offsets[REG_ARRAY_SIZE] = {
 	[REG_ENABLE]		= 0x0,
 	[REG_FREQ_LUT_TABLE]	= 0x100,
 	[REG_VOLT_LUT_TABLE]	= 0x200,
 	[REG_PERF_STATE]	= 0x320,
+	[REG_CYCLE_CNTR]	= 0x3c4,
 };
 
+
+static struct cpufreq_counter qcom_cpufreq_counter[NR_CPUS];
 static struct cpufreq_qcom *qcom_freq_domain_map[NR_CPUS];
 
+static u64 qcom_cpufreq_get_cpu_cycle_counter(int cpu)
+{
+	struct cpufreq_counter *cpu_counter;
+	struct cpufreq_qcom *cpu_domain;
+	u64 cycle_counter_ret;
+	unsigned long flags;
+	u16 offset;
+	u32 val;
+
+	cpu_domain = qcom_freq_domain_map[cpu];
+	cpu_counter = &qcom_cpufreq_counter[cpu];
+	spin_lock_irqsave(&cpu_counter->lock, flags);
+
+	offset = CYCLE_CNTR_OFFSET(cpu, &cpu_domain->related_cpus);
+	val = readl_relaxed(cpu_domain->reg_bases[REG_CYCLE_CNTR] + offset);
+
+	if (val < cpu_counter->prev_cycle_counter) {
+		/* Handle counter overflow */
+		cpu_counter->total_cycle_counter += UINT_MAX -
+			cpu_counter->prev_cycle_counter + val;
+		cpu_counter->prev_cycle_counter = val;
+	} else {
+		cpu_counter->total_cycle_counter += val -
+			cpu_counter->prev_cycle_counter;
+		cpu_counter->prev_cycle_counter = val;
+	}
+	cycle_counter_ret = cpu_counter->total_cycle_counter;
+	spin_unlock_irqrestore(&cpu_counter->lock, flags);
+
+	return cycle_counter_ret;
+}
+
 static int
 qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
 			     unsigned int index)
@@ -134,6 +179,35 @@
 	NULL
 };
 
+static void qcom_cpufreq_ready(struct cpufreq_policy *policy)
+{
+	static struct thermal_cooling_device *cdev[NR_CPUS];
+	struct device_node *np;
+	unsigned int cpu = policy->cpu;
+
+	if (cdev[cpu])
+		return;
+
+	np = of_cpu_device_node_get(cpu);
+	if (WARN_ON(!np))
+		return;
+
+	/*
+	 * For now, just loading the cooling device;
+	 * thermal DT code takes care of matching them.
+	 */
+	if (of_find_property(np, "#cooling-cells", NULL)) {
+		cdev[cpu] = of_cpufreq_cooling_register(policy);
+		if (IS_ERR(cdev[cpu])) {
+			pr_err("running cpufreq for CPU%d without cooling dev: %ld\n",
+			       cpu, PTR_ERR(cdev[cpu]));
+			cdev[cpu] = NULL;
+		}
+	}
+
+	of_node_put(np);
+}
+
 static struct cpufreq_driver cpufreq_qcom_hw_driver = {
 	.flags		= CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
 			  CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
@@ -145,6 +219,7 @@
 	.name		= "qcom-cpufreq-hw",
 	.attr		= qcom_cpufreq_hw_attr,
 	.boost_enabled	= true,
+	.ready		= qcom_cpufreq_ready,
 };
 
 static int qcom_cpufreq_hw_read_lut(struct platform_device *pdev,
@@ -342,6 +417,9 @@
 static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
 {
 	int rc;
+	struct cpu_cycle_counter_cb cycle_counter_cb = {
+		.get_cpu_cycle_counter = qcom_cpufreq_get_cpu_cycle_counter,
+	};
 
 	/* Get the bases of cpufreq for domains */
 	rc = qcom_resources_init(pdev);
@@ -356,7 +434,14 @@
 		return rc;
 	}
 
+	rc = register_cpu_cycle_counter_cb(&cycle_counter_cb);
+	if (rc) {
+		dev_err(&pdev->dev, "cycle counter cb failed to register\n");
+		return rc;
+	}
+
 	dev_dbg(&pdev->dev, "QCOM CPUFreq HW driver initialized\n");
+	of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
 
 	return 0;
 }
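
qcom_cpufreq_get_cpu_cycle_counter() folds a free-running 32-bit hardware counter into a monotonic 64-bit total under a per-CPU spinlock: whenever the raw value is smaller than the previous reading, exactly one wrap is assumed and the wrapped-around distance is added. The accounting in isolation, mirroring the hunk's arithmetic:

#include <linux/kernel.h>	/* UINT_MAX */
#include <linux/types.h>

struct cycle_acc {
	u64 total;	/* monotonic, never goes backwards */
	u32 prev;	/* last raw 32-bit hardware reading */
};

static u64 cycle_acc_update(struct cycle_acc *a, u32 val)
{
	if (val < a->prev)	/* counter wrapped since the last read */
		a->total += (u64)(UINT_MAX - a->prev) + val;
	else
		a->total += val - a->prev;

	a->prev = val;
	return a->total;
}

The scheme only stays correct if reads happen at least once per counter period; miss a full wrap and 2^32 cycles silently disappear.
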
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 80a7f8d..80001120 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -53,9 +53,9 @@
 	int ret;
 	struct scmi_data *priv = policy->driver_data;
 	struct scmi_perf_ops *perf_ops = handle->perf_ops;
-	u64 freq = policy->freq_table[index].frequency * 1000;
+	u64 freq = policy->freq_table[index].frequency;
 
-	ret = perf_ops->freq_set(handle, priv->domain_id, freq, false);
+	ret = perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false);
 	if (!ret)
 		arch_set_freq_scale(policy->related_cpus, freq,
 				    policy->cpuinfo.max_freq);
diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
index 9e56bc4..74c2479 100644
--- a/drivers/cpuidle/cpuidle-pseries.c
+++ b/drivers/cpuidle/cpuidle-pseries.c
@@ -247,7 +247,13 @@
 		return -ENODEV;
 
 	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
-		if (lppaca_shared_proc(get_lppaca())) {
+		/*
+		 * Use local_paca instead of get_lppaca() since
+		 * preemption is not disabled, and it is not required in
+		 * fact: lppaca_ptr does not need to be the value
+		 * associated with the current CPU; it can be from any CPU.
+		 */
+		if (lppaca_shared_proc(local_paca->lppaca_ptr)) {
 			cpuidle_state_table = shared_states;
 			max_idle_state = ARRAY_SIZE(shared_states);
 		} else {
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index a476900..b524fe9 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 
-/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
  * Copyright (C) 2009 Intel Corporation
  */
@@ -28,6 +28,7 @@
 #include <linux/cpu_pm.h>
 #include <linux/cpuhotplug.h>
 #include <linux/sched/clock.h>
+#include <linux/sched/stat.h>
 #include <soc/qcom/pm.h>
 #include <soc/qcom/event_timer.h>
 #include <soc/qcom/lpm_levels.h>
@@ -573,12 +574,18 @@
 
 static inline bool is_cpu_biased(int cpu)
 {
-	return false;
+	u64 now = sched_clock();
+	u64 last = sched_get_cpu_last_busy_time(cpu);
+
+	if (!last)
+		return false;
+
+	return (now - last) < BIAS_HYST;
 }
 
 static inline bool lpm_disallowed(s64 sleep_us, int cpu)
 {
-	if (sleep_disabled || is_cpu_biased(cpu))
+	if ((sleep_disabled && !cpu_isolated(cpu)) || is_cpu_biased(cpu))
 		return true;
 
 	if (sleep_us < 0)
@@ -631,7 +638,7 @@
 				next_wakeup_us = next_event_us - lvl_latency_us;
 		}
 
-		if (!i) {
+		if (!i && !cpu_isolated(dev->cpu)) {
 			/*
 			 * If the next_wake_us itself is not sufficient for
 			 * deeper low power modes than clock gating do not
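
The reworked is_cpu_biased() vetoes low-power modes on a CPU that was busy within the last BIAS_HYST nanoseconds of sched_clock() time, so a core that just went idle is not immediately pushed into a deep, high-exit-latency state. The check in isolation, with a hypothetical window and sched_get_cpu_last_busy_time() replaced by a plain parameter:

#include <linux/sched/clock.h>
#include <linux/time64.h>
#include <linux/types.h>

#define BIAS_HYST_NS	(2 * NSEC_PER_MSEC)	/* hypothetical window */

static bool cpu_recently_busy(u64 last_busy_ns)
{
	if (!last_busy_ns)	/* no busy record yet: no bias */
		return false;

	return sched_clock() - last_busy_ns < BIAS_HYST_NS;
}
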
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index a8c4ce0..a825b64 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -681,6 +681,7 @@
 	depends on ARCH_BCM_IPROC
 	depends on MAILBOX
 	default m
+	select CRYPTO_AUTHENC
 	select CRYPTO_DES
 	select CRYPTO_MD5
 	select CRYPTO_SHA1
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 2d1f1db..cd46463 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -2845,44 +2845,28 @@
 	struct spu_hw *spu = &iproc_priv.spu;
 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
 	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
-	struct rtattr *rta = (void *)key;
-	struct crypto_authenc_key_param *param;
-	const u8 *origkey = key;
-	const unsigned int origkeylen = keylen;
-
-	int ret = 0;
+	struct crypto_authenc_keys keys;
+	int ret;
 
 	flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
 		 keylen);
 	flow_dump("  key: ", key, keylen);
 
-	if (!RTA_OK(rta, keylen))
-		goto badkey;
-	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
-		goto badkey;
-	if (RTA_PAYLOAD(rta) < sizeof(*param))
+	ret = crypto_authenc_extractkeys(&keys, key, keylen);
+	if (ret)
 		goto badkey;
 
-	param = RTA_DATA(rta);
-	ctx->enckeylen = be32_to_cpu(param->enckeylen);
-
-	key += RTA_ALIGN(rta->rta_len);
-	keylen -= RTA_ALIGN(rta->rta_len);
-
-	if (keylen < ctx->enckeylen)
-		goto badkey;
-	if (ctx->enckeylen > MAX_KEY_SIZE)
+	if (keys.enckeylen > MAX_KEY_SIZE ||
+	    keys.authkeylen > MAX_KEY_SIZE)
 		goto badkey;
 
-	ctx->authkeylen = keylen - ctx->enckeylen;
+	ctx->enckeylen = keys.enckeylen;
+	ctx->authkeylen = keys.authkeylen;
 
-	if (ctx->authkeylen > MAX_KEY_SIZE)
-		goto badkey;
-
-	memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen);
+	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
 	/* May end up padding auth key. So make sure it's zeroed. */
 	memset(ctx->authkey, 0, sizeof(ctx->authkey));
-	memcpy(ctx->authkey, key, ctx->authkeylen);
+	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
 
 	switch (ctx->alg->cipher_info.alg) {
 	case CIPHER_ALG_DES:
@@ -2890,7 +2874,7 @@
 			u32 tmp[DES_EXPKEY_WORDS];
 			u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
 
-			if (des_ekey(tmp, key) == 0) {
+			if (des_ekey(tmp, keys.enckey) == 0) {
 				if (crypto_aead_get_flags(cipher) &
 				    CRYPTO_TFM_REQ_WEAK_KEY) {
 					crypto_aead_set_flags(cipher, flags);
@@ -2905,7 +2889,7 @@
 		break;
 	case CIPHER_ALG_3DES:
 		if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
-			const u32 *K = (const u32 *)key;
+			const u32 *K = (const u32 *)keys.enckey;
 			u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
 
 			if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
@@ -2956,9 +2940,7 @@
 		ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
 		ctx->fallback_cipher->base.crt_flags |=
 		    tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
-		ret =
-		    crypto_aead_setkey(ctx->fallback_cipher, origkey,
-				       origkeylen);
+		ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
 		if (ret) {
 			flow_log("  fallback setkey() returned:%d\n", ret);
 			tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
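
This conversion (repeated for ccree below) swaps roughly twenty lines of hand-rolled rtattr parsing for crypto_authenc_extractkeys(), which validates the authenc() key blob and hands back both halves with their lengths. A skeleton of the resulting setkey flow, with max_key_size standing in for the driver's own limit:

#include <crypto/authenc.h>
#include <linux/errno.h>
#include <linux/string.h>

static int example_authenc_setkey(const u8 *key, unsigned int keylen,
				  u8 *enckey, u8 *authkey,
				  unsigned int max_key_size)
{
	struct crypto_authenc_keys keys;
	int ret;

	ret = crypto_authenc_extractkeys(&keys, key, keylen);
	if (ret)
		return ret;		/* malformed rtattr-encoded blob */

	if (keys.enckeylen > max_key_size || keys.authkeylen > max_key_size)
		return -EINVAL;

	memcpy(enckey, keys.enckey, keys.enckeylen);
	memcpy(authkey, keys.authkey, keys.authkeylen);
	return 0;
}
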
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 43975ab..f84ca2f 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1131,13 +1131,16 @@
 
 	desc = edesc->hw_desc;
 
-	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, state->buf_dma)) {
-		dev_err(jrdev, "unable to map src\n");
-		goto unmap;
-	}
+	if (buflen) {
+		state->buf_dma = dma_map_single(jrdev, buf, buflen,
+						DMA_TO_DEVICE);
+		if (dma_mapping_error(jrdev, state->buf_dma)) {
+			dev_err(jrdev, "unable to map src\n");
+			goto unmap;
+		}
 
-	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
+		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
+	}
 
 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
 						digestsize);
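
ahash final requests can legitimately arrive with an empty buffer, and dma_map_single() on a zero-length region is invalid (DMA-API debugging flags it), so the hunk gates both the mapping and the matching append_seq_in_ptr() on buflen. The guard as a reusable shape:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int map_src_if_any(struct device *dev, void *buf, size_t len,
			  dma_addr_t *dma)
{
	*dma = 0;
	if (!len)	/* nothing to map; never call dma_map_single(..., 0, ..) */
		return 0;

	*dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dma))
		return -ENOMEM;
	return 0;
}
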
diff --git a/drivers/crypto/cavium/nitrox/nitrox_algs.c b/drivers/crypto/cavium/nitrox/nitrox_algs.c
index 2ae6124..5d54ebc 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_algs.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_algs.c
@@ -73,7 +73,7 @@
 static int nitrox_skcipher_init(struct crypto_skcipher *tfm)
 {
 	struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
-	void *fctx;
+	struct crypto_ctx_hdr *chdr;
 
 	/* get the first device */
 	nctx->ndev = nitrox_get_first_device();
@@ -81,12 +81,14 @@
 		return -ENODEV;
 
 	/* allocate nitrox crypto context */
-	fctx = crypto_alloc_context(nctx->ndev);
-	if (!fctx) {
+	chdr = crypto_alloc_context(nctx->ndev);
+	if (!chdr) {
 		nitrox_put_device(nctx->ndev);
 		return -ENOMEM;
 	}
-	nctx->u.ctx_handle = (uintptr_t)fctx;
+	nctx->chdr = chdr;
+	nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr +
+					 sizeof(struct ctx_hdr));
 	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(tfm) +
 				    sizeof(struct nitrox_kcrypt_request));
 	return 0;
@@ -102,7 +104,7 @@
 
 		memset(&fctx->crypto, 0, sizeof(struct crypto_keys));
 		memset(&fctx->auth, 0, sizeof(struct auth_keys));
-		crypto_free_context((void *)fctx);
+		crypto_free_context((void *)nctx->chdr);
 	}
 	nitrox_put_device(nctx->ndev);
 
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index 4d31df0..28baf1a 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -146,20 +146,31 @@
 void *crypto_alloc_context(struct nitrox_device *ndev)
 {
 	struct ctx_hdr *ctx;
+	struct crypto_ctx_hdr *chdr;
 	void *vaddr;
 	dma_addr_t dma;
 
-	vaddr = dma_pool_alloc(ndev->ctx_pool, (GFP_KERNEL | __GFP_ZERO), &dma);
-	if (!vaddr)
+	chdr = kmalloc(sizeof(*chdr), GFP_KERNEL);
+	if (!chdr)
 		return NULL;
 
+	vaddr = dma_pool_alloc(ndev->ctx_pool, (GFP_KERNEL | __GFP_ZERO), &dma);
+	if (!vaddr) {
+		kfree(chdr);
+		return NULL;
+	}
+
 	/* fill meta data */
 	ctx = vaddr;
 	ctx->pool = ndev->ctx_pool;
 	ctx->dma = dma;
 	ctx->ctx_dma = dma + sizeof(struct ctx_hdr);
 
-	return ((u8 *)vaddr + sizeof(struct ctx_hdr));
+	chdr->pool = ndev->ctx_pool;
+	chdr->dma = dma;
+	chdr->vaddr = vaddr;
+
+	return chdr;
 }
 
 /**
@@ -168,13 +179,14 @@
  */
 void crypto_free_context(void *ctx)
 {
-	struct ctx_hdr *ctxp;
+	struct crypto_ctx_hdr *ctxp;
 
 	if (!ctx)
 		return;
 
-	ctxp = (struct ctx_hdr *)((u8 *)ctx - sizeof(struct ctx_hdr));
-	dma_pool_free(ctxp->pool, ctxp, ctxp->dma);
+	ctxp = ctx;
+	dma_pool_free(ctxp->pool, ctxp->vaddr, ctxp->dma);
+	kfree(ctxp);
 }
 
 /**
diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h
index d091b6f..19f0a20 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_req.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_req.h
@@ -181,12 +181,19 @@
 	struct auth_keys auth;
 };
 
+struct crypto_ctx_hdr {
+	struct dma_pool *pool;
+	dma_addr_t dma;
+	void *vaddr;
+};
+
 struct nitrox_crypto_ctx {
 	struct nitrox_device *ndev;
 	union {
 		u64 ctx_handle;
 		struct flexi_crypto_context *fctx;
 	} u;
+	struct crypto_ctx_hdr *chdr;
 };
 
 struct nitrox_kcrypt_request {
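
The underlying bug in the nitrox context allocator was that the free path reconstructed the dma_pool bookkeeping by pointer arithmetic on the address handed out to the user. The fix returns a small kmalloc'd handle that keeps pool, vaddr and DMA address together, so dma_pool_free() gets exactly what dma_pool_alloc() produced. The pattern, stripped of driver specifics:

#include <linux/dmapool.h>
#include <linux/slab.h>

struct ctx_handle {
	struct dma_pool *pool;
	dma_addr_t dma;
	void *vaddr;
};

static struct ctx_handle *ctx_alloc(struct dma_pool *pool)
{
	struct ctx_handle *h = kmalloc(sizeof(*h), GFP_KERNEL);

	if (!h)
		return NULL;

	h->vaddr = dma_pool_zalloc(pool, GFP_KERNEL, &h->dma);
	if (!h->vaddr) {
		kfree(h);
		return NULL;
	}
	h->pool = pool;
	return h;
}

static void ctx_free(struct ctx_handle *h)
{
	if (!h)
		return;
	dma_pool_free(h->pool, h->vaddr, h->dma);	/* original vaddr/dma pair */
	kfree(h);
}
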
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 01b82b8..5852d29 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -540,13 +540,12 @@
 			  unsigned int keylen)
 {
 	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct rtattr *rta = (struct rtattr *)key;
 	struct cc_crypto_req cc_req = {};
-	struct crypto_authenc_key_param *param;
 	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
-	int rc = -EINVAL;
 	unsigned int seq_len = 0;
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
+	const u8 *enckey, *authkey;
+	int rc;
 
 	dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
 		ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
@@ -554,35 +553,33 @@
 	/* STAT_PHASE_0: Init and sanity checks */
 
 	if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
-		if (!RTA_OK(rta, keylen))
+		struct crypto_authenc_keys keys;
+
+		rc = crypto_authenc_extractkeys(&keys, key, keylen);
+		if (rc)
 			goto badkey;
-		if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
-			goto badkey;
-		if (RTA_PAYLOAD(rta) < sizeof(*param))
-			goto badkey;
-		param = RTA_DATA(rta);
-		ctx->enc_keylen = be32_to_cpu(param->enckeylen);
-		key += RTA_ALIGN(rta->rta_len);
-		keylen -= RTA_ALIGN(rta->rta_len);
-		if (keylen < ctx->enc_keylen)
-			goto badkey;
-		ctx->auth_keylen = keylen - ctx->enc_keylen;
+		enckey = keys.enckey;
+		authkey = keys.authkey;
+		ctx->enc_keylen = keys.enckeylen;
+		ctx->auth_keylen = keys.authkeylen;
 
 		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
 			/* the nonce is stored in bytes at end of key */
+			rc = -EINVAL;
 			if (ctx->enc_keylen <
 			    (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
 				goto badkey;
 			/* Copy nonce from last 4 bytes in CTR key to
 			 *  first 4 bytes in CTR IV
 			 */
-			memcpy(ctx->ctr_nonce, key + ctx->auth_keylen +
-			       ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE,
-			       CTR_RFC3686_NONCE_SIZE);
+			memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
+			       CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
 			/* Set CTR key size */
 			ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
 		}
 	} else { /* non-authenc - has just one key */
+		enckey = key;
+		authkey = NULL;
 		ctx->enc_keylen = keylen;
 		ctx->auth_keylen = 0;
 	}
@@ -594,13 +591,14 @@
 	/* STAT_PHASE_1: Copy key to ctx */
 
 	/* Get key material */
-	memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
+	memcpy(ctx->enckey, enckey, ctx->enc_keylen);
 	if (ctx->enc_keylen == 24)
 		memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
 	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
-		memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
+		memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
+		       ctx->auth_keylen);
 	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
-		rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
+		rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
 		if (rc)
 			goto badkey;
 	}
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
index 461b97e..1ff8738 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -303,7 +303,10 @@
 
 static inline int is_eth_imm(const struct sk_buff *skb, unsigned int kctx_len)
 {
-	int hdrlen = sizeof(struct chcr_ipsec_req) + kctx_len;
+	int hdrlen;
+
+	hdrlen = sizeof(struct fw_ulptx_wr) +
+		 sizeof(struct chcr_ipsec_req) + kctx_len;
 
 	hdrlen += sizeof(struct cpl_tx_pkt);
 	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 6988012..f4f3e9a 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1361,23 +1361,18 @@
 	struct talitos_private *priv = dev_get_drvdata(dev);
 	bool is_sec1 = has_ftr_sec1(priv);
 	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
-	void *err;
 
 	if (cryptlen + authsize > max_len) {
 		dev_err(dev, "length exceeds h/w max limit\n");
 		return ERR_PTR(-EINVAL);
 	}
 
-	if (ivsize)
-		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
-
 	if (!dst || dst == src) {
 		src_len = assoclen + cryptlen + authsize;
 		src_nents = sg_nents_for_len(src, src_len);
 		if (src_nents < 0) {
 			dev_err(dev, "Invalid number of src SG.\n");
-			err = ERR_PTR(-EINVAL);
-			goto error_sg;
+			return ERR_PTR(-EINVAL);
 		}
 		src_nents = (src_nents == 1) ? 0 : src_nents;
 		dst_nents = dst ? src_nents : 0;
@@ -1387,16 +1382,14 @@
 		src_nents = sg_nents_for_len(src, src_len);
 		if (src_nents < 0) {
 			dev_err(dev, "Invalid number of src SG.\n");
-			err = ERR_PTR(-EINVAL);
-			goto error_sg;
+			return ERR_PTR(-EINVAL);
 		}
 		src_nents = (src_nents == 1) ? 0 : src_nents;
 		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
 		dst_nents = sg_nents_for_len(dst, dst_len);
 		if (dst_nents < 0) {
 			dev_err(dev, "Invalid number of dst SG.\n");
-			err = ERR_PTR(-EINVAL);
-			goto error_sg;
+			return ERR_PTR(-EINVAL);
 		}
 		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
 	}
@@ -1423,11 +1416,14 @@
 	/* if its a ahash, add space for a second desc next to the first one */
 	if (is_sec1 && !dst)
 		alloc_len += sizeof(struct talitos_desc);
+	alloc_len += ivsize;
 
 	edesc = kmalloc(alloc_len, GFP_DMA | flags);
-	if (!edesc) {
-		err = ERR_PTR(-ENOMEM);
-		goto error_sg;
+	if (!edesc)
+		return ERR_PTR(-ENOMEM);
+	if (ivsize) {
+		iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
+		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
 	}
 	memset(&edesc->desc, 0, sizeof(edesc->desc));
 
@@ -1445,10 +1441,6 @@
 						     DMA_BIDIRECTIONAL);
 	}
 	return edesc;
-error_sg:
-	if (iv_dma)
-		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
-	return err;
 }
 
 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
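
The talitos rework cures two problems at once: the IV used to be DMA-mapped before the descriptor allocation (forcing an unmap on every error path), and callers could pass a stack IV, which must never be handed to dma_map_single(). Reserving ivsize extra bytes at the tail of the kmalloc'd descriptor and copying the IV there keeps the mapped buffer in DMA-able heap memory. The allocation step, condensed:

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/string.h>

static void *edesc_alloc(struct device *dev, size_t desc_len,
			 const u8 *iv, unsigned int ivsize,
			 dma_addr_t *iv_dma)
{
	void *edesc = kmalloc(desc_len + ivsize, GFP_DMA | GFP_KERNEL);
	u8 *iv_copy;

	if (!edesc)
		return NULL;

	if (ivsize) {
		iv_copy = (u8 *)edesc + desc_len;	/* tail of the descriptor */
		memcpy(iv_copy, iv, ivsize);
		*iv_dma = dma_map_single(dev, iv_copy, ivsize, DMA_TO_DEVICE);
	}
	return edesc;
}
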
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index 99e2aac..2c1f459 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -48,9 +48,8 @@
 	percpu_ref_exit(ref);
 }
 
-static void dax_pmem_percpu_kill(void *data)
+static void dax_pmem_percpu_kill(struct percpu_ref *ref)
 {
-	struct percpu_ref *ref = data;
 	struct dax_pmem *dax_pmem = to_dax_pmem(ref);
 
 	dev_dbg(dax_pmem->dev, "trace\n");
@@ -112,17 +111,10 @@
 	}
 
 	dax_pmem->pgmap.ref = &dax_pmem->ref;
+	dax_pmem->pgmap.kill = dax_pmem_percpu_kill;
 	addr = devm_memremap_pages(dev, &dax_pmem->pgmap);
-	if (IS_ERR(addr)) {
-		devm_remove_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref);
-		percpu_ref_exit(&dax_pmem->ref);
+	if (IS_ERR(addr))
 		return PTR_ERR(addr);
-	}
-
-	rc = devm_add_action_or_reset(dev, dax_pmem_percpu_kill,
-							&dax_pmem->ref);
-	if (rc)
-		return rc;
 
 	/* adjust the dax_region resource to the start of data */
 	memcpy(&res, &dax_pmem->pgmap.res, sizeof(res));
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index c516276..d984509 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -9,7 +9,10 @@
 cflags-$(CONFIG_X86_64)		:= -mcmodel=small
 cflags-$(CONFIG_X86)		+= -m$(BITS) -D__KERNEL__ -O2 \
 				   -fPIC -fno-strict-aliasing -mno-red-zone \
-				   -mno-mmx -mno-sse -fshort-wchar
+				   -mno-mmx -mno-sse -fshort-wchar \
+				   -Wno-pointer-sign \
+				   $(call cc-disable-warning, address-of-packed-member) \
+				   $(call cc-disable-warning, gnu)
 
 # arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
 # disable the stackleak plugin
diff --git a/drivers/firmware/qcom/tz_log.c b/drivers/firmware/qcom/tz_log.c
index 0f3fc4d..b7fa43d 100644
--- a/drivers/firmware/qcom/tz_log.c
+++ b/drivers/firmware/qcom/tz_log.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 #include <linux/debugfs.h>
 #include <linux/errno.h>
@@ -877,7 +877,7 @@
 
 	if (desc.ret[0] != QSEOS_RESULT_SUCCESS) {
 		pr_err(
-		"%s: scm_call to register log buf failed, resp result =%d\n",
+		"%s: scm_call to register log buf failed, resp result =%lld\n",
 		__func__, desc.ret[0]);
 		goto err;
 	}
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
index 7fa7936..68e4b2b 100644
--- a/drivers/fpga/altera-cvp.c
+++ b/drivers/fpga/altera-cvp.c
@@ -468,14 +468,6 @@
 		goto err_unmap;
 	}
 
-	ret = driver_create_file(&altera_cvp_driver.driver,
-				 &driver_attr_chkcfg);
-	if (ret) {
-		dev_err(&pdev->dev, "Can't create sysfs chkcfg file\n");
-		fpga_mgr_unregister(mgr);
-		goto err_unmap;
-	}
-
 	return 0;
 
 err_unmap:
@@ -493,7 +485,6 @@
 	struct altera_cvp_conf *conf = mgr->priv;
 	u16 cmd;
 
-	driver_remove_file(&altera_cvp_driver.driver, &driver_attr_chkcfg);
 	fpga_mgr_unregister(mgr);
 	pci_iounmap(pdev, conf->map);
 	pci_release_region(pdev, CVP_BAR);
@@ -502,7 +493,30 @@
 	pci_write_config_word(pdev, PCI_COMMAND, cmd);
 }
 
-module_pci_driver(altera_cvp_driver);
+static int __init altera_cvp_init(void)
+{
+	int ret;
+
+	ret = pci_register_driver(&altera_cvp_driver);
+	if (ret)
+		return ret;
+
+	ret = driver_create_file(&altera_cvp_driver.driver,
+				 &driver_attr_chkcfg);
+	if (ret)
+		pr_warn("Can't create sysfs chkcfg file\n");
+
+	return 0;
+}
+
+static void __exit altera_cvp_exit(void)
+{
+	driver_remove_file(&altera_cvp_driver.driver, &driver_attr_chkcfg);
+	pci_unregister_driver(&altera_cvp_driver);
+}
+
+module_init(altera_cvp_init);
+module_exit(altera_cvp_exit);
 
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
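
module_pci_driver() is nothing more than generated init/exit functions around pci_register_driver()/pci_unregister_driver(); as soon as a driver-level sysfs attribute has to live for the whole lifetime of the driver, rather than of one bound device (which is all that probe()/remove() cover), the macro gives way to explicit init/exit, as in the hunk above. What the open-coded version amounts to, with hypothetical names:

#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>

static struct pci_driver my_driver = {		/* hypothetical driver */
	.name = "my-driver",
};

static ssize_t chk_show(struct device_driver *drv, char *buf)
{
	return sprintf(buf, "ok\n");
}
static DRIVER_ATTR_RO(chk);	/* /sys/bus/pci/drivers/my-driver/chk */

static int __init my_init(void)
{
	int ret = pci_register_driver(&my_driver);

	if (ret)
		return ret;

	/* attribute on the driver object, created exactly once */
	if (driver_create_file(&my_driver.driver, &driver_attr_chk))
		pr_warn("my-driver: can't create sysfs chk file\n");
	return 0;
}

static void __exit my_exit(void)
{
	driver_remove_file(&my_driver.driver, &driver_attr_chk);
	pci_unregister_driver(&my_driver);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL v2");
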
diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c
index 6b11f13..7f9e030 100644
--- a/drivers/gpio/gpio-altera-a10sr.c
+++ b/drivers/gpio/gpio-altera-a10sr.c
@@ -66,8 +66,10 @@
 static int altr_a10sr_gpio_direction_output(struct gpio_chip *gc,
 					    unsigned int nr, int value)
 {
-	if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT))
+	if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT)) {
+		altr_a10sr_gpio_set(gc, nr, value);
 		return 0;
+	}
 	return -EINVAL;
 }
 
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index e0d6a0a..e41223c 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -180,7 +180,18 @@
 
 static int sprd_eic_get(struct gpio_chip *chip, unsigned int offset)
 {
-	return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA);
+	struct sprd_eic *sprd_eic = gpiochip_get_data(chip);
+
+	switch (sprd_eic->type) {
+	case SPRD_EIC_DEBOUNCE:
+		return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA);
+	case SPRD_EIC_ASYNC:
+		return sprd_eic_read(chip, offset, SPRD_EIC_ASYNC_DATA);
+	case SPRD_EIC_SYNC:
+		return sprd_eic_read(chip, offset, SPRD_EIC_SYNC_DATA);
+	default:
+		return -ENOTSUPP;
+	}
 }
 
 static int sprd_eic_direction_input(struct gpio_chip *chip, unsigned int offset)
@@ -368,6 +379,7 @@
 			irq_set_handler_locked(data, handle_edge_irq);
 			break;
 		case IRQ_TYPE_EDGE_BOTH:
+			sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
 			sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1);
 			irq_set_handler_locked(data, handle_edge_irq);
 			break;
diff --git a/drivers/gpio/gpio-max7301.c b/drivers/gpio/gpio-max7301.c
index 05813fb..647dfbb 100644
--- a/drivers/gpio/gpio-max7301.c
+++ b/drivers/gpio/gpio-max7301.c
@@ -25,7 +25,7 @@
 	struct spi_device *spi = to_spi_device(dev);
 	u16 word = ((reg & 0x7F) << 8) | (val & 0xFF);
 
-	return spi_write(spi, (const u8 *)&word, sizeof(word));
+	return spi_write_then_read(spi, &word, sizeof(word), NULL, 0);
 }
 
 /* A read from the MAX7301 means two transfers; here, one message each */
@@ -37,14 +37,8 @@
 	struct spi_device *spi = to_spi_device(dev);
 
 	word = 0x8000 | (reg << 8);
-	ret = spi_write(spi, (const u8 *)&word, sizeof(word));
-	if (ret)
-		return ret;
-	/*
-	 * This relies on the fact, that a transfer with NULL tx_buf shifts out
-	 * zero bytes (=NOOP for MAX7301)
-	 */
-	ret = spi_read(spi, (u8 *)&word, sizeof(word));
+	ret = spi_write_then_read(spi, &word, sizeof(word), &word,
+				  sizeof(word));
 	if (ret)
 		return ret;
 	return word & 0xff;
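
spi_write() and spi_read() hand the caller's buffer straight to the controller, so a stack variable like `word` is not DMA-safe; spi_write_then_read() instead bounces through its own heap buffer and runs the write and the optional read as one message. A sketch of the fixed write path (hypothetical function name):

#include <linux/spi/spi.h>

static int reg_write16(struct spi_device *spi, u8 reg, u8 val)
{
	u16 word = ((reg & 0x7F) << 8) | val;

	/*
	 * spi_write_then_read() copies through an internal DMA-safe
	 * bounce buffer, so passing the address of a stack variable
	 * is fine here; with spi_write() it is not.
	 */
	return spi_write_then_read(spi, &word, sizeof(word), NULL, 0);
}
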
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 6e02148..adc768f 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -773,9 +773,6 @@
 				     "marvell,armada-370-gpio"))
 		return 0;
 
-	if (IS_ERR(mvchip->clk))
-		return PTR_ERR(mvchip->clk);
-
 	/*
 	 * There are only two sets of PWM configuration registers for
 	 * all the GPIO lines on those SoCs which this driver reserves
@@ -786,6 +783,9 @@
 	if (!res)
 		return 0;
 
+	if (IS_ERR(mvchip->clk))
+		return PTR_ERR(mvchip->clk);
+
 	/*
 	 * Use set A for lines of GPIO chip with id 0, B for GPIO chip
 	 * with id 1. Don't allow further GPIO chips to be used for PWM.
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index adf72dd..68a35b6 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -84,6 +84,7 @@
  */
 struct pcf857x {
 	struct gpio_chip	chip;
+	struct irq_chip		irqchip;
 	struct i2c_client	*client;
 	struct mutex		lock;		/* protect 'out' */
 	unsigned		out;		/* software latch */
@@ -252,18 +253,6 @@
 	mutex_unlock(&gpio->lock);
 }
 
-static struct irq_chip pcf857x_irq_chip = {
-	.name		= "pcf857x",
-	.irq_enable	= pcf857x_irq_enable,
-	.irq_disable	= pcf857x_irq_disable,
-	.irq_ack	= noop,
-	.irq_mask	= noop,
-	.irq_unmask	= noop,
-	.irq_set_wake	= pcf857x_irq_set_wake,
-	.irq_bus_lock		= pcf857x_irq_bus_lock,
-	.irq_bus_sync_unlock	= pcf857x_irq_bus_sync_unlock,
-};
-
 /*-------------------------------------------------------------------------*/
 
 static int pcf857x_probe(struct i2c_client *client,
@@ -376,8 +365,17 @@
 
 	/* Enable irqchip if we have an interrupt */
 	if (client->irq) {
+		gpio->irqchip.name = "pcf857x",
+		gpio->irqchip.irq_enable = pcf857x_irq_enable,
+		gpio->irqchip.irq_disable = pcf857x_irq_disable,
+		gpio->irqchip.irq_ack = noop,
+		gpio->irqchip.irq_mask = noop,
+		gpio->irqchip.irq_unmask = noop,
+		gpio->irqchip.irq_set_wake = pcf857x_irq_set_wake,
+		gpio->irqchip.irq_bus_lock = pcf857x_irq_bus_lock,
+		gpio->irqchip.irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock,
 		status = gpiochip_irqchip_add_nested(&gpio->chip,
-						     &pcf857x_irq_chip,
+						     &gpio->irqchip,
 						     0, handle_level_irq,
 						     IRQ_TYPE_NONE);
 		if (status) {
@@ -392,7 +390,7 @@
 		if (status)
 			goto fail;
 
-		gpiochip_set_nested_irqchip(&gpio->chip, &pcf857x_irq_chip,
+		gpiochip_set_nested_irqchip(&gpio->chip, &gpio->irqchip,
 					    client->irq);
 		gpio->irq_parent = client->irq;
 	}
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 2afd9de..dc42571 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -54,6 +54,7 @@
 
 	void __iomem		*base;
 	struct gpio_chip	gc;
+	struct irq_chip		irq_chip;
 	int			parent_irq;
 
 #ifdef CONFIG_PM
@@ -281,15 +282,6 @@
 	return irq_set_irq_wake(pl061->parent_irq, state);
 }
 
-static struct irq_chip pl061_irqchip = {
-	.name		= "pl061",
-	.irq_ack	= pl061_irq_ack,
-	.irq_mask	= pl061_irq_mask,
-	.irq_unmask	= pl061_irq_unmask,
-	.irq_set_type	= pl061_irq_type,
-	.irq_set_wake	= pl061_irq_set_wake,
-};
-
 static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
 {
 	struct device *dev = &adev->dev;
@@ -328,6 +320,13 @@
 	/*
 	 * irq_chip support
 	 */
+	pl061->irq_chip.name = dev_name(dev);
+	pl061->irq_chip.irq_ack	= pl061_irq_ack;
+	pl061->irq_chip.irq_mask = pl061_irq_mask;
+	pl061->irq_chip.irq_unmask = pl061_irq_unmask;
+	pl061->irq_chip.irq_set_type = pl061_irq_type;
+	pl061->irq_chip.irq_set_wake = pl061_irq_set_wake;
+
 	writeb(0, pl061->base + GPIOIE); /* disable irqs */
 	irq = adev->irq[0];
 	if (irq < 0) {
@@ -336,14 +335,14 @@
 	}
 	pl061->parent_irq = irq;
 
-	ret = gpiochip_irqchip_add(&pl061->gc, &pl061_irqchip,
+	ret = gpiochip_irqchip_add(&pl061->gc, &pl061->irq_chip,
 				   0, handle_bad_irq,
 				   IRQ_TYPE_NONE);
 	if (ret) {
 		dev_info(&adev->dev, "could not add irqchip\n");
 		return ret;
 	}
-	gpiochip_set_chained_irqchip(&pl061->gc, &pl061_irqchip,
+	gpiochip_set_chained_irqchip(&pl061->gc, &pl061->irq_chip,
 				     irq, pl061_irq_handler);
 
 	amba_set_drvdata(adev, pl061);
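
Both this pl061 hunk and the pcf857x one above retire a file-scope `static struct irq_chip` in favour of a copy embedded in the per-device state. The irqchip core can modify the chip it is handed at runtime, and pl061 now even assigns dev_name(dev) to .name, so instances sharing one static chip would stomp on each other. The pattern, with hypothetical callbacks:

#include <linux/gpio/driver.h>
#include <linux/irq.h>

static void my_irq_mask(struct irq_data *d)   { /* mask in hardware */ }
static void my_irq_unmask(struct irq_data *d) { /* unmask in hardware */ }

struct my_gpio {
	struct gpio_chip gc;
	struct irq_chip irq_chip;	/* one per instance, never shared */
};

static void my_gpio_init_irqchip(struct my_gpio *g, const char *devname)
{
	g->irq_chip.name = devname;	/* per-device name is now safe */
	g->irq_chip.irq_mask = my_irq_mask;
	g->irq_chip.irq_unmask = my_irq_unmask;
}
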
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 8b9d7e4..c5e009f 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -23,11 +23,28 @@
 
 #include "gpiolib.h"
 
+/**
+ * struct acpi_gpio_event - ACPI GPIO event handler data
+ *
+ * @node:	  list-entry of the events list of the struct acpi_gpio_chip
+ * @handle:	  handle of ACPI method to execute when the IRQ triggers
+ * @handler:	  irq_handler to pass to request_irq when requesting the IRQ
+ * @pin:	  GPIO pin number on the gpio_chip
+ * @irq:	  Linux IRQ number for the event, for request_ / free_irq
+ * @irqflags:     flags to pass to request_irq when requesting the IRQ
+ * @irq_is_wake:  If the ACPI flags indicate the IRQ is a wakeup source
+ * @irq_requested: True if request_irq has been done
+ * @desc:	  gpio_desc for the GPIO pin for this event
+ */
 struct acpi_gpio_event {
 	struct list_head node;
 	acpi_handle handle;
+	irq_handler_t handler;
 	unsigned int pin;
 	unsigned int irq;
+	unsigned long irqflags;
+	bool irq_is_wake;
+	bool irq_requested;
 	struct gpio_desc *desc;
 };
 
@@ -53,10 +70,10 @@
 
 /*
  * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init
- * (so builtin drivers) we register the ACPI GpioInt event handlers from a
+ * (so builtin drivers) we register the ACPI GpioInt IRQ handlers from a
  * late_initcall_sync handler, so that other builtin drivers can register their
  * OpRegions before the event handlers can run.  This list contains gpiochips
- * for which the acpi_gpiochip_request_interrupts() has been deferred.
+ * for which the acpi_gpiochip_request_irqs() call has been deferred.
  */
 static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
 static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
@@ -137,8 +154,42 @@
 }
 EXPORT_SYMBOL_GPL(acpi_gpio_get_irq_resource);
 
-static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
-						   void *context)
+static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
+				      struct acpi_gpio_event *event)
+{
+	int ret, value;
+
+	ret = request_threaded_irq(event->irq, NULL, event->handler,
+				   event->irqflags, "ACPI:Event", event);
+	if (ret) {
+		dev_err(acpi_gpio->chip->parent,
+			"Failed to setup interrupt handler for %d\n",
+			event->irq);
+		return;
+	}
+
+	if (event->irq_is_wake)
+		enable_irq_wake(event->irq);
+
+	event->irq_requested = true;
+
+	/* Make sure we trigger the initial state of edge-triggered IRQs */
+	value = gpiod_get_raw_value_cansleep(event->desc);
+	if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+	    ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
+		event->handler(event->irq, event);
+}
+
+static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
+{
+	struct acpi_gpio_event *event;
+
+	list_for_each_entry(event, &acpi_gpio->events, node)
+		acpi_gpiochip_request_irq(acpi_gpio, event);
+}
+
+static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
+					     void *context)
 {
 	struct acpi_gpio_chip *acpi_gpio = context;
 	struct gpio_chip *chip = acpi_gpio->chip;
@@ -147,8 +198,7 @@
 	struct acpi_gpio_event *event;
 	irq_handler_t handler = NULL;
 	struct gpio_desc *desc;
-	unsigned long irqflags;
-	int ret, pin, irq, value;
+	int ret, pin, irq;
 
 	if (!acpi_gpio_get_irq_resource(ares, &agpio))
 		return AE_OK;
@@ -179,8 +229,6 @@
 
 	gpiod_direction_input(desc);
 
-	value = gpiod_get_value_cansleep(desc);
-
 	ret = gpiochip_lock_as_irq(chip, pin);
 	if (ret) {
 		dev_err(chip->parent, "Failed to lock GPIO as interrupt\n");
@@ -193,64 +241,42 @@
 		goto fail_unlock_irq;
 	}
 
-	irqflags = IRQF_ONESHOT;
-	if (agpio->triggering == ACPI_LEVEL_SENSITIVE) {
-		if (agpio->polarity == ACPI_ACTIVE_HIGH)
-			irqflags |= IRQF_TRIGGER_HIGH;
-		else
-			irqflags |= IRQF_TRIGGER_LOW;
-	} else {
-		switch (agpio->polarity) {
-		case ACPI_ACTIVE_HIGH:
-			irqflags |= IRQF_TRIGGER_RISING;
-			break;
-		case ACPI_ACTIVE_LOW:
-			irqflags |= IRQF_TRIGGER_FALLING;
-			break;
-		default:
-			irqflags |= IRQF_TRIGGER_RISING |
-				    IRQF_TRIGGER_FALLING;
-			break;
-		}
-	}
-
 	event = kzalloc(sizeof(*event), GFP_KERNEL);
 	if (!event)
 		goto fail_unlock_irq;
 
+	event->irqflags = IRQF_ONESHOT;
+	if (agpio->triggering == ACPI_LEVEL_SENSITIVE) {
+		if (agpio->polarity == ACPI_ACTIVE_HIGH)
+			event->irqflags |= IRQF_TRIGGER_HIGH;
+		else
+			event->irqflags |= IRQF_TRIGGER_LOW;
+	} else {
+		switch (agpio->polarity) {
+		case ACPI_ACTIVE_HIGH:
+			event->irqflags |= IRQF_TRIGGER_RISING;
+			break;
+		case ACPI_ACTIVE_LOW:
+			event->irqflags |= IRQF_TRIGGER_FALLING;
+			break;
+		default:
+			event->irqflags |= IRQF_TRIGGER_RISING |
+					   IRQF_TRIGGER_FALLING;
+			break;
+		}
+	}
+
 	event->handle = evt_handle;
+	event->handler = handler;
 	event->irq = irq;
+	event->irq_is_wake = agpio->wake_capable == ACPI_WAKE_CAPABLE;
 	event->pin = pin;
 	event->desc = desc;
 
-	ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
-				   "ACPI:Event", event);
-	if (ret) {
-		dev_err(chip->parent,
-			"Failed to setup interrupt handler for %d\n",
-			event->irq);
-		goto fail_free_event;
-	}
-
-	if (agpio->wake_capable == ACPI_WAKE_CAPABLE)
-		enable_irq_wake(irq);
-
 	list_add_tail(&event->node, &acpi_gpio->events);
 
-	/*
-	 * Make sure we trigger the initial state of the IRQ when using RISING
-	 * or FALLING.  Note we run the handlers on late_init, the AML code
-	 * may refer to OperationRegions from other (builtin) drivers which
-	 * may be probed after us.
-	 */
-	if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
-	    ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))
-		handler(event->irq, event);
-
 	return AE_OK;
 
-fail_free_event:
-	kfree(event);
 fail_unlock_irq:
 	gpiochip_unlock_as_irq(chip, pin);
 fail_free_desc:
@@ -287,6 +313,9 @@
 	if (ACPI_FAILURE(status))
 		return;
 
+	acpi_walk_resources(handle, "_AEI",
+			    acpi_gpiochip_alloc_event, acpi_gpio);
+
 	mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
 	defer = !acpi_gpio_deferred_req_irqs_done;
 	if (defer)
@@ -297,8 +326,7 @@
 	if (defer)
 		return;
 
-	acpi_walk_resources(handle, "_AEI",
-			    acpi_gpiochip_request_interrupt, acpi_gpio);
+	acpi_gpiochip_request_irqs(acpi_gpio);
 }
 EXPORT_SYMBOL_GPL(acpi_gpiochip_request_interrupts);
 
@@ -335,10 +363,13 @@
 	list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
 		struct gpio_desc *desc;
 
-		if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
-			disable_irq_wake(event->irq);
+		if (event->irq_requested) {
+			if (event->irq_is_wake)
+				disable_irq_wake(event->irq);
 
-		free_irq(event->irq, event);
+			free_irq(event->irq, event);
+		}
+
 		desc = event->desc;
 		if (WARN_ON(IS_ERR(desc)))
 			continue;
@@ -1204,23 +1235,16 @@
 	return con_id == NULL;
 }
 
-/* Run deferred acpi_gpiochip_request_interrupts() */
-static int acpi_gpio_handle_deferred_request_interrupts(void)
+/* Run deferred acpi_gpiochip_request_irqs() */
+static int acpi_gpio_handle_deferred_request_irqs(void)
 {
 	struct acpi_gpio_chip *acpi_gpio, *tmp;
 
 	mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
 	list_for_each_entry_safe(acpi_gpio, tmp,
 				 &acpi_gpio_deferred_req_irqs_list,
-				 deferred_req_irqs_list_entry) {
-		acpi_handle handle;
-
-		handle = ACPI_HANDLE(acpi_gpio->chip->parent);
-		acpi_walk_resources(handle, "_AEI",
-				    acpi_gpiochip_request_interrupt, acpi_gpio);
-
-		list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
-	}
+				 deferred_req_irqs_list_entry)
+		acpi_gpiochip_request_irqs(acpi_gpio);
 
 	acpi_gpio_deferred_req_irqs_done = true;
 	mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
@@ -1228,4 +1252,4 @@
 	return 0;
 }
 /* We must use _sync so that this runs after the first deferred_probe run */
-late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts);
+late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
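
The gpiolib-acpi restructuring splits event setup into two phases: acpi_gpiochip_alloc_event() walks _AEI and records everything request_threaded_irq() will need (handler, irq, irqflags, wake capability) in the event struct, and acpi_gpiochip_request_irqs() performs the actual request, either immediately or from the late_initcall. The irq_requested flag then tells the teardown path whether there is anything to free. A skeleton of the two-phase idiom, with hypothetical names:

#include <linux/interrupt.h>

struct deferred_event {
	irq_handler_t handler;
	unsigned int irq;
	unsigned long irqflags;
	bool requested;
};

/* Phase 1: record only; nothing can fire yet. */
static void event_record(struct deferred_event *ev, unsigned int irq,
			 irq_handler_t handler, unsigned long flags)
{
	ev->irq = irq;
	ev->handler = handler;
	ev->irqflags = flags | IRQF_ONESHOT;	/* threaded, no primary handler */
	ev->requested = false;
}

/* Phase 2: runs later, once everything the handler touches exists. */
static int event_request(struct deferred_event *ev, void *cookie)
{
	int ret = request_threaded_irq(ev->irq, NULL, ev->handler,
				       ev->irqflags, "deferred-event", cookie);

	if (!ret)
		ev->requested = true;	/* teardown frees only if set */
	return ret;
}
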
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 976ad91..5c43f4c 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -817,7 +817,15 @@
 	/* Do not leak kernel stack to userspace */
 	memset(&ge, 0, sizeof(ge));
 
-	ge.timestamp = le->timestamp;
+	/*
+	 * We may be running from a nested threaded interrupt in which case
+	 * we didn't get the timestamp from lineevent_irq_handler().
+	 */
+	if (!le->timestamp)
+		ge.timestamp = ktime_get_real_ns();
+	else
+		ge.timestamp = le->timestamp;
+
 	level = gpiod_get_value_cansleep(le->desc);
 
 	if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
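
When the line event fires from a nested threaded interrupt, lineevent_irq_handler(), the hard-irq half, never ran, so le->timestamp is still zero; the hunk falls back to stamping at read time rather than exporting 0 to userspace. The fallback as a one-liner:

#include <linux/ktime.h>
#include <linux/types.h>

static u64 event_timestamp(u64 irq_time_ns)
{
	/* 0 means the hard-irq handler never stamped it; use "now". */
	return irq_time_ns ? irq_time_ns : ktime_get_real_ns();
}
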
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 65f3eaf..f1a3e8f 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -111,6 +111,26 @@
 	  is 100. Typical values for double buffering will be 200,
 	  triple buffering 300.
 
+config DRM_FBDEV_LEAK_PHYS_SMEM
+	bool "Shamelessly allow leaking of fbdev physical address (DANGEROUS)"
+	depends on DRM_FBDEV_EMULATION && EXPERT
+	default n
+	help
+	  In order to keep user-space compatibility, we want in certain
+	  use-cases to keep leaking the fbdev physical address to the
+	  user-space program handling the fbdev buffer.
+	  This affects not only Amlogic, Allwinner and Rockchip devices
+	  with ARM Mali GPUs using a userspace blob.
+	  This option is not supported by upstream developers and should be
+	  removed as soon as possible; it is a broken and legacy behaviour
+	  for a modern fbdev device driver.
+
+	  Please send any bug reports triggered while using this to the
+	  proprietary software vendor that requires it.
+
+	  If in doubt, say "N" or spread the word to your closed source
+	  library vendor.
+
 config DRM_LOAD_EDID_FIRMWARE
 	bool "Allow to specify an EDID data set instead of probing for it"
 	depends on DRM
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index a028661..92b11de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -576,6 +576,7 @@
 	{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
+	{ 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0, 0, 0, 0, 0 },
 };
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index b31d121..81001d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -122,14 +122,14 @@
 		goto free_chunk;
 	}
 
+	mutex_lock(&p->ctx->lock);
+
 	/* skip guilty context job */
 	if (atomic_read(&p->ctx->guilty) == 1) {
 		ret = -ECANCELED;
 		goto free_chunk;
 	}
 
-	mutex_lock(&p->ctx->lock);
-
 	/* get chunks */
 	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
 	if (copy_from_user(chunk_array, chunk_array_user,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 8e26e1c..b40e9c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -753,6 +753,7 @@
 	/* VEGAM */
 	{0x1002, 0x694C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
 	{0x1002, 0x694E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
+	{0x1002, 0x694F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
 	/* Vega 10 */
 	{0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
 	{0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 8a926d1..2b4199a 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -116,16 +116,16 @@
 	if (r)
 		return r;
 
-	r = amdgpu_uvd_resume(adev);
-	if (r)
-		return r;
-
 	ring = &adev->uvd.inst->ring;
 	sprintf(ring->name, "uvd");
 	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
 	if (r)
 		return r;
 
+	r = amdgpu_uvd_resume(adev);
+	if (r)
+		return r;
+
 	r = amdgpu_uvd_entity_init(adev);
 
 	return r;
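
Note: the same reordering - calling amdgpu_uvd_resume() only after
amdgpu_ring_init() has succeeded - is applied uniformly to the UVD 4.2,
5.0, 6.0 and 7.0 blocks in the hunks that follow.
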
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 5024805..88c006c 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -113,16 +113,16 @@
 	if (r)
 		return r;
 
-	r = amdgpu_uvd_resume(adev);
-	if (r)
-		return r;
-
 	ring = &adev->uvd.inst->ring;
 	sprintf(ring->name, "uvd");
 	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
 	if (r)
 		return r;
 
+	r = amdgpu_uvd_resume(adev);
+	if (r)
+		return r;
+
 	r = amdgpu_uvd_entity_init(adev);
 
 	return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 6ae82cc..d407083 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -420,16 +420,16 @@
 		DRM_INFO("UVD ENC is disabled\n");
 	}
 
-	r = amdgpu_uvd_resume(adev);
-	if (r)
-		return r;
-
 	ring = &adev->uvd.inst->ring;
 	sprintf(ring->name, "uvd");
 	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
 	if (r)
 		return r;
 
+	r = amdgpu_uvd_resume(adev);
+	if (r)
+		return r;
+
 	if (uvd_v6_0_enc_support(adev)) {
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 			ring = &adev->uvd.inst->ring_enc[i];
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 9b7f846..057151b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -444,10 +444,6 @@
 		DRM_INFO("PSP loading UVD firmware\n");
 	}
 
-	r = amdgpu_uvd_resume(adev);
-	if (r)
-		return r;
-
 	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
 		if (adev->uvd.harvest_config & (1 << j))
 			continue;
@@ -479,6 +475,10 @@
 		}
 	}
 
+	r = amdgpu_uvd_resume(adev);
+	if (r)
+		return r;
+
 	r = amdgpu_uvd_entity_init(adev);
 	if (r)
 		return r;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 1427675..5aba50f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -661,6 +661,7 @@
 {
 	uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
 	bool is_patched = false;
+	unsigned long flags;
 
 	if (!kfd->init_complete)
 		return;
@@ -670,7 +671,7 @@
 		return;
 	}
 
-	spin_lock(&kfd->interrupt_lock);
+	spin_lock_irqsave(&kfd->interrupt_lock, flags);
 
 	if (kfd->interrupts_active
 	    && interrupt_is_wanted(kfd, ih_ring_entry,
@@ -679,7 +680,7 @@
 				     is_patched ? patched_ihre : ih_ring_entry))
 		queue_work(kfd->ih_wq, &kfd->interrupt_work);
 
-	spin_unlock(&kfd->interrupt_lock);
+	spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
 }
 
 int kgd2kfd_quiesce_mm(struct mm_struct *mm)
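
The switch to the irqsave variants above follows the usual rule for a lock
that may also be taken from interrupt context: local interrupts must be
disabled while the lock is held, or an interrupt arriving on the same CPU
could try to re-acquire it and deadlock. A generic sketch of the pattern
(illustrative, not tied to kfd):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(lock);

    static void touch_shared_state(void)
    {
    	unsigned long flags;

    	/* Save the interrupt state, disable interrupts locally and
    	 * take the lock; restore both on the way out.
    	 */
    	spin_lock_irqsave(&lock, flags);
    	/* ... critical section shared with IRQ context ... */
    	spin_unlock_irqrestore(&lock, flags);
    }
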
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 299def8..a851bb0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -565,22 +565,36 @@
 {
 	struct amdgpu_dm_connector *aconnector;
 	struct drm_connector *connector;
+	struct drm_dp_mst_topology_mgr *mgr;
+	int ret;
+	bool need_hotplug = false;
 
 	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		   aconnector = to_amdgpu_dm_connector(connector);
-		   if (aconnector->dc_link->type == dc_connection_mst_branch &&
-				   !aconnector->mst_port) {
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    head) {
+		aconnector = to_amdgpu_dm_connector(connector);
+		if (aconnector->dc_link->type != dc_connection_mst_branch ||
+		    aconnector->mst_port)
+			continue;
 
-			   if (suspend)
-				   drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
-			   else
-				   drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
-		   }
+		mgr = &aconnector->mst_mgr;
+
+		if (suspend) {
+			drm_dp_mst_topology_mgr_suspend(mgr);
+		} else {
+			ret = drm_dp_mst_topology_mgr_resume(mgr);
+			if (ret < 0) {
+				drm_dp_mst_topology_mgr_set_mst(mgr, false);
+				need_hotplug = true;
+			}
+		}
 	}
 
 	drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+	if (need_hotplug)
+		drm_kms_helper_hotplug_event(dev);
 }
 
 static int dm_hw_init(void *handle)
@@ -736,7 +750,6 @@
 	struct drm_plane_state *new_plane_state;
 	struct dm_plane_state *dm_new_plane_state;
 	enum dc_connection_type new_connection_type = dc_connection_none;
-	int ret;
 	int i;
 
 	/* power on hardware */
@@ -809,13 +822,13 @@
 		}
 	}
 
-	ret = drm_atomic_helper_resume(ddev, dm->cached_state);
+	drm_atomic_helper_resume(ddev, dm->cached_state);
 
 	dm->cached_state = NULL;
 
 	amdgpu_dm_irq_resume_late(adev);
 
-	return ret;
+	return 0;
 }
 
 static const struct amd_ip_funcs amdgpu_dm_funcs = {
@@ -2894,6 +2907,7 @@
 		state->underscan_enable = false;
 		state->underscan_hborder = 0;
 		state->underscan_vborder = 0;
+		state->max_bpc = 8;
 
 		__drm_atomic_helper_connector_reset(connector, &state->base);
 	}
@@ -2911,6 +2925,7 @@
 	if (new_state) {
 		__drm_atomic_helper_connector_duplicate_state(connector,
 							      &new_state->base);
+		new_state->max_bpc = state->max_bpc;
 		return &new_state->base;
 	}
 
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 9bfb040..6a6d977 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -60,6 +60,11 @@
 		return -EINVAL;
 	}
 
+	if (!stream_state) {
+		DRM_ERROR("No stream state for CRTC%d\n", crtc->index);
+		return -EINVAL;
+	}
+
 	/* When enabling CRC, we should also disable dithering. */
 	if (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO) {
 		if (dc_stream_configure_crc(stream_state->ctx->dc,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index fced3c1..7c89785 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2457,11 +2457,11 @@
 {
 	struct dc  *core_dc = pipe_ctx->stream->ctx->dc;
 
+	core_dc->hwss.blank_stream(pipe_ctx);
+
 	if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
 		deallocate_mst_payload(pipe_ctx);
 
-	core_dc->hwss.blank_stream(pipe_ctx);
-
 	core_dc->hwss.disable_stream(pipe_ctx, option);
 
 	disable_link(pipe_ctx->stream->sink->link, pipe_ctx->stream->signal);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 1bb4c31..f77bff5 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1425,6 +1425,9 @@
 			DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
 				  crtc->base.id, crtc->name);
 	}
+
+	if (old_state->fake_commit)
+		complete_all(&old_state->fake_commit->flip_done);
 }
 EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
 
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 9214c8b..1bda809 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -56,6 +56,25 @@
 		 "Overallocation of the fbdev buffer (%) [default="
 		 __MODULE_STRING(CONFIG_DRM_FBDEV_OVERALLOC) "]");
 
+/*
+ * In order to keep user-space compatibility, in certain use-cases we want
+ * to keep leaking the fbdev physical address to the user-space program
+ * handling the fbdev buffer.
+ * This is a bad habit, essentially kept alive by closed-source OpenGL
+ * drivers that should really move to open-source upstream projects,
+ * instead of using legacy physical addresses in user space to communicate
+ * with other out-of-tree kernel modules.
+ *
+ * This module_param *should* be removed as soon as possible and is to be
+ * considered broken, legacy behaviour from a modern fbdev device.
+ */
+#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
+static bool drm_leak_fbdev_smem = false;
+module_param_unsafe(drm_leak_fbdev_smem, bool, 0600);
+MODULE_PARM_DESC(drm_leak_fbdev_smem,
+		 "Allow unsafe leaking fbdev physical smem address [default=false]");
+#endif
+
 static LIST_HEAD(kernel_fb_helper_list);
 static DEFINE_MUTEX(kernel_fb_helper_lock);
 
@@ -1602,6 +1621,64 @@
 	       var_1->transp.msb_right == var_2->transp.msb_right;
 }
 
+static void drm_fb_helper_fill_pixel_fmt(struct fb_var_screeninfo *var,
+					 u8 depth)
+{
+	switch (depth) {
+	case 8:
+		var->red.offset = 0;
+		var->green.offset = 0;
+		var->blue.offset = 0;
+		var->red.length = 8; /* 8bit DAC */
+		var->green.length = 8;
+		var->blue.length = 8;
+		var->transp.offset = 0;
+		var->transp.length = 0;
+		break;
+	case 15:
+		var->red.offset = 10;
+		var->green.offset = 5;
+		var->blue.offset = 0;
+		var->red.length = 5;
+		var->green.length = 5;
+		var->blue.length = 5;
+		var->transp.offset = 15;
+		var->transp.length = 1;
+		break;
+	case 16:
+		var->red.offset = 11;
+		var->green.offset = 5;
+		var->blue.offset = 0;
+		var->red.length = 5;
+		var->green.length = 6;
+		var->blue.length = 5;
+		var->transp.offset = 0;
+		break;
+	case 24:
+		var->red.offset = 16;
+		var->green.offset = 8;
+		var->blue.offset = 0;
+		var->red.length = 8;
+		var->green.length = 8;
+		var->blue.length = 8;
+		var->transp.offset = 0;
+		var->transp.length = 0;
+		break;
+	case 32:
+		var->red.offset = 16;
+		var->green.offset = 8;
+		var->blue.offset = 0;
+		var->red.length = 8;
+		var->green.length = 8;
+		var->blue.length = 8;
+		var->transp.offset = 24;
+		var->transp.length = 8;
+		break;
+	default:
+		break;
+	}
+}
+
 /**
  * drm_fb_helper_check_var - implementation for &fb_ops.fb_check_var
  * @var: screeninfo to check
@@ -1613,9 +1690,14 @@
 	struct drm_fb_helper *fb_helper = info->par;
 	struct drm_framebuffer *fb = fb_helper->fb;
 
-	if (var->pixclock != 0 || in_dbg_master())
+	if (in_dbg_master())
 		return -EINVAL;
 
+	if (var->pixclock != 0) {
+		DRM_DEBUG("fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n");
+		var->pixclock = 0;
+	}
+
 	/*
 	 * Changes struct fb_var_screeninfo are currently not pushed back
 	 * to KMS, hence fail if different settings are requested.
@@ -1632,6 +1714,20 @@
 	}
 
 	/*
+	 * Workaround for SDL 1.2, which is known to set all pixel format
+	 * fields to zero in some cases. We treat this situation as a request
+	 * to use reasonable autodetected values.
+	 */
+	if (!var->red.offset     && !var->green.offset    &&
+	    !var->blue.offset    && !var->transp.offset   &&
+	    !var->red.length     && !var->green.length    &&
+	    !var->blue.length    && !var->transp.length   &&
+	    !var->red.msb_right  && !var->green.msb_right &&
+	    !var->blue.msb_right && !var->transp.msb_right) {
+		drm_fb_helper_fill_pixel_fmt(var, fb->format->depth);
+	}
+
+	/*
 	 * drm fbdev emulation doesn't support changing the pixel format at all,
 	 * so reject all pixel format changing requests.
 	 */
@@ -1942,59 +2038,7 @@
 	info->var.yoffset = 0;
 	info->var.activate = FB_ACTIVATE_NOW;
 
-	switch (fb->format->depth) {
-	case 8:
-		info->var.red.offset = 0;
-		info->var.green.offset = 0;
-		info->var.blue.offset = 0;
-		info->var.red.length = 8; /* 8bit DAC */
-		info->var.green.length = 8;
-		info->var.blue.length = 8;
-		info->var.transp.offset = 0;
-		info->var.transp.length = 0;
-		break;
-	case 15:
-		info->var.red.offset = 10;
-		info->var.green.offset = 5;
-		info->var.blue.offset = 0;
-		info->var.red.length = 5;
-		info->var.green.length = 5;
-		info->var.blue.length = 5;
-		info->var.transp.offset = 15;
-		info->var.transp.length = 1;
-		break;
-	case 16:
-		info->var.red.offset = 11;
-		info->var.green.offset = 5;
-		info->var.blue.offset = 0;
-		info->var.red.length = 5;
-		info->var.green.length = 6;
-		info->var.blue.length = 5;
-		info->var.transp.offset = 0;
-		break;
-	case 24:
-		info->var.red.offset = 16;
-		info->var.green.offset = 8;
-		info->var.blue.offset = 0;
-		info->var.red.length = 8;
-		info->var.green.length = 8;
-		info->var.blue.length = 8;
-		info->var.transp.offset = 0;
-		info->var.transp.length = 0;
-		break;
-	case 32:
-		info->var.red.offset = 16;
-		info->var.green.offset = 8;
-		info->var.blue.offset = 0;
-		info->var.red.length = 8;
-		info->var.green.length = 8;
-		info->var.blue.length = 8;
-		info->var.transp.offset = 24;
-		info->var.transp.length = 8;
-		break;
-	default:
-		break;
-	}
+	drm_fb_helper_fill_pixel_fmt(&info->var, fb->format->depth);
 
 	info->var.xres = fb_width;
 	info->var.yres = fb_height;
@@ -3041,6 +3085,12 @@
 	fbi->screen_size = fb->height * fb->pitches[0];
 	fbi->fix.smem_len = fbi->screen_size;
 	fbi->screen_buffer = buffer->vaddr;
+	/* Shamelessly leak the physical address to user-space */
+#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
+	if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0)
+		fbi->fix.smem_start =
+			page_to_phys(virt_to_page(fbi->screen_buffer));
+#endif
 	strcpy(fbi->fix.id, "DRM emulated");
 
 	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
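
The hunk above is the consumer of the CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM gate
and the drm_leak_fbdev_smem parameter introduced at the top of this file:
only when the option is compiled in, the unsafe parameter is set, and no
address was already reported does the fbdev emulation expose the buffer's
physical address through fix.smem_start. At runtime the 0600 parameter can
presumably be toggled via
/sys/module/drm_kms_helper/parameters/drm_leak_fbdev_smem (assuming
drm_fb_helper.c is built into drm_kms_helper as usual); note that
module_param_unsafe() taints the kernel once the parameter is used.
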
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index ea10e9a..ba129b6 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -37,6 +37,7 @@
 
 #include <linux/pci.h>
 #include <linux/export.h>
+#include <linux/nospec.h>
 
 /**
  * DOC: getunique and setversion story
@@ -794,13 +795,17 @@
 
 	if (is_driver_ioctl) {
 		/* driver ioctl */
-		if (nr - DRM_COMMAND_BASE >= dev->driver->num_ioctls)
+		unsigned int index = nr - DRM_COMMAND_BASE;
+
+		if (index >= dev->driver->num_ioctls)
 			goto err_i1;
-		ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+		index = array_index_nospec(index, dev->driver->num_ioctls);
+		ioctl = &dev->driver->ioctls[index];
 	} else {
 		/* core ioctl */
 		if (nr >= DRM_CORE_IOCTL_COUNT)
 			goto err_i1;
+		nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT);
 		ioctl = &drm_ioctls[nr];
 	}
 
@@ -882,6 +887,7 @@
 
 	if (nr >= DRM_CORE_IOCTL_COUNT)
 		return false;
+	nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT);
 
 	*flags = drm_ioctls[nr].flags;
 	return true;
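
Both hunks apply the standard Spectre-v1 mitigation: a user-controlled
index is first bounds-checked and then clamped with array_index_nospec()
before the table load, so the CPU cannot use a speculatively out-of-bounds
index. The generic pattern, as a sketch (names are placeholders):

    #include <linux/nospec.h>

    /* idx comes from user space; table has 'count' entries. */
    if (idx >= count)
    	return -EINVAL;
    idx = array_index_nospec(idx, count);
    entry = &table[idx];
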
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 05d7db2..3eb8b2a 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -360,6 +360,7 @@
 
 	if (dsi->mode_flags & MIPI_DSI_MODE_LPM)
 		msg->flags |= MIPI_DSI_MSG_USE_LPM;
+	msg->flags |= MIPI_DSI_MSG_LASTCOMMAND;
 
 	return ops->transfer(dsi->host, msg);
 }
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 9ad89e3..12e4203 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -996,7 +996,7 @@
 {
 	unsigned int index;
 	u64 virtaddr;
-	unsigned long req_size, pgoff = 0;
+	unsigned long req_size, pgoff, req_start;
 	pgprot_t pg_prot;
 	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
 
@@ -1014,7 +1014,17 @@
 	pg_prot = vma->vm_page_prot;
 	virtaddr = vma->vm_start;
 	req_size = vma->vm_end - vma->vm_start;
-	pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
+	pgoff = vma->vm_pgoff &
+		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
+	req_start = pgoff << PAGE_SHIFT;
+
+	if (!intel_vgpu_in_aperture(vgpu, req_start))
+		return -EINVAL;
+	if (req_start + req_size >
+	    vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
+		return -EINVAL;
+
+	pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;
 
 	return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
 }
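
The rewritten mmap path no longer maps the aperture base unconditionally:
it derives the page offset from the caller-supplied vma->vm_pgoff, masks
it down to the per-BAR offset, and rejects any range that does not fall
entirely inside the vGPU's aperture before calling remap_pfn_range(). The
general shape of such a check (a sketch, not the driver code):

    /* start and size are caller-controlled; region_base/region_size
     * describe the only window user space may map.
     */
    if (start < region_base ||
        start + size > region_base + region_size)
    	return -EINVAL;
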
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 5f57f4e..87411a5 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2128,6 +2128,7 @@
 int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
 {
 	struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
+	int err;
 
 	/*
 	 * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
@@ -2143,9 +2144,17 @@
 	 * allocator works in address space sizes, so it's multiplied by page
 	 * size. We allocate at the top of the GTT to avoid fragmentation.
 	 */
-	return i915_vma_pin(ppgtt->vma,
-			    0, GEN6_PD_ALIGN,
-			    PIN_GLOBAL | PIN_HIGH);
+	err = i915_vma_pin(ppgtt->vma,
+			   0, GEN6_PD_ALIGN,
+			   PIN_GLOBAL | PIN_HIGH);
+	if (err)
+		goto unpin;
+
+	return 0;
+
+unpin:
+	ppgtt->pin_count = 0;
+	return err;
 }
 
 void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index 191b314..709475d 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -45,7 +45,6 @@
 	struct drm_crtc base;
 	struct drm_pending_vblank_event *event;
 	struct meson_drm *priv;
-	bool enabled;
 };
 #define to_meson_crtc(x) container_of(x, struct meson_crtc, base)
 
@@ -81,7 +80,8 @@
 
 };
 
-static void meson_crtc_enable(struct drm_crtc *crtc)
+static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
+				     struct drm_crtc_state *old_state)
 {
 	struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
 	struct drm_crtc_state *crtc_state = crtc->state;
@@ -103,20 +103,6 @@
 
 	drm_crtc_vblank_on(crtc);
 
-	meson_crtc->enabled = true;
-}
-
-static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
-				     struct drm_crtc_state *old_state)
-{
-	struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
-	struct meson_drm *priv = meson_crtc->priv;
-
-	DRM_DEBUG_DRIVER("\n");
-
-	if (!meson_crtc->enabled)
-		meson_crtc_enable(crtc);
-
 	priv->viu.osd1_enabled = true;
 }
 
@@ -142,8 +128,6 @@
 
 		crtc->state->event = NULL;
 	}
-
-	meson_crtc->enabled = false;
 }
 
 static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
@@ -152,9 +136,6 @@
 	struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
 	unsigned long flags;
 
-	if (crtc->state->enable && !meson_crtc->enabled)
-		meson_crtc_enable(crtc);
-
 	if (crtc->state->event) {
 		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
 
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index d344312..bf5f294 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -82,6 +82,10 @@
 	.fb_create           = drm_gem_fb_create,
 };
 
+static const struct drm_mode_config_helper_funcs meson_mode_config_helpers = {
+	.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+};
+
 static irqreturn_t meson_irq(int irq, void *arg)
 {
 	struct drm_device *dev = arg;
@@ -246,6 +250,7 @@
 	drm->mode_config.max_width = 3840;
 	drm->mode_config.max_height = 2160;
 	drm->mode_config.funcs = &meson_mode_config_funcs;
+	drm->mode_config.helper_private	= &meson_mode_config_helpers;
 
 	/* Hardware Initialization */
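
Design note: drm_atomic_helper_commit_tail_rpm() enables CRTCs before
committing plane updates and only touches active CRTCs, which is
presumably why the hand-rolled "enabled" tracking and the early
meson_crtc_enable() calls removed from meson_crtc.c above are no longer
needed.
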
 
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 94093bc..578c43d 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -15,6 +15,7 @@
 	select WANT_DEV_COREDUMP
 	select SND_SOC_HDMI_CODEC if SND_SOC
 	select SYNC_FILE
+	select MSM_EXT_DISPLAY
 	select PM_OPP
 	default y
 	help
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
new file mode 100644
index 0000000..879c13f
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
@@ -0,0 +1,479 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+
+#include "dpu_core_irq.h"
+#include "dpu_trace.h"
+
+/**
+ * dpu_core_irq_callback_handler - dispatch core interrupts
+ * @arg:		private data of callback handler
+ * @irq_idx:		interrupt index
+ */
+static void dpu_core_irq_callback_handler(void *arg, int irq_idx)
+{
+	struct dpu_kms *dpu_kms = arg;
+	struct dpu_irq *irq_obj = &dpu_kms->irq_obj;
+	struct dpu_irq_callback *cb;
+	unsigned long irq_flags;
+
+	pr_debug("irq_idx=%d\n", irq_idx);
+
+	if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) {
+		DRM_ERROR("no registered cb, idx:%d enable_count:%d\n", irq_idx,
+			atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]));
+	}
+
+	atomic_inc(&irq_obj->irq_counts[irq_idx]);
+
+	/*
+	 * Perform registered function callback
+	 */
+	spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+	list_for_each_entry(cb, &irq_obj->irq_cb_tbl[irq_idx], list)
+		if (cb->func)
+			cb->func(cb->arg, irq_idx);
+	spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+
+	/*
+	 * Clear pending interrupt status in HW.
+	 * NOTE: dpu_core_irq_callback_handler is protected by top-level
+	 *       spinlock, so it is safe to clear any interrupt status here.
+	 */
+	dpu_kms->hw_intr->ops.clear_intr_status_nolock(
+			dpu_kms->hw_intr,
+			irq_idx);
+}
+
+int dpu_core_irq_idx_lookup(struct dpu_kms *dpu_kms,
+		enum dpu_intr_type intr_type, u32 instance_idx)
+{
+	if (!dpu_kms || !dpu_kms->hw_intr ||
+			!dpu_kms->hw_intr->ops.irq_idx_lookup)
+		return -EINVAL;
+
+	return dpu_kms->hw_intr->ops.irq_idx_lookup(intr_type,
+			instance_idx);
+}
+
+/**
+ * _dpu_core_irq_enable - enable core interrupt given by the index
+ * @dpu_kms:		Pointer to dpu kms context
+ * @irq_idx:		interrupt index
+ */
+static int _dpu_core_irq_enable(struct dpu_kms *dpu_kms, int irq_idx)
+{
+	unsigned long irq_flags;
+	int ret = 0, enable_count;
+
+	if (!dpu_kms || !dpu_kms->hw_intr ||
+			!dpu_kms->irq_obj.enable_counts ||
+			!dpu_kms->irq_obj.irq_counts) {
+		DPU_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
+	DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
+	trace_dpu_core_irq_enable_idx(irq_idx, enable_count);
+
+	if (atomic_inc_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 1) {
+		ret = dpu_kms->hw_intr->ops.enable_irq(
+				dpu_kms->hw_intr,
+				irq_idx);
+		if (ret)
+			DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
+					irq_idx);
+
+		DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+
+		spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+		/* empty callback list but interrupt is enabled */
+		if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]))
+			DPU_ERROR("irq_idx=%d enabled with no callback\n",
+					irq_idx);
+		spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+	}
+
+	return ret;
+}
+
+int dpu_core_irq_enable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
+{
+	int i, ret = 0, counts;
+
+	if (!dpu_kms || !irq_idxs || !irq_count) {
+		DPU_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
+	if (counts)
+		DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);
+
+	for (i = 0; (i < irq_count) && !ret; i++)
+		ret = _dpu_core_irq_enable(dpu_kms, irq_idxs[i]);
+
+	return ret;
+}
+
+/**
+ * _dpu_core_irq_disable - disable core interrupt given by the index
+ * @dpu_kms:		Pointer to dpu kms context
+ * @irq_idx:		interrupt index
+ */
+static int _dpu_core_irq_disable(struct dpu_kms *dpu_kms, int irq_idx)
+{
+	int ret = 0, enable_count;
+
+	if (!dpu_kms || !dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) {
+		DPU_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
+	DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
+	trace_dpu_core_irq_disable_idx(irq_idx, enable_count);
+
+	if (atomic_dec_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 0) {
+		ret = dpu_kms->hw_intr->ops.disable_irq(
+				dpu_kms->hw_intr,
+				irq_idx);
+		if (ret)
+			DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
+					irq_idx);
+		DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+	}
+
+	return ret;
+}
+
+int dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
+{
+	int i, ret = 0, counts;
+
+	if (!dpu_kms || !irq_idxs || !irq_count) {
+		DPU_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
+	if (counts == 2)
+		DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);
+
+	for (i = 0; (i < irq_count) && !ret; i++)
+		ret = _dpu_core_irq_disable(dpu_kms, irq_idxs[i]);
+
+	return ret;
+}
+
+u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
+{
+	if (!dpu_kms || !dpu_kms->hw_intr ||
+			!dpu_kms->hw_intr->ops.get_interrupt_status)
+		return 0;
+
+	if (irq_idx < 0) {
+		DPU_ERROR("[%pS] invalid irq_idx=%d\n",
+				__builtin_return_address(0), irq_idx);
+		return 0;
+	}
+
+	return dpu_kms->hw_intr->ops.get_interrupt_status(dpu_kms->hw_intr,
+			irq_idx, clear);
+}
+
+int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
+		struct dpu_irq_callback *register_irq_cb)
+{
+	unsigned long irq_flags;
+
+	if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) {
+		DPU_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	if (!register_irq_cb || !register_irq_cb->func) {
+		DPU_ERROR("invalid irq_cb:%d func:%d\n",
+				register_irq_cb != NULL,
+				register_irq_cb ?
+					register_irq_cb->func != NULL : -1);
+		return -EINVAL;
+	}
+
+	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+	spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+	trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
+	list_del_init(&register_irq_cb->list);
+	list_add_tail(&register_irq_cb->list,
+			&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]);
+	spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+
+	return 0;
+}
+
+int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
+		struct dpu_irq_callback *register_irq_cb)
+{
+	unsigned long irq_flags;
+
+	if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) {
+		DPU_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	if (!register_irq_cb || !register_irq_cb->func) {
+		DPU_ERROR("invalid irq_cb:%d func:%d\n",
+				register_irq_cb != NULL,
+				register_irq_cb ?
+					register_irq_cb->func != NULL : -1);
+		return -EINVAL;
+	}
+
+	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+	spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+	trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
+	list_del_init(&register_irq_cb->list);
+	/* empty callback list but interrupt is still enabled */
+	if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]) &&
+			atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]))
+		DPU_ERROR("irq_idx=%d enabled with no callback\n", irq_idx);
+	spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+
+	return 0;
+}
+
+static void dpu_clear_all_irqs(struct dpu_kms *dpu_kms)
+{
+	if (!dpu_kms || !dpu_kms->hw_intr ||
+			!dpu_kms->hw_intr->ops.clear_all_irqs)
+		return;
+
+	dpu_kms->hw_intr->ops.clear_all_irqs(dpu_kms->hw_intr);
+}
+
+static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
+{
+	if (!dpu_kms || !dpu_kms->hw_intr ||
+			!dpu_kms->hw_intr->ops.disable_all_irqs)
+		return;
+
+	dpu_kms->hw_intr->ops.disable_all_irqs(dpu_kms->hw_intr);
+}
+
+#ifdef CONFIG_DEBUG_FS
+#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)				\
+static int __prefix ## _open(struct inode *inode, struct file *file)	\
+{									\
+	return single_open(file, __prefix ## _show, inode->i_private);	\
+}									\
+static const struct file_operations __prefix ## _fops = {		\
+	.owner = THIS_MODULE,						\
+	.open = __prefix ## _open,					\
+	.release = single_release,					\
+	.read = seq_read,						\
+	.llseek = seq_lseek,						\
+}
+
+static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
+{
+	struct dpu_irq *irq_obj = s->private;
+	struct dpu_irq_callback *cb;
+	unsigned long irq_flags;
+	int i, irq_count, enable_count, cb_count;
+
+	if (!irq_obj || !irq_obj->enable_counts || !irq_obj->irq_cb_tbl) {
+		DPU_ERROR("invalid parameters\n");
+		return 0;
+	}
+
+	for (i = 0; i < irq_obj->total_irqs; i++) {
+		spin_lock_irqsave(&irq_obj->cb_lock, irq_flags);
+		cb_count = 0;
+		irq_count = atomic_read(&irq_obj->irq_counts[i]);
+		enable_count = atomic_read(&irq_obj->enable_counts[i]);
+		list_for_each_entry(cb, &irq_obj->irq_cb_tbl[i], list)
+			cb_count++;
+		spin_unlock_irqrestore(&irq_obj->cb_lock, irq_flags);
+
+		if (irq_count || enable_count || cb_count)
+			seq_printf(s, "idx:%d irq:%d enable:%d cb:%d\n",
+					i, irq_count, enable_count, cb_count);
+	}
+
+	return 0;
+}
+
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_core_irq);
+
+int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+		struct dentry *parent)
+{
+	dpu_kms->irq_obj.debugfs_file = debugfs_create_file("core_irq", 0600,
+			parent, &dpu_kms->irq_obj,
+			&dpu_debugfs_core_irq_fops);
+
+	return 0;
+}
+
+void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
+{
+	debugfs_remove(dpu_kms->irq_obj.debugfs_file);
+	dpu_kms->irq_obj.debugfs_file = NULL;
+}
+
+#else
+int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+		struct dentry *parent)
+{
+	return 0;
+}
+
+void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
+{
+}
+#endif
+
+void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
+{
+	struct msm_drm_private *priv;
+	int i;
+
+	if (!dpu_kms) {
+		DPU_ERROR("invalid dpu_kms\n");
+		return;
+	} else if (!dpu_kms->dev) {
+		DPU_ERROR("invalid drm device\n");
+		return;
+	} else if (!dpu_kms->dev->dev_private) {
+		DPU_ERROR("invalid device private\n");
+		return;
+	}
+	priv = dpu_kms->dev->dev_private;
+
+	pm_runtime_get_sync(&dpu_kms->pdev->dev);
+	dpu_clear_all_irqs(dpu_kms);
+	dpu_disable_all_irqs(dpu_kms);
+	pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+	spin_lock_init(&dpu_kms->irq_obj.cb_lock);
+
+	/* Create irq callbacks for all possible irq_idx */
+	dpu_kms->irq_obj.total_irqs = dpu_kms->hw_intr->irq_idx_tbl_size;
+	dpu_kms->irq_obj.irq_cb_tbl = kcalloc(dpu_kms->irq_obj.total_irqs,
+			sizeof(struct list_head), GFP_KERNEL);
+	dpu_kms->irq_obj.enable_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
+			sizeof(atomic_t), GFP_KERNEL);
+	dpu_kms->irq_obj.irq_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
+			sizeof(atomic_t), GFP_KERNEL);
+	for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++) {
+		INIT_LIST_HEAD(&dpu_kms->irq_obj.irq_cb_tbl[i]);
+		atomic_set(&dpu_kms->irq_obj.enable_counts[i], 0);
+		atomic_set(&dpu_kms->irq_obj.irq_counts[i], 0);
+	}
+}
+
+int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms)
+{
+	return 0;
+}
+
+void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
+{
+	struct msm_drm_private *priv;
+	int i;
+
+	if (!dpu_kms) {
+		DPU_ERROR("invalid dpu_kms\n");
+		return;
+	} else if (!dpu_kms->dev) {
+		DPU_ERROR("invalid drm device\n");
+		return;
+	} else if (!dpu_kms->dev->dev_private) {
+		DPU_ERROR("invalid device private\n");
+		return;
+	}
+	priv = dpu_kms->dev->dev_private;
+
+	pm_runtime_get_sync(&dpu_kms->pdev->dev);
+	for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++)
+		if (atomic_read(&dpu_kms->irq_obj.enable_counts[i]) ||
+				!list_empty(&dpu_kms->irq_obj.irq_cb_tbl[i]))
+			DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
+
+	dpu_clear_all_irqs(dpu_kms);
+	dpu_disable_all_irqs(dpu_kms);
+	pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+	kfree(dpu_kms->irq_obj.irq_cb_tbl);
+	kfree(dpu_kms->irq_obj.enable_counts);
+	kfree(dpu_kms->irq_obj.irq_counts);
+	dpu_kms->irq_obj.irq_cb_tbl = NULL;
+	dpu_kms->irq_obj.enable_counts = NULL;
+	dpu_kms->irq_obj.irq_counts = NULL;
+	dpu_kms->irq_obj.total_irqs = 0;
+}
+
+irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms)
+{
+	/*
+	 * Read interrupt status from all sources. Interrupt statuses are
+	 * stored within hw_intr.
+	 * The function also clears the interrupt status after reading.
+	 * An individual interrupt status bit is only stored if that
+	 * interrupt is enabled.
+	 */
+	dpu_kms->hw_intr->ops.get_interrupt_statuses(dpu_kms->hw_intr);
+
+	/*
+	 * Dispatch to the HW driver to look up the interrupt that fired.
+	 * When a matching interrupt is located, the HW driver calls
+	 * dpu_core_irq_callback_handler with the irq_idx from the lookup table.
+	 * dpu_core_irq_callback_handler performs the registered function
+	 * callback, and clears the interrupt status once the registered
+	 * callback has finished.
+	 */
+	dpu_kms->hw_intr->ops.dispatch_irqs(
+			dpu_kms->hw_intr,
+			dpu_core_irq_callback_handler,
+			dpu_kms);
+
+	return IRQ_HANDLED;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
new file mode 100644
index 0000000..5e98bba
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
@@ -0,0 +1,153 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_CORE_IRQ_H__
+#define __DPU_CORE_IRQ_H__
+
+#include "dpu_kms.h"
+#include "dpu_hw_interrupts.h"
+
+/**
+ * dpu_core_irq_preinstall - perform pre-installation of core IRQ handler
+ * @dpu_kms:		DPU handle
+ * @return:		none
+ */
+void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq_postinstall - perform post-installation of core IRQ handler
+ * @dpu_kms:		DPU handle
+ * @return:		0 if success; error code otherwise
+ */
+int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq_uninstall - uninstall core IRQ handler
+ * @dpu_kms:		DPU handle
+ * @return:		none
+ */
+void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq - core IRQ handler
+ * @dpu_kms:		DPU handle
+ * @return:		interrupt handling status
+ */
+irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq_idx_lookup - IRQ helper function for looking up irq_idx in
+ *                      the HW interrupt mapping table.
+ * @dpu_kms:		DPU handle
+ * @intr_type:		DPU HW interrupt type for lookup
+ * @instance_idx:	DPU HW block instance defined in dpu_hw_mdss.h
+ * @return:		irq_idx, or -EINVAL when the lookup fails
+ */
+int dpu_core_irq_idx_lookup(
+		struct dpu_kms *dpu_kms,
+		enum dpu_intr_type intr_type,
+		uint32_t instance_idx);
+
+/**
+ * dpu_core_irq_enable - IRQ helper function for enabling one or more IRQs
+ * @dpu_kms:		DPU handle
+ * @irq_idxs:		Array of irq index
+ * @irq_count:		Number of irq_idx provided in the array
+ * @return:		0 for success enabling IRQ, otherwise failure
+ *
+ * This function increments the count on each enable and decrements it on
+ * each disable.  The interrupt is enabled if the count is 0 before the
+ * increment.
+ */
+int dpu_core_irq_enable(
+		struct dpu_kms *dpu_kms,
+		int *irq_idxs,
+		uint32_t irq_count);
+
+/**
+ * dpu_core_irq_disable - IRQ helper function for disabling one or more IRQs
+ * @dpu_kms:		DPU handle
+ * @irq_idxs:		Array of irq index
+ * @irq_count:		Number of irq_idx provided in the array
+ * @return:		0 for success disabling IRQ, otherwise failure
+ *
+ * This function increments the count on each enable and decrements it on
+ * each disable.  The interrupt is disabled if the count reaches 0 after
+ * the decrement.
+ */
+int dpu_core_irq_disable(
+		struct dpu_kms *dpu_kms,
+		int *irq_idxs,
+		uint32_t irq_count);
+
+/**
+ * dpu_core_irq_read - IRQ helper function for reading IRQ status
+ * @dpu_kms:		DPU handle
+ * @irq_idx:		irq index
+ * @clear:		True to clear the irq after read
+ * @return:		non-zero if irq detected; otherwise no irq detected
+ */
+u32 dpu_core_irq_read(
+		struct dpu_kms *dpu_kms,
+		int irq_idx,
+		bool clear);
+
+/**
+ * dpu_core_irq_register_callback - For registering a callback function for
+ *                             an IRQ
+ * @dpu_kms:		DPU handle
+ * @irq_idx:		irq index
+ * @irq_cb:		IRQ callback structure, containing callback function
+ *			and argument. Must be non-NULL with a valid callback
+ *			function; registration fails for a NULL irq_cb.
+ *			This must exist until un-registration.
+ * @return:		0 for success registering callback, otherwise failure
+ *
+ * This function supports registration of multiple callbacks for each interrupt.
+ */
+int dpu_core_irq_register_callback(
+		struct dpu_kms *dpu_kms,
+		int irq_idx,
+		struct dpu_irq_callback *irq_cb);
+
+/**
+ * dpu_core_irq_unregister_callback - For unregistering a callback function
+ *                             for an IRQ
+ * @dpu_kms:		DPU handle
+ * @irq_idx:		irq index
+ * @irq_cb:		IRQ callback structure, containing callback function
+ *			and argument. Must be non-NULL with a valid callback
+ *			function; unregistration fails for a NULL irq_cb.
+ *			This must match with registration.
+ * @return:		0 for success unregistering callback, otherwise failure
+ *
+ * This function supports registration of multiple callbacks for each interrupt.
+ */
+int dpu_core_irq_unregister_callback(
+		struct dpu_kms *dpu_kms,
+		int irq_idx,
+		struct dpu_irq_callback *irq_cb);
+
+/**
+ * dpu_debugfs_core_irq_init - register core irq debugfs
+ * @dpu_kms: pointer to kms
+ * @parent: debugfs directory root
+ * @return: 0 on success
+ */
+int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+		struct dentry *parent);
+
+/**
+ * dpu_debugfs_core_irq_destroy - deregister core irq debugfs
+ * @dpu_kms: pointer to kms
+ */
+void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms);
+
+#endif /* __DPU_CORE_IRQ_H__ */
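
Taken together, the API above implies a caller lifecycle along these lines
(a hypothetical sketch; the intr_type/instance_idx values are placeholders,
and note that dpu_core_irq_register_callback() calls list_del_init() on the
callback's list head, so it must be initialized first):

    static void my_cb(void *arg, int irq_idx)
    {
    	/* runs under the irq_obj callback spinlock */
    }

    struct dpu_irq_callback cb = { .func = my_cb, .arg = my_data };
    int irq_idx;

    INIT_LIST_HEAD(&cb.list);
    irq_idx = dpu_core_irq_idx_lookup(dpu_kms, intr_type, instance_idx);
    if (irq_idx >= 0) {
    	dpu_core_irq_register_callback(dpu_kms, irq_idx, &cb);
    	dpu_core_irq_enable(dpu_kms, &irq_idx, 1);
    	/* ... interrupts are delivered to my_cb() ... */
    	dpu_core_irq_disable(dpu_kms, &irq_idx, 1);
    	dpu_core_irq_unregister_callback(dpu_kms, irq_idx, &cb);
    }
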
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
new file mode 100644
index 0000000..41c5191
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -0,0 +1,637 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/sort.h>
+#include <linux/clk.h>
+#include <linux/bitmap.h>
+
+#include "dpu_kms.h"
+#include "dpu_trace.h"
+#include "dpu_crtc.h"
+#include "dpu_core_perf.h"
+
+#define DPU_PERF_MODE_STRING_SIZE	128
+
+/**
+ * enum dpu_perf_mode - performance tuning mode
+ * @DPU_PERF_MODE_NORMAL: performance controlled by user mode client
+ * @DPU_PERF_MODE_MINIMUM: performance bounded by minimum setting
+ * @DPU_PERF_MODE_FIXED: performance bounded by fixed setting
+ */
+enum dpu_perf_mode {
+	DPU_PERF_MODE_NORMAL,
+	DPU_PERF_MODE_MINIMUM,
+	DPU_PERF_MODE_FIXED,
+	DPU_PERF_MODE_MAX
+};
+
+static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
+{
+	struct msm_drm_private *priv;
+
+	if (!crtc->dev || !crtc->dev->dev_private) {
+		DPU_ERROR("invalid device\n");
+		return NULL;
+	}
+
+	priv = crtc->dev->dev_private;
+	if (!priv || !priv->kms) {
+		DPU_ERROR("invalid kms\n");
+		return NULL;
+	}
+
+	return to_dpu_kms(priv->kms);
+}
+
+static bool _dpu_core_perf_crtc_is_power_on(struct drm_crtc *crtc)
+{
+	return dpu_crtc_is_enabled(crtc);
+}
+
+static bool _dpu_core_video_mode_intf_connected(struct drm_crtc *crtc)
+{
+	struct drm_crtc *tmp_crtc;
+	bool intf_connected = false;
+
+	if (!crtc)
+		goto end;
+
+	drm_for_each_crtc(tmp_crtc, crtc->dev) {
+		if ((dpu_crtc_get_intf_mode(tmp_crtc) == INTF_MODE_VIDEO) &&
+				_dpu_core_perf_crtc_is_power_on(tmp_crtc)) {
+			DPU_DEBUG("video interface connected crtc:%d\n",
+				tmp_crtc->base.id);
+			intf_connected = true;
+			goto end;
+		}
+	}
+
+end:
+	return intf_connected;
+}
+
+static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
+		struct drm_crtc *crtc,
+		struct drm_crtc_state *state,
+		struct dpu_core_perf_params *perf)
+{
+	struct dpu_crtc_state *dpu_cstate;
+	int i;
+
+	if (!kms || !kms->catalog || !crtc || !state || !perf) {
+		DPU_ERROR("invalid parameters\n");
+		return;
+	}
+
+	dpu_cstate = to_dpu_crtc_state(state);
+	memset(perf, 0, sizeof(struct dpu_core_perf_params));
+
+	if (!dpu_cstate->bw_control) {
+		for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+			perf->bw_ctl[i] = kms->catalog->perf.max_bw_high *
+					1000ULL;
+			perf->max_per_pipe_ib[i] = perf->bw_ctl[i];
+		}
+		perf->core_clk_rate = kms->perf.max_core_clk_rate;
+	} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
+		for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+			perf->bw_ctl[i] = 0;
+			perf->max_per_pipe_ib[i] = 0;
+		}
+		perf->core_clk_rate = 0;
+	} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
+		for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+			perf->bw_ctl[i] = kms->perf.fix_core_ab_vote;
+			perf->max_per_pipe_ib[i] = kms->perf.fix_core_ib_vote;
+		}
+		perf->core_clk_rate = kms->perf.fix_core_clk_rate;
+	}
+
+	DPU_DEBUG(
+		"crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu llcc_ib=%llu llcc_ab=%llu mem_ib=%llu mem_ab=%llu\n",
+			crtc->base.id, perf->core_clk_rate,
+			perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MNOC],
+			perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
+			perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_LLCC],
+			perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
+			perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_EBI],
+			perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI]);
+}
+
+int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
+		struct drm_crtc_state *state)
+{
+	u32 bw, threshold;
+	u64 bw_sum_of_intfs = 0;
+	enum dpu_crtc_client_type curr_client_type;
+	bool is_video_mode;
+	struct dpu_crtc_state *dpu_cstate;
+	struct drm_crtc *tmp_crtc;
+	struct dpu_kms *kms;
+	int i;
+
+	if (!crtc || !state) {
+		DPU_ERROR("invalid crtc\n");
+		return -EINVAL;
+	}
+
+	kms = _dpu_crtc_get_kms(crtc);
+	if (!kms || !kms->catalog) {
+		DPU_ERROR("invalid parameters\n");
+		return 0;
+	}
+
+	/* we only need bandwidth check on real-time clients (interfaces) */
+	if (dpu_crtc_get_client_type(crtc) == NRT_CLIENT)
+		return 0;
+
+	dpu_cstate = to_dpu_crtc_state(state);
+
+	/* obtain new values */
+	_dpu_core_perf_calc_crtc(kms, crtc, state, &dpu_cstate->new_perf);
+
+	for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
+			i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+		bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl[i];
+		curr_client_type = dpu_crtc_get_client_type(crtc);
+
+		drm_for_each_crtc(tmp_crtc, crtc->dev) {
+			if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+			    (dpu_crtc_get_client_type(tmp_crtc) ==
+					    curr_client_type) &&
+			    (tmp_crtc != crtc)) {
+				struct dpu_crtc_state *tmp_cstate =
+					to_dpu_crtc_state(tmp_crtc->state);
+
+				DPU_DEBUG("crtc:%d bw:%llu ctrl:%d\n",
+					tmp_crtc->base.id,
+					tmp_cstate->new_perf.bw_ctl[i],
+					tmp_cstate->bw_control);
+				/*
+				 * For bw check only use the bw if the
+				 * atomic property has been already set
+				 */
+				if (tmp_cstate->bw_control)
+					bw_sum_of_intfs +=
+						tmp_cstate->new_perf.bw_ctl[i];
+			}
+		}
+
+		/* convert bandwidth to kb */
+		bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
+		DPU_DEBUG("calculated bandwidth=%uk\n", bw);
+
+		is_video_mode = dpu_crtc_get_intf_mode(crtc) == INTF_MODE_VIDEO;
+		threshold = (is_video_mode ||
+			_dpu_core_video_mode_intf_connected(crtc)) ?
+			kms->catalog->perf.max_bw_low :
+			kms->catalog->perf.max_bw_high;
+
+		DPU_DEBUG("final threshold bw limit = %d\n", threshold);
+
+		if (!dpu_cstate->bw_control) {
+			DPU_DEBUG("bypass bandwidth check\n");
+		} else if (!threshold) {
+			DPU_ERROR("no bandwidth limits specified\n");
+			return -E2BIG;
+		} else if (bw > threshold) {
+			DPU_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw,
+					threshold);
+			return -E2BIG;
+		}
+	}
+
+	return 0;
+}
+
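+/*
+ * Worked example (illustrative numbers, not part of the patch): with two
+ * active interface clients on the same bus, one voting bw_ctl =
+ * 1,500,000,000 and the other 2,600,000,000 (and bw_control set on the
+ * second), bw_sum_of_intfs = 4,100,000,000 and
+ * bw = DIV_ROUND_UP_ULL(4100000000, 1000) = 4,100,000 kb. That value is
+ * then compared against the catalog's max_bw_low or max_bw_high threshold
+ * (also in kb); exceeding the selected threshold fails with -E2BIG.
+ */
+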
+static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
+		struct drm_crtc *crtc, u32 bus_id)
+{
+	struct dpu_core_perf_params perf = { { 0 } };
+	enum dpu_crtc_client_type curr_client_type
+					= dpu_crtc_get_client_type(crtc);
+	struct drm_crtc *tmp_crtc;
+	struct dpu_crtc_state *dpu_cstate;
+	int ret = 0;
+
+	drm_for_each_crtc(tmp_crtc, crtc->dev) {
+		if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+			curr_client_type ==
+				dpu_crtc_get_client_type(tmp_crtc)) {
+			dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
+
+			perf.max_per_pipe_ib[bus_id] =
+				max(perf.max_per_pipe_ib[bus_id],
+				dpu_cstate->new_perf.max_per_pipe_ib[bus_id]);
+
+			DPU_DEBUG("crtc=%d bus_id=%d bw=%llu\n",
+				tmp_crtc->base.id, bus_id,
+				dpu_cstate->new_perf.bw_ctl[bus_id]);
+		}
+	}
+	return ret;
+}
+
+/**
+ * dpu_core_perf_crtc_release_bw() - request zero bandwidth
+ * @crtc: pointer to a crtc
+ *
+ * The function checks a state variable for the crtc; if all pending commit
+ * requests are done, meaning no more bandwidth is needed, the bandwidth
+ * request is released.
+ */
+void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
+{
+	struct drm_crtc *tmp_crtc;
+	struct dpu_crtc *dpu_crtc;
+	struct dpu_crtc_state *dpu_cstate;
+	struct dpu_kms *kms;
+	int i;
+
+	if (!crtc) {
+		DPU_ERROR("invalid crtc\n");
+		return;
+	}
+
+	kms = _dpu_crtc_get_kms(crtc);
+	if (!kms || !kms->catalog) {
+		DPU_ERROR("invalid kms\n");
+		return;
+	}
+
+	dpu_crtc = to_dpu_crtc(crtc);
+	dpu_cstate = to_dpu_crtc_state(crtc->state);
+
+	/* only do this for command mode rt client */
+	if (dpu_crtc_get_intf_mode(crtc) != INTF_MODE_CMD)
+		return;
+
+	/*
+	 * If a video interface is present, the cmd panel's bandwidth cannot
+	 * be released.
+	 */
+	if (dpu_crtc_get_intf_mode(crtc) == INTF_MODE_CMD)
+		drm_for_each_crtc(tmp_crtc, crtc->dev) {
+			if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+				dpu_crtc_get_intf_mode(tmp_crtc) ==
+						INTF_MODE_VIDEO)
+				return;
+		}
+
+	/* Release the bandwidth */
+	if (kms->perf.enable_bw_release) {
+		trace_dpu_cmd_release_bw(crtc->base.id);
+		DPU_DEBUG("Release BW crtc=%d\n", crtc->base.id);
+		for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+			dpu_crtc->cur_perf.bw_ctl[i] = 0;
+			_dpu_core_perf_crtc_update_bus(kms, crtc, i);
+		}
+	}
+}
+
+static int _dpu_core_perf_set_core_clk_rate(struct dpu_kms *kms, u64 rate)
+{
+	struct dss_clk *core_clk = kms->perf.core_clk;
+
+	if (core_clk->max_rate && (rate > core_clk->max_rate))
+		rate = core_clk->max_rate;
+
+	core_clk->rate = rate;
+	return msm_dss_clk_set_rate(core_clk, 1);
+}
+
+static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
+{
+	u64 clk_rate = kms->perf.perf_tune.min_core_clk;
+	struct drm_crtc *crtc;
+	struct dpu_crtc_state *dpu_cstate;
+
+	drm_for_each_crtc(crtc, kms->dev) {
+		if (_dpu_core_perf_crtc_is_power_on(crtc)) {
+			dpu_cstate = to_dpu_crtc_state(crtc->state);
+			clk_rate = max(dpu_cstate->new_perf.core_clk_rate,
+							clk_rate);
+			clk_rate = clk_round_rate(kms->perf.core_clk->clk,
+					clk_rate);
+		}
+	}
+
+	if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED)
+		clk_rate = kms->perf.fix_core_clk_rate;
+
+	DPU_DEBUG("clk:%llu\n", clk_rate);
+
+	return clk_rate;
+}
+
+int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
+		int params_changed, bool stop_req)
+{
+	struct dpu_core_perf_params *new, *old;
+	int update_bus = 0, update_clk = 0;
+	u64 clk_rate = 0;
+	struct dpu_crtc *dpu_crtc;
+	struct dpu_crtc_state *dpu_cstate;
+	int i;
+	struct msm_drm_private *priv;
+	struct dpu_kms *kms;
+	int ret;
+
+	if (!crtc) {
+		DPU_ERROR("invalid crtc\n");
+		return -EINVAL;
+	}
+
+	kms = _dpu_crtc_get_kms(crtc);
+	if (!kms || !kms->catalog) {
+		DPU_ERROR("invalid kms\n");
+		return -EINVAL;
+	}
+	priv = kms->dev->dev_private;
+
+	dpu_crtc = to_dpu_crtc(crtc);
+	dpu_cstate = to_dpu_crtc_state(crtc->state);
+
+	DPU_DEBUG("crtc:%d stop_req:%d core_clk:%llu\n",
+			crtc->base.id, stop_req, kms->perf.core_clk_rate);
+
+	old = &dpu_crtc->cur_perf;
+	new = &dpu_cstate->new_perf;
+
+	if (_dpu_core_perf_crtc_is_power_on(crtc) && !stop_req) {
+		for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+			/*
+			 * Cases for a bus bandwidth update:
+			 * 1. the new "ab" or "ib" vote is higher than the
+			 *    current vote, for an update request;
+			 * 2. the new "ab" or "ib" vote is lower than the
+			 *    current vote, at the end of a commit or stop.
+			 */
+			if ((params_changed && ((new->bw_ctl[i] >
+						old->bw_ctl[i]) ||
+				  (new->max_per_pipe_ib[i] >
+						old->max_per_pipe_ib[i]))) ||
+			    (!params_changed && ((new->bw_ctl[i] <
+						old->bw_ctl[i]) ||
+				  (new->max_per_pipe_ib[i] <
+						old->max_per_pipe_ib[i])))) {
+				DPU_DEBUG(
+					"crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
+					crtc->base.id, params_changed,
+					new->bw_ctl[i], old->bw_ctl[i]);
+				old->bw_ctl[i] = new->bw_ctl[i];
+				old->max_per_pipe_ib[i] =
+						new->max_per_pipe_ib[i];
+				update_bus |= BIT(i);
+			}
+		}
+
+		if ((params_changed &&
+				(new->core_clk_rate > old->core_clk_rate)) ||
+				(!params_changed &&
+				(new->core_clk_rate < old->core_clk_rate))) {
+			old->core_clk_rate = new->core_clk_rate;
+			update_clk = 1;
+		}
+	} else {
+		DPU_DEBUG("crtc=%d disable\n", crtc->base.id);
+		memset(old, 0, sizeof(*old));
+		memset(new, 0, sizeof(*new));
+		update_bus = ~0;
+		update_clk = 1;
+	}
+	trace_dpu_perf_crtc_update(crtc->base.id,
+				new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
+				new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
+				new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI],
+				new->core_clk_rate, stop_req,
+				update_bus, update_clk);
+
+	for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+		if (update_bus & BIT(i)) {
+			ret = _dpu_core_perf_crtc_update_bus(kms, crtc, i);
+			if (ret) {
+				DPU_ERROR("crtc-%d: failed to update bw vote for bus-%d\n",
+					  crtc->base.id, i);
+				return ret;
+			}
+		}
+	}
+
+	/*
+	 * Update the clock after bandwidth vote to ensure
+	 * bandwidth is available before clock rate is increased.
+	 */
+	if (update_clk) {
+		clk_rate = _dpu_core_perf_get_core_clk_rate(kms);
+
+		trace_dpu_core_perf_update_clk(kms->dev, stop_req, clk_rate);
+
+		ret = _dpu_core_perf_set_core_clk_rate(kms, clk_rate);
+		if (ret) {
+			DPU_ERROR("failed to set %s clock rate %llu\n",
+					kms->perf.core_clk->clk_name, clk_rate);
+			return ret;
+		}
+
+		kms->perf.core_clk_rate = clk_rate;
+		DPU_DEBUG("update clk rate = %lld HZ\n", clk_rate);
+	}
+	return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static ssize_t _dpu_core_perf_mode_write(struct file *file,
+		    const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct dpu_core_perf *perf = file->private_data;
+	struct dpu_perf_cfg *cfg;
+	u32 perf_mode = 0;
+	char buf[10];
+
+	if (!perf)
+		return -ENODEV;
+
+	cfg = &perf->catalog->perf;
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0;	/* end of string */
+
+	if (kstrtouint(buf, 0, &perf_mode))
+		return -EFAULT;
+
+	if (perf_mode >= DPU_PERF_MODE_MAX)
+		return -EFAULT;
+
+	if (perf_mode == DPU_PERF_MODE_FIXED) {
+		DRM_INFO("fix performance mode\n");
+	} else if (perf_mode == DPU_PERF_MODE_MINIMUM) {
+		/* run the driver with max clk and BW vote */
+		perf->perf_tune.min_core_clk = perf->max_core_clk_rate;
+		perf->perf_tune.min_bus_vote =
+				(u64) cfg->max_bw_high * 1000;
+		DRM_INFO("minimum performance mode\n");
+	} else if (perf_mode == DPU_PERF_MODE_NORMAL) {
+		/* reset the perf tune params to 0 */
+		perf->perf_tune.min_core_clk = 0;
+		perf->perf_tune.min_bus_vote = 0;
+		DRM_INFO("normal performance mode\n");
+	}
+	perf->perf_tune.mode = perf_mode;
+
+	return count;
+}
+
+static ssize_t _dpu_core_perf_mode_read(struct file *file,
+			char __user *buff, size_t count, loff_t *ppos)
+{
+	struct dpu_core_perf *perf = file->private_data;
+	int len = 0;
+	char buf[DPU_PERF_MODE_STRING_SIZE] = {'\0'};
+
+	if (!perf)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;	/* the end */
+
+	len = snprintf(buf, sizeof(buf),
+			"mode %d min_mdp_clk %llu min_bus_vote %llu\n",
+			perf->perf_tune.mode,
+			perf->perf_tune.min_core_clk,
+			perf->perf_tune.min_bus_vote);
+	if (len < 0 || len >= sizeof(buf))
+		return 0;
+
+	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;   /* increase offset */
+
+	return len;
+}
+
+static const struct file_operations dpu_core_perf_mode_fops = {
+	.open = simple_open,
+	.read = _dpu_core_perf_mode_read,
+	.write = _dpu_core_perf_mode_write,
+};
+
+static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
+{
+	debugfs_remove_recursive(perf->debugfs_root);
+	perf->debugfs_root = NULL;
+}
+
+int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
+		struct dentry *parent)
+{
+	struct dpu_mdss_cfg *catalog = perf->catalog;
+	struct msm_drm_private *priv;
+	struct dpu_kms *dpu_kms;
+
+	priv = perf->dev->dev_private;
+	if (!priv || !priv->kms) {
+		DPU_ERROR("invalid KMS reference\n");
+		return -EINVAL;
+	}
+
+	dpu_kms = to_dpu_kms(priv->kms);
+
+	perf->debugfs_root = debugfs_create_dir("core_perf", parent);
+	if (!perf->debugfs_root) {
+		DPU_ERROR("failed to create core perf debugfs\n");
+		return -EINVAL;
+	}
+
+	debugfs_create_u64("max_core_clk_rate", 0600, perf->debugfs_root,
+			&perf->max_core_clk_rate);
+	debugfs_create_u64("core_clk_rate", 0600, perf->debugfs_root,
+			&perf->core_clk_rate);
+	debugfs_create_u32("enable_bw_release", 0600, perf->debugfs_root,
+			(u32 *)&perf->enable_bw_release);
+	debugfs_create_u32("threshold_low", 0600, perf->debugfs_root,
+			(u32 *)&catalog->perf.max_bw_low);
+	debugfs_create_u32("threshold_high", 0600, perf->debugfs_root,
+			(u32 *)&catalog->perf.max_bw_high);
+	debugfs_create_u32("min_core_ib", 0600, perf->debugfs_root,
+			(u32 *)&catalog->perf.min_core_ib);
+	debugfs_create_u32("min_llcc_ib", 0600, perf->debugfs_root,
+			(u32 *)&catalog->perf.min_llcc_ib);
+	debugfs_create_u32("min_dram_ib", 0600, perf->debugfs_root,
+			(u32 *)&catalog->perf.min_dram_ib);
+	debugfs_create_file("perf_mode", 0600, perf->debugfs_root,
+			(u32 *)perf, &dpu_core_perf_mode_fops);
+	debugfs_create_u64("fix_core_clk_rate", 0600, perf->debugfs_root,
+			&perf->fix_core_clk_rate);
+	debugfs_create_u64("fix_core_ib_vote", 0600, perf->debugfs_root,
+			&perf->fix_core_ib_vote);
+	debugfs_create_u64("fix_core_ab_vote", 0600, perf->debugfs_root,
+			&perf->fix_core_ab_vote);
+
+	return 0;
+}
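+/*
+ * Usage note (illustrative): the knobs above appear under a "core_perf"
+ * debugfs directory. For example, writing "1" to perf_mode selects
+ * DPU_PERF_MODE_MINIMUM (maximum clock and bandwidth votes), writing "0"
+ * returns to DPU_PERF_MODE_NORMAL, and reading perf_mode reports
+ * "mode <m> min_mdp_clk <clk> min_bus_vote <bw>".
+ */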
+#else
+static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
+{
+}
+
+int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
+		struct dentry *parent)
+{
+	return 0;
+}
+#endif
+
+void dpu_core_perf_destroy(struct dpu_core_perf *perf)
+{
+	if (!perf) {
+		DPU_ERROR("invalid parameters\n");
+		return;
+	}
+
+	dpu_core_perf_debugfs_destroy(perf);
+	perf->max_core_clk_rate = 0;
+	perf->core_clk = NULL;
+	perf->phandle = NULL;
+	perf->catalog = NULL;
+	perf->dev = NULL;
+}
+
+int dpu_core_perf_init(struct dpu_core_perf *perf,
+		struct drm_device *dev,
+		struct dpu_mdss_cfg *catalog,
+		struct dpu_power_handle *phandle,
+		struct dss_clk *core_clk)
+{
+	perf->dev = dev;
+	perf->catalog = catalog;
+	perf->phandle = phandle;
+	perf->core_clk = core_clk;
+
+	perf->max_core_clk_rate = core_clk->max_rate;
+	if (!perf->max_core_clk_rate) {
+		DPU_DEBUG("optional max core clk rate not specified, using default\n");
+		perf->max_core_clk_rate = DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE;
+	}
+
+	return 0;
+}
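
The perf_mode handlers above make the whole tuning interface reachable from
userspace through debugfs. A minimal sketch of such a client, assuming debugfs
is mounted at /sys/kernel/debug, that the core_perf directory hangs off the
primary DRM minor, and that the minimum performance mode is encoded as 1 (the
enum values are outside this excerpt):

/*
 * Hypothetical userspace client for the perf_mode node; the path and
 * the mode encoding are assumptions, not part of this patch.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/dri/0/core_perf/perf_mode";
	char buf[256];	/* the read handler rejects short reads */
	ssize_t len;
	int fd;

	fd = open(path, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* assumed encoding: "1" selects the minimum performance mode */
	if (write(fd, "1", 1) != 1)
		perror("write");

	/* reports "mode %d min_mdp_clk %llu min_bus_vote %llu" */
	lseek(fd, 0, SEEK_SET);
	len = read(fd, buf, sizeof(buf) - 1);
	if (len > 0) {
		buf[len] = '\0';
		printf("%s", buf);
	}

	close(fd);
	return 0;
}

Note that _dpu_core_perf_mode_read() returns -EFAULT when the requested count
is smaller than its internal buffer, which is why the client passes a generous
buffer size.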
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
new file mode 100644
index 0000000..fbcbe0c
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
@@ -0,0 +1,133 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_CORE_PERF_H_
+#define _DPU_CORE_PERF_H_
+
+#include <linux/types.h>
+#include <linux/dcache.h>
+#include <linux/mutex.h>
+#include <drm/drm_crtc.h>
+
+#include "dpu_hw_catalog.h"
+#include "dpu_power_handle.h"
+
+#define	DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE	412500000
+
+/**
+ * struct dpu_core_perf_params - definition of performance parameters
+ * @max_per_pipe_ib: maximum instantaneous bandwidth request
+ * @bw_ctl: arbitrated bandwidth request
+ * @core_clk_rate: core clock rate request
+ */
+struct dpu_core_perf_params {
+	u64 max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MAX];
+	u64 bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MAX];
+	u64 core_clk_rate;
+};
+
+/**
+ * struct dpu_core_perf_tune - definition of performance tuning control
+ * @mode: performance mode
+ * @min_core_clk: minimum core clock
+ * @min_bus_vote: minimum bus vote
+ */
+struct dpu_core_perf_tune {
+	u32 mode;
+	u64 min_core_clk;
+	u64 min_bus_vote;
+};
+
+/**
+ * struct dpu_core_perf - definition of core performance context
+ * @dev: Pointer to drm device
+ * @debugfs_root: top level debug folder
+ * @catalog: Pointer to catalog configuration
+ * @phandle: Pointer to power handler
+ * @core_clk: Pointer to core clock structure
+ * @core_clk_rate: current core clock rate
+ * @max_core_clk_rate: maximum allowable core clock rate
+ * @perf_tune: debug control for performance tuning
+ * @enable_bw_release: debug control for bandwidth release
+ * @fix_core_clk_rate: fixed core clock request in Hz used in mode 2
+ * @fix_core_ib_vote: fixed core ib vote in bps used in mode 2
+ * @fix_core_ab_vote: fixed core ab vote in bps used in mode 2
+ */
+struct dpu_core_perf {
+	struct drm_device *dev;
+	struct dentry *debugfs_root;
+	struct dpu_mdss_cfg *catalog;
+	struct dpu_power_handle *phandle;
+	struct dss_clk *core_clk;
+	u64 core_clk_rate;
+	u64 max_core_clk_rate;
+	struct dpu_core_perf_tune perf_tune;
+	u32 enable_bw_release;
+	u64 fix_core_clk_rate;
+	u64 fix_core_ib_vote;
+	u64 fix_core_ab_vote;
+};
+
+/**
+ * dpu_core_perf_crtc_check - validate performance of the given crtc state
+ * @crtc: Pointer to crtc
+ * @state: Pointer to new crtc state
+ * return: zero if success, or error code otherwise
+ */
+int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
+		struct drm_crtc_state *state);
+
+/**
+ * dpu_core_perf_crtc_update - update performance of the given crtc
+ * @crtc: Pointer to crtc
+ * @params_changed: true if crtc parameters are modified
+ * @stop_req: true if this is a stop request
+ * return: zero if success, or error code otherwise
+ */
+int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
+		int params_changed, bool stop_req);
+
+/**
+ * dpu_core_perf_crtc_release_bw - release bandwidth of the given crtc
+ * @crtc: Pointer to crtc
+ */
+void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc);
+
+/**
+ * dpu_core_perf_destroy - destroy the given core performance context
+ * @perf: Pointer to core performance context
+ */
+void dpu_core_perf_destroy(struct dpu_core_perf *perf);
+
+/**
+ * dpu_core_perf_init - initialize the given core performance context
+ * @perf: Pointer to core performance context
+ * @dev: Pointer to drm device
+ * @catalog: Pointer to catalog
+ * @phandle: Pointer to power handle
+ * @core_clk: pointer to core clock
+ */
+int dpu_core_perf_init(struct dpu_core_perf *perf,
+		struct drm_device *dev,
+		struct dpu_mdss_cfg *catalog,
+		struct dpu_power_handle *phandle,
+		struct dss_clk *core_clk);
+
+/**
+ * dpu_core_perf_debugfs_init - initialize debugfs for core performance context
+ * @perf: Pointer to core performance context
+ * @debugfs_parent: Pointer to parent debugfs
+ */
+int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
+		struct dentry *parent);
+
+#endif /* _DPU_CORE_PERF_H_ */
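
The declarations above imply a simple init/use/destroy lifecycle around
struct dpu_core_perf. A minimal bring-up sketch, assuming a dpu_kms-style
owner; apart from phandle (visible elsewhere in this patch), the dpu_kms
field names used here are assumptions:

/* Sketch only: kms->perf, kms->catalog and kms->core_clk are assumed names. */
static int example_core_perf_bringup(struct dpu_kms *kms,
				     struct dentry *debugfs_parent)
{
	int ret;

	ret = dpu_core_perf_init(&kms->perf, kms->dev, kms->catalog,
				 &kms->phandle, &kms->core_clk);
	if (ret)
		return ret;

	/* compiles to a stub when CONFIG_DEBUG_FS is disabled */
	ret = dpu_core_perf_debugfs_init(&kms->perf, debugfs_parent);
	if (ret)
		dpu_core_perf_destroy(&kms->perf);

	return ret;
}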
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 02eb073..4752f08 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -280,287 +280,6 @@
 	mutex_unlock(rp_lock);
 }
 
-/**
- * _dpu_crtc_rp_add_no_lock - add given resource to resource pool without lock
- * @rp: Pointer to original resource pool
- * @type: Resource type
- * @tag: Search tag for given resource
- * @val: Resource handle
- * @ops: Resource callback operations
- * return: 0 if success; error code otherwise
- */
-static int _dpu_crtc_rp_add_no_lock(struct dpu_crtc_respool *rp, u32 type,
-		u64 tag, void *val, struct dpu_crtc_res_ops *ops)
-{
-	struct dpu_crtc_res *res;
-	struct drm_crtc *crtc;
-
-	if (!rp || !ops) {
-		DPU_ERROR("invalid resource pool/ops\n");
-		return -EINVAL;
-	}
-
-	crtc = _dpu_crtc_rp_to_crtc(rp);
-	if (!crtc) {
-		DPU_ERROR("invalid crtc\n");
-		return -EINVAL;
-	}
-
-	list_for_each_entry(res, &rp->res_list, list) {
-		if (res->type != type || res->tag != tag)
-			continue;
-		DPU_ERROR("crtc%d.%u already exist res:0x%x/0x%llx/%pK/%d\n",
-				crtc->base.id, rp->sequence_id,
-				res->type, res->tag, res->val,
-				atomic_read(&res->refcount));
-		return -EEXIST;
-	}
-	res = kzalloc(sizeof(struct dpu_crtc_res), GFP_KERNEL);
-	if (!res)
-		return -ENOMEM;
-	INIT_LIST_HEAD(&res->list);
-	atomic_set(&res->refcount, 1);
-	res->type = type;
-	res->tag = tag;
-	res->val = val;
-	res->ops = *ops;
-	list_add_tail(&res->list, &rp->res_list);
-	DPU_DEBUG("crtc%d.%u added res:0x%x/0x%llx\n",
-			crtc->base.id, rp->sequence_id, type, tag);
-	return 0;
-}
-
-/**
- * _dpu_crtc_rp_add - add given resource to resource pool
- * @rp: Pointer to original resource pool
- * @type: Resource type
- * @tag: Search tag for given resource
- * @val: Resource handle
- * @ops: Resource callback operations
- * return: 0 if success; error code otherwise
- */
-static int _dpu_crtc_rp_add(struct dpu_crtc_respool *rp, u32 type, u64 tag,
-		void *val, struct dpu_crtc_res_ops *ops)
-{
-	int rc;
-
-	if (!rp) {
-		DPU_ERROR("invalid resource pool\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(rp->rp_lock);
-	rc = _dpu_crtc_rp_add_no_lock(rp, type, tag, val, ops);
-	mutex_unlock(rp->rp_lock);
-	return rc;
-}
-
-/**
- * _dpu_crtc_rp_get - lookup the resource from given resource pool and obtain
- *	if available; otherwise, obtain resource from global pool
- * @rp: Pointer to original resource pool
- * @type: Resource type
- * @tag:  Search tag for given resource
- * return: Resource handle if success; pointer error or null otherwise
- */
-static void *_dpu_crtc_rp_get(struct dpu_crtc_respool *rp, u32 type, u64 tag)
-{
-	struct dpu_crtc_respool *old_rp;
-	struct dpu_crtc_res *res;
-	void *val = NULL;
-	int rc;
-	struct drm_crtc *crtc;
-
-	if (!rp) {
-		DPU_ERROR("invalid resource pool\n");
-		return NULL;
-	}
-
-	crtc = _dpu_crtc_rp_to_crtc(rp);
-	if (!crtc) {
-		DPU_ERROR("invalid crtc\n");
-		return NULL;
-	}
-
-	mutex_lock(rp->rp_lock);
-	list_for_each_entry(res, &rp->res_list, list) {
-		if (res->type != type || res->tag != tag)
-			continue;
-		DPU_DEBUG("crtc%d.%u found res:0x%x/0x%llx/%pK/%d\n",
-				crtc->base.id, rp->sequence_id,
-				res->type, res->tag, res->val,
-				atomic_read(&res->refcount));
-		atomic_inc(&res->refcount);
-		res->flags &= ~DPU_CRTC_RES_FLAG_FREE;
-		mutex_unlock(rp->rp_lock);
-		return res->val;
-	}
-	list_for_each_entry(res, &rp->res_list, list) {
-		if (res->type != type || !(res->flags & DPU_CRTC_RES_FLAG_FREE))
-			continue;
-		DPU_DEBUG("crtc%d.%u retag res:0x%x/0x%llx/%pK/%d\n",
-				crtc->base.id, rp->sequence_id,
-				res->type, res->tag, res->val,
-				atomic_read(&res->refcount));
-		atomic_inc(&res->refcount);
-		res->tag = tag;
-		res->flags &= ~DPU_CRTC_RES_FLAG_FREE;
-		mutex_unlock(rp->rp_lock);
-		return res->val;
-	}
-	/* not in this rp, try to grab from global pool */
-	if (rp->ops.get)
-		val = rp->ops.get(NULL, type, -1);
-	if (!IS_ERR_OR_NULL(val))
-		goto add_res;
-	/*
-	 * Search older resource pools for hw blk with matching type,
-	 * necessary when resource is being used by this object,
-	 * but in previous states not yet cleaned up.
-	 *
-	 * This enables searching of all resources currently owned
-	 * by this crtc even though the resource might not be used
-	 * in the current atomic state. This allows those resources
-	 * to be re-acquired by the new atomic state immediately
-	 * without waiting for the resources to be fully released.
-	 */
-	else if (IS_ERR_OR_NULL(val) && (type < DPU_HW_BLK_MAX)) {
-		list_for_each_entry(old_rp, rp->rp_head, rp_list) {
-			if (old_rp == rp)
-				continue;
-
-			list_for_each_entry(res, &old_rp->res_list, list) {
-				if (res->type != type)
-					continue;
-				DRM_DEBUG_KMS("crtc%d.%u in crtc%d.%d\n",
-					      crtc->base.id, rp->sequence_id,
-					      crtc->base.id,
-					      old_rp->sequence_id);
-				if (res->ops.get)
-					res->ops.get(res->val, 0, -1);
-				val = res->val;
-				break;
-			}
-
-			if (!IS_ERR_OR_NULL(val))
-				break;
-		}
-	}
-	if (IS_ERR_OR_NULL(val)) {
-		DPU_DEBUG("crtc%d.%u failed to get res:0x%x//\n",
-				crtc->base.id, rp->sequence_id, type);
-		mutex_unlock(rp->rp_lock);
-		return NULL;
-	}
-add_res:
-	rc = _dpu_crtc_rp_add_no_lock(rp, type, tag, val, &rp->ops);
-	if (rc) {
-		DPU_ERROR("crtc%d.%u failed to add res:0x%x/0x%llx\n",
-				crtc->base.id, rp->sequence_id, type, tag);
-		if (rp->ops.put)
-			rp->ops.put(val);
-		val = NULL;
-	}
-	mutex_unlock(rp->rp_lock);
-	return val;
-}
-
-/**
- * _dpu_crtc_rp_put - return given resource to resource pool
- * @rp: Pointer to original resource pool
- * @type: Resource type
- * @tag: Search tag for given resource
- * return: None
- */
-static void _dpu_crtc_rp_put(struct dpu_crtc_respool *rp, u32 type, u64 tag)
-{
-	struct dpu_crtc_res *res, *next;
-	struct drm_crtc *crtc;
-
-	if (!rp) {
-		DPU_ERROR("invalid resource pool\n");
-		return;
-	}
-
-	crtc = _dpu_crtc_rp_to_crtc(rp);
-	if (!crtc) {
-		DPU_ERROR("invalid crtc\n");
-		return;
-	}
-
-	mutex_lock(rp->rp_lock);
-	list_for_each_entry_safe(res, next, &rp->res_list, list) {
-		if (res->type != type || res->tag != tag)
-			continue;
-		DPU_DEBUG("crtc%d.%u found res:0x%x/0x%llx/%pK/%d\n",
-				crtc->base.id, rp->sequence_id,
-				res->type, res->tag, res->val,
-				atomic_read(&res->refcount));
-		if (res->flags & DPU_CRTC_RES_FLAG_FREE)
-			DPU_ERROR(
-				"crtc%d.%u already free res:0x%x/0x%llx/%pK/%d\n",
-					crtc->base.id, rp->sequence_id,
-					res->type, res->tag, res->val,
-					atomic_read(&res->refcount));
-		else if (atomic_dec_return(&res->refcount) == 0)
-			res->flags |= DPU_CRTC_RES_FLAG_FREE;
-
-		mutex_unlock(rp->rp_lock);
-		return;
-	}
-	DPU_ERROR("crtc%d.%u not found res:0x%x/0x%llx\n",
-			crtc->base.id, rp->sequence_id, type, tag);
-	mutex_unlock(rp->rp_lock);
-}
-
-int dpu_crtc_res_add(struct drm_crtc_state *state, u32 type, u64 tag,
-		void *val, struct dpu_crtc_res_ops *ops)
-{
-	struct dpu_crtc_respool *rp;
-
-	if (!state) {
-		DPU_ERROR("invalid parameters\n");
-		return -EINVAL;
-	}
-
-	rp = &to_dpu_crtc_state(state)->rp;
-	return _dpu_crtc_rp_add(rp, type, tag, val, ops);
-}
-
-void *dpu_crtc_res_get(struct drm_crtc_state *state, u32 type, u64 tag)
-{
-	struct dpu_crtc_respool *rp;
-	void *val;
-
-	if (!state) {
-		DPU_ERROR("invalid parameters\n");
-		return NULL;
-	}
-
-	rp = &to_dpu_crtc_state(state)->rp;
-	val = _dpu_crtc_rp_get(rp, type, tag);
-	if (IS_ERR(val)) {
-		DPU_ERROR("failed to get res type:0x%x:0x%llx\n",
-				type, tag);
-		return NULL;
-	}
-
-	return val;
-}
-
-void dpu_crtc_res_put(struct drm_crtc_state *state, u32 type, u64 tag)
-{
-	struct dpu_crtc_respool *rp;
-
-	if (!state) {
-		DPU_ERROR("invalid parameters\n");
-		return;
-	}
-
-	rp = &to_dpu_crtc_state(state)->rp;
-	_dpu_crtc_rp_put(rp, type, tag);
-}
-
 static void dpu_crtc_destroy(struct drm_crtc *crtc)
 {
 	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
@@ -1604,6 +1323,7 @@
 	struct drm_encoder *encoder;
 	struct msm_drm_private *priv;
 	int ret;
+	unsigned long flags;
 
 	if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
 		DPU_ERROR("invalid crtc\n");
@@ -1619,6 +1339,9 @@
 	if (dpu_kms_is_suspend_state(crtc->dev))
 		_dpu_crtc_set_suspend(crtc, true);
 
+	/* Disable/save vblank irq handling */
+	drm_crtc_vblank_off(crtc);
+
 	mutex_lock(&dpu_crtc->crtc_lock);
 
 	/* wait for frame_event_done completion */
@@ -1656,7 +1379,6 @@
 		dpu_power_handle_unregister_event(dpu_crtc->phandle,
 				dpu_crtc->power_event);
 
-
 	memset(dpu_crtc->mixers, 0, sizeof(dpu_crtc->mixers));
 	dpu_crtc->num_mixers = 0;
 	dpu_crtc->mixers_swapped = false;
@@ -1666,6 +1388,13 @@
 	cstate->bw_split_vote = false;
 
 	mutex_unlock(&dpu_crtc->crtc_lock);
+
+	if (crtc->state->event && !crtc->state->active) {
+		spin_lock_irqsave(&crtc->dev->event_lock, flags);
+		drm_crtc_send_vblank_event(crtc, crtc->state->event);
+		crtc->state->event = NULL;
+		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+	}
 }
 
 static void dpu_crtc_enable(struct drm_crtc *crtc,
@@ -1705,6 +1434,9 @@
 
 	mutex_unlock(&dpu_crtc->crtc_lock);
 
+	/* Enable/restore vblank irq handling */
+	drm_crtc_vblank_on(crtc);
+
 	dpu_crtc->power_event = dpu_power_handle_register_event(
 		dpu_crtc->phandle,
 		DPU_POWER_EVENT_POST_ENABLE | DPU_POWER_EVENT_POST_DISABLE |
@@ -1803,8 +1535,7 @@
 		cnt++;
 
 		dst = drm_plane_state_dest(pstate);
-		if (!drm_rect_intersect(&clip, &dst) ||
-		    !drm_rect_equals(&clip, &dst)) {
+		if (!drm_rect_intersect(&clip, &dst)) {
 			DPU_ERROR("invalid vertical/horizontal destination\n");
 			DPU_ERROR("display: " DRM_RECT_FMT " plane: "
 				  DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
@@ -2349,97 +2080,6 @@
 	.atomic_flush = dpu_crtc_atomic_flush,
 };
 
-static void _dpu_crtc_event_cb(struct kthread_work *work)
-{
-	struct dpu_crtc_event *event;
-	struct dpu_crtc *dpu_crtc;
-	unsigned long irq_flags;
-
-	if (!work) {
-		DPU_ERROR("invalid work item\n");
-		return;
-	}
-
-	event = container_of(work, struct dpu_crtc_event, kt_work);
-
-	/* set dpu_crtc to NULL for static work structures */
-	dpu_crtc = event->dpu_crtc;
-	if (!dpu_crtc)
-		return;
-
-	if (event->cb_func)
-		event->cb_func(&dpu_crtc->base, event->usr);
-
-	spin_lock_irqsave(&dpu_crtc->event_lock, irq_flags);
-	list_add_tail(&event->list, &dpu_crtc->event_free_list);
-	spin_unlock_irqrestore(&dpu_crtc->event_lock, irq_flags);
-}
-
-int dpu_crtc_event_queue(struct drm_crtc *crtc,
-		void (*func)(struct drm_crtc *crtc, void *usr), void *usr)
-{
-	unsigned long irq_flags;
-	struct dpu_crtc *dpu_crtc;
-	struct msm_drm_private *priv;
-	struct dpu_crtc_event *event = NULL;
-	u32 crtc_id;
-
-	if (!crtc || !crtc->dev || !crtc->dev->dev_private || !func) {
-		DPU_ERROR("invalid parameters\n");
-		return -EINVAL;
-	}
-	dpu_crtc = to_dpu_crtc(crtc);
-	priv = crtc->dev->dev_private;
-	crtc_id = drm_crtc_index(crtc);
-
-	/*
-	 * Obtain an event struct from the private cache. This event
-	 * queue may be called from ISR contexts, so use a private
-	 * cache to avoid calling any memory allocation functions.
-	 */
-	spin_lock_irqsave(&dpu_crtc->event_lock, irq_flags);
-	if (!list_empty(&dpu_crtc->event_free_list)) {
-		event = list_first_entry(&dpu_crtc->event_free_list,
-				struct dpu_crtc_event, list);
-		list_del_init(&event->list);
-	}
-	spin_unlock_irqrestore(&dpu_crtc->event_lock, irq_flags);
-
-	if (!event)
-		return -ENOMEM;
-
-	/* populate event node */
-	event->dpu_crtc = dpu_crtc;
-	event->cb_func = func;
-	event->usr = usr;
-
-	/* queue new event request */
-	kthread_init_work(&event->kt_work, _dpu_crtc_event_cb);
-	kthread_queue_work(&priv->event_thread[crtc_id].worker,
-			&event->kt_work);
-
-	return 0;
-}
-
-static int _dpu_crtc_init_events(struct dpu_crtc *dpu_crtc)
-{
-	int i, rc = 0;
-
-	if (!dpu_crtc) {
-		DPU_ERROR("invalid crtc\n");
-		return -EINVAL;
-	}
-
-	spin_lock_init(&dpu_crtc->event_lock);
-
-	INIT_LIST_HEAD(&dpu_crtc->event_free_list);
-	for (i = 0; i < DPU_CRTC_MAX_EVENT_COUNT; ++i)
-		list_add_tail(&dpu_crtc->event_cache[i].list,
-				&dpu_crtc->event_free_list);
-
-	return rc;
-}
-
 /* initialize crtc */
 struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane)
 {
@@ -2447,7 +2087,7 @@
 	struct dpu_crtc *dpu_crtc = NULL;
 	struct msm_drm_private *priv = NULL;
 	struct dpu_kms *kms = NULL;
-	int i, rc;
+	int i;
 
 	priv = dev->dev_private;
 	kms = to_dpu_kms(priv->kms);
@@ -2487,12 +2127,7 @@
 	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
 
 	/* initialize event handling */
-	rc = _dpu_crtc_init_events(dpu_crtc);
-	if (rc) {
-		drm_crtc_cleanup(crtc);
-		kfree(dpu_crtc);
-		return ERR_PTR(rc);
-	}
+	spin_lock_init(&dpu_crtc->event_lock);
 
 	dpu_crtc->phandle = &kms->phandle;
 
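The reworked enable/disable paths above follow the standard atomic vblank
contract: interrupts are quiesced with drm_crtc_vblank_off() before teardown,
re-armed with drm_crtc_vblank_on() once the pipeline runs again, and a
pageflip event still attached to the state on disable is completed by hand,
since no further vblank will fire to deliver it. Reduced to its skeleton
(with the DPU-specific teardown elided), the disable side looks like this:

/* Condensed sketch of the vblank-off + pending-event flush pattern above. */
static void example_crtc_disable(struct drm_crtc *crtc)
{
	unsigned long flags;

	drm_crtc_vblank_off(crtc);	/* no further vblank events will fire */

	/* ... hardware teardown elided ... */

	/* complete a still-pending event by hand, or userspace waits forever */
	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}
}
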
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
new file mode 100644
index 0000000..e87109e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -0,0 +1,423 @@
+/*
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DPU_CRTC_H_
+#define _DPU_CRTC_H_
+
+#include <linux/kthread.h>
+#include <drm/drm_crtc.h>
+#include "dpu_kms.h"
+#include "dpu_core_perf.h"
+#include "dpu_hw_blk.h"
+
+#define DPU_CRTC_NAME_SIZE	12
+
+/* define the maximum number of in-flight frame events */
+#define DPU_CRTC_FRAME_EVENT_SIZE	4
+
+/**
+ * enum dpu_crtc_client_type: crtc client type
+ * @RT_CLIENT:	RealTime client like video/cmd mode display
+ *              voting through apps rsc
+ * @NRT_CLIENT:	Non-RealTime client like WB display
+ *              voting through apps rsc
+ */
+enum dpu_crtc_client_type {
+	RT_CLIENT,
+	NRT_CLIENT,
+};
+
+/**
+ * enum dpu_crtc_smmu_state:	smmu state
+ * @ATTACHED:	 all the context banks are attached.
+ * @DETACHED:	 all the context banks are detached.
+ * @ATTACH_ALL_REQ:	 transient state of attaching context banks.
+ * @DETACH_ALL_REQ:	 transient state of detaching context banks.
+ */
+enum dpu_crtc_smmu_state {
+	ATTACHED = 0,
+	DETACHED,
+	ATTACH_ALL_REQ,
+	DETACH_ALL_REQ,
+};
+
+/**
+ * enum dpu_crtc_smmu_state_transition_type: state transition type
+ * @NONE: no pending state transitions
+ * @PRE_COMMIT: state transitions should be done before processing the commit
+ * @POST_COMMIT: state transitions to be done after processing the commit.
+ */
+enum dpu_crtc_smmu_state_transition_type {
+	NONE,
+	PRE_COMMIT,
+	POST_COMMIT
+};
+
+/**
+ * struct dpu_crtc_smmu_state_data: stores the smmu state and transition type
+ * @state: current state of smmu context banks
+ * @transition_type: transition request type
+ * @transition_error: whether an error occurred while transitioning the state
+ */
+struct dpu_crtc_smmu_state_data {
+	uint32_t state;
+	uint32_t transition_type;
+	uint32_t transition_error;
+};
+
+/**
+ * struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC
+ * @hw_lm:	LM HW Driver context
+ * @hw_ctl:	CTL Path HW driver context
+ * @encoder:	Encoder attached to this lm & ctl
+ * @mixer_op_mode:	mixer blending operation mode
+ * @flush_mask:	mixer flush mask for ctl, mixer and pipe
+ */
+struct dpu_crtc_mixer {
+	struct dpu_hw_mixer *hw_lm;
+	struct dpu_hw_ctl *hw_ctl;
+	struct drm_encoder *encoder;
+	u32 mixer_op_mode;
+	u32 flush_mask;
+};
+
+/**
+ * struct dpu_crtc_frame_event: stores crtc frame event for crtc processing
+ * @work:	base work structure
+ * @crtc:	Pointer to crtc handling this event
+ * @list:	event list
+ * @ts:		timestamp at queue entry
+ * @event:	event identifier
+ */
+struct dpu_crtc_frame_event {
+	struct kthread_work work;
+	struct drm_crtc *crtc;
+	struct list_head list;
+	ktime_t ts;
+	u32 event;
+};
+
+/*
+ * Maximum number of free event structures to cache
+ */
+#define DPU_CRTC_MAX_EVENT_COUNT	16
+
+/**
+ * struct dpu_crtc - virtualized CRTC data structure
+ * @base          : Base drm crtc structure
+ * @name          : ASCII description of this crtc
+ * @num_ctls      : Number of ctl paths in use
+ * @num_mixers    : Number of mixers in use
+ * @mixers_swapped: Whether the mixers have been swapped for left/right update
+ *                  especially in the case of DSC Merge.
+ * @mixers        : List of active mixers
+ * @event         : Pointer to last received drm vblank event. If there is a
+ *                  pending vblank event, this will be non-null.
+ * @vsync_count   : Running count of received vsync events
+ * @drm_requested_vblank : Whether vblanks have been enabled in the encoder
+ * @property_info : Opaque structure for generic property support
+ * @property_defaults : Array of default values for generic property support
+ * @stage_cfg     : H/w mixer stage configuration
+ * @debugfs_root  : Parent of debugfs node
+ * @vblank_cb_count : count of vblank callback since last reset
+ * @play_count    : frame count between crtc enable and disable
+ * @vblank_cb_time  : ktime at vblank count reset
+ * @vblank_requested : whether the user has requested vblank events
+ * @suspend         : whether or not a suspend operation is in progress
+ * @enabled       : whether the DPU CRTC is currently enabled. Updated in the
+ *                  commit thread, not at state-swap time (which is earlier),
+ *                  so it is safe to base decisions on during VBLANK on/off work
+ * @feature_list  : list of color processing features supported on a crtc
+ * @active_list   : list of color processing features that are active
+ * @dirty_list    : list of color processing features that are dirty
+ * @ad_dirty: list containing ad properties that are dirty
+ * @ad_active: list containing ad properties that are active
+ * @crtc_lock     : crtc lock around create, destroy and access.
+ * @frame_pending : Whether or not an update is pending
+ * @frame_events  : static allocation of in-flight frame events
+ * @frame_event_list : available frame event list
+ * @spin_lock     : spin lock for frame event, transaction status, etc...
+ * @frame_done_comp    : for frame_event_done synchronization
+ * @event_thread  : Pointer to event handler thread
+ * @event_worker  : Event worker queue
+ * @event_lock    : Spinlock around event handling code
+ * @misr_enable   : boolean flag indicating misr enable/disable status.
+ * @misr_frame_count  : misr frame count provided by client
+ * @misr_data     : stores misr data before turning off the clocks.
+ * @phandle: Pointer to power handler
+ * @power_event   : registered power event handle
+ * @cur_perf      : current performance committed to clock/bandwidth driver
+ * @rp_lock       : serialization lock for resource pool
+ * @rp_head       : list of active resource pool
+ * @scl3_cfg_lut  : qseed3 lut config
+ */
+struct dpu_crtc {
+	struct drm_crtc base;
+	char name[DPU_CRTC_NAME_SIZE];
+
+	/* HW Resources reserved for the crtc */
+	u32 num_ctls;
+	u32 num_mixers;
+	bool mixers_swapped;
+	struct dpu_crtc_mixer mixers[CRTC_DUAL_MIXERS];
+	struct dpu_hw_scaler3_lut_cfg *scl3_lut_cfg;
+
+	struct drm_pending_vblank_event *event;
+	u32 vsync_count;
+
+	struct dpu_hw_stage_cfg stage_cfg;
+	struct dentry *debugfs_root;
+
+	u32 vblank_cb_count;
+	u64 play_count;
+	ktime_t vblank_cb_time;
+	bool vblank_requested;
+	bool suspend;
+	bool enabled;
+
+	struct list_head feature_list;
+	struct list_head active_list;
+	struct list_head dirty_list;
+	struct list_head ad_dirty;
+	struct list_head ad_active;
+
+	struct mutex crtc_lock;
+
+	atomic_t frame_pending;
+	struct dpu_crtc_frame_event frame_events[DPU_CRTC_FRAME_EVENT_SIZE];
+	struct list_head frame_event_list;
+	spinlock_t spin_lock;
+	struct completion frame_done_comp;
+
+	/* for handling internal event thread */
+	spinlock_t event_lock;
+	bool misr_enable;
+	u32 misr_frame_count;
+	u32 misr_data[CRTC_DUAL_MIXERS];
+
+	struct dpu_power_handle *phandle;
+	struct dpu_power_event *power_event;
+
+	struct dpu_core_perf_params cur_perf;
+
+	struct mutex rp_lock;
+	struct list_head rp_head;
+
+	struct dpu_crtc_smmu_state_data smmu_state;
+};
+
+#define to_dpu_crtc(x) container_of(x, struct dpu_crtc, base)
+
+/**
+ * struct dpu_crtc_res_ops - common operations for crtc resources
+ * @get: get given resource
+ * @put: put given resource
+ */
+struct dpu_crtc_res_ops {
+	void *(*get)(void *val, u32 type, u64 tag);
+	void (*put)(void *val);
+};
+
+#define DPU_CRTC_RES_FLAG_FREE		BIT(0)
+
+/**
+ * struct dpu_crtc_res - definition of crtc resources
+ * @list: list of crtc resource
+ * @type: crtc resource type
+ * @tag: unique identifier per type
+ * @refcount: reference/usage count
+ * @ops: callback operations
+ * @val: resource handle associated with type/tag
+ * @flags: customization flags
+ */
+struct dpu_crtc_res {
+	struct list_head list;
+	u32 type;
+	u64 tag;
+	atomic_t refcount;
+	struct dpu_crtc_res_ops ops;
+	void *val;
+	u32 flags;
+};
+
+/**
+ * struct dpu_crtc_respool - crtc resource pool
+ * @rp_lock: pointer to serialization lock
+ * @rp_head: pointer to head of active resource pools of this crtc
+ * @rp_list: list of crtc resource pool
+ * @sequence_id: sequence identifier, incremented per state duplication
+ * @res_list: list of resource managed by this resource pool
+ * @ops: resource operations for parent resource pool
+ */
+struct dpu_crtc_respool {
+	struct mutex *rp_lock;
+	struct list_head *rp_head;
+	struct list_head rp_list;
+	u32 sequence_id;
+	struct list_head res_list;
+	struct dpu_crtc_res_ops ops;
+};
+
+/**
+ * struct dpu_crtc_state - dpu container for atomic crtc state
+ * @base: Base drm crtc state structure
+ * @is_ppsplit    : Whether current topology requires PPSplit special handling
+ * @bw_control    : true if bw/clk controlled by core bw/clk properties
+ * @bw_split_vote : true if bw controlled by llcc/dram bw properties
+ * @lm_bounds     : LM boundaries based on current mode full resolution, no ROI.
+ *                  Origin top left of CRTC.
+ * @property_state: Local storage for msm_prop properties
+ * @property_values: Current crtc property values
+ * @input_fence_timeout_ns : Cached input fence timeout, in ns
+ * @new_perf: new performance state being requested
+ */
+struct dpu_crtc_state {
+	struct drm_crtc_state base;
+
+	bool bw_control;
+	bool bw_split_vote;
+
+	bool is_ppsplit;
+	struct drm_rect lm_bounds[CRTC_DUAL_MIXERS];
+
+	uint64_t input_fence_timeout_ns;
+
+	struct dpu_core_perf_params new_perf;
+	struct dpu_crtc_respool rp;
+};
+
+#define to_dpu_crtc_state(x) \
+	container_of(x, struct dpu_crtc_state, base)
+
+/**
+ * dpu_crtc_get_mixer_width - get the mixer width
+ * Mixer width will be the same as the panel width (/2 for split)
+ */
+static inline int dpu_crtc_get_mixer_width(struct dpu_crtc *dpu_crtc,
+	struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
+{
+	u32 mixer_width;
+
+	if (!dpu_crtc || !cstate || !mode)
+		return 0;
+
+	mixer_width = (dpu_crtc->num_mixers == CRTC_DUAL_MIXERS ?
+			mode->hdisplay / CRTC_DUAL_MIXERS : mode->hdisplay);
+
+	return mixer_width;
+}
+
+/**
+ * dpu_crtc_get_mixer_height - get the mixer height
+ * Mixer height will be the same as the panel height
+ */
+static inline int dpu_crtc_get_mixer_height(struct dpu_crtc *dpu_crtc,
+		struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
+{
+	if (!dpu_crtc || !cstate || !mode)
+		return 0;
+
+	return mode->vdisplay;
+}
+
+/**
+ * dpu_crtc_frame_pending - return the number of pending frames
+ * @crtc: Pointer to drm crtc object
+ */
+static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
+{
+	struct dpu_crtc *dpu_crtc;
+
+	if (!crtc)
+		return -EINVAL;
+
+	dpu_crtc = to_dpu_crtc(crtc);
+	return atomic_read(&dpu_crtc->frame_pending);
+}
+
+/**
+ * dpu_crtc_vblank - enable or disable vblanks for this crtc
+ * @crtc: Pointer to drm crtc object
+ * @en: true to enable vblanks, false to disable
+ */
+int dpu_crtc_vblank(struct drm_crtc *crtc, bool en);
+
+/**
+ * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
+ * @crtc: Pointer to drm crtc object
+ */
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc);
+
+/**
+ * dpu_crtc_complete_commit - callback signalling completion of current commit
+ * @crtc: Pointer to drm crtc object
+ * @old_state: Pointer to drm crtc old state object
+ */
+void dpu_crtc_complete_commit(struct drm_crtc *crtc,
+		struct drm_crtc_state *old_state);
+
+/**
+ * dpu_crtc_init - create a new crtc object
+ * @dev: dpu device
+ * @plane: base plane
+ * @Return: new crtc object or error
+ */
+struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane);
+
+/**
+ * dpu_crtc_register_custom_event - api for enabling/disabling crtc event
+ * @kms: Pointer to dpu_kms
+ * @crtc_drm: Pointer to crtc object
+ * @event: Event that the client is interested in
+ * @en: Flag to enable/disable the event
+ */
+int dpu_crtc_register_custom_event(struct dpu_kms *kms,
+		struct drm_crtc *crtc_drm, u32 event, bool en);
+
+/**
+ * dpu_crtc_get_intf_mode - get interface mode of the given crtc
+ * @crtc: Pointer to crtc
+ */
+enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc);
+
+/**
+ * dpu_crtc_get_client_type - check the crtc type (rt, nrt etc.)
+ * @crtc: Pointer to crtc
+ */
+static inline enum dpu_crtc_client_type dpu_crtc_get_client_type(
+						struct drm_crtc *crtc)
+{
+	struct dpu_crtc_state *cstate =
+			crtc ? to_dpu_crtc_state(crtc->state) : NULL;
+
+	if (!cstate)
+		return NRT_CLIENT;
+
+	return RT_CLIENT;
+}
+
+/**
+ * dpu_crtc_is_enabled - check if dpu crtc is enabled or not
+ * @crtc: Pointer to crtc
+ */
+static inline bool dpu_crtc_is_enabled(struct drm_crtc *crtc)
+{
+	return crtc ? crtc->enabled : false;
+}
+
+#endif /* _DPU_CRTC_H_ */
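
to_dpu_crtc() and to_dpu_crtc_state() are plain container_of() upcasts from
the embedded DRM base objects, and the inline helpers above all build on
them. An illustrative helper (not part of this patch) showing how they
compose:

/* Illustrative only: logs a crtc's mixer geometry and pending frames. */
static void example_log_crtc(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct drm_display_mode *mode = &crtc->state->adjusted_mode;

	DPU_DEBUG("%s: mixer %dx%d, %d frame(s) pending\n",
		  dpu_crtc->name,
		  dpu_crtc_get_mixer_width(dpu_crtc, cstate, mode),
		  dpu_crtc_get_mixer_height(dpu_crtc, cstate, mode),
		  dpu_crtc_frame_pending(crtc));
}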
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
new file mode 100644
index 0000000..e741d26
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
@@ -0,0 +1,2393 @@
+/* Copyright (c) 2009-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/dma-buf.h>
+#include <linux/slab.h>
+#include <linux/list_sort.h>
+#include <linux/pm_runtime.h>
+
+#include "dpu_dbg.h"
+#include "disp/dpu1/dpu_hw_catalog.h"
+
+
+#define DEFAULT_DBGBUS_DPU	DPU_DBG_DUMP_IN_MEM
+#define DEFAULT_DBGBUS_VBIFRT	DPU_DBG_DUMP_IN_MEM
+#define REG_BASE_NAME_LEN	80
+
+#define DBGBUS_FLAGS_DSPP	BIT(0)
+#define DBGBUS_DSPP_STATUS	0x34C
+
+#define DBGBUS_NAME_DPU		"dpu"
+#define DBGBUS_NAME_VBIF_RT	"vbif_rt"
+
+/* offsets from dpu top address for the debug buses */
+#define DBGBUS_SSPP0	0x188
+#define DBGBUS_AXI_INTF	0x194
+#define DBGBUS_SSPP1	0x298
+#define DBGBUS_DSPP	0x348
+#define DBGBUS_PERIPH	0x418
+
+#define TEST_MASK(id, tp)	((id << 4) | (tp << 1) | BIT(0))
+
+/* following offsets are with respect to MDP VBIF base for DBG BUS access */
+#define MMSS_VBIF_CLKON			0x4
+#define MMSS_VBIF_TEST_BUS_OUT_CTRL	0x210
+#define MMSS_VBIF_TEST_BUS_OUT		0x230
+
+/* Vbif error info */
+#define MMSS_VBIF_PND_ERR		0x190
+#define MMSS_VBIF_SRC_ERR		0x194
+#define MMSS_VBIF_XIN_HALT_CTRL1	0x204
+#define MMSS_VBIF_ERR_INFO		0x1a0
+#define MMSS_VBIF_ERR_INFO_1		0x1a4
+#define MMSS_VBIF_CLIENT_NUM		14
+
+/**
+ * struct dpu_dbg_reg_base - register region base.
+ *	may have sub-ranges: sub-ranges are used for dumping
+ *	or may not have sub-ranges: dumping is base -> max_offset
+ * @reg_base_head: head of this node
+ * @name: register base name
+ * @base: base pointer
+ * @off: cached offset of region for manual register dumping
+ * @cnt: cached range of region for manual register dumping
+ * @max_offset: length of region
+ * @buf: buffer used for manual register dumping
+ * @buf_len:  buffer length used for manual register dumping
+ * @cb: callback for external dump function, null if not defined
+ * @cb_ptr: private pointer to callback function
+ */
+struct dpu_dbg_reg_base {
+	struct list_head reg_base_head;
+	char name[REG_BASE_NAME_LEN];
+	void __iomem *base;
+	size_t off;
+	size_t cnt;
+	size_t max_offset;
+	char *buf;
+	size_t buf_len;
+	void (*cb)(void *ptr);
+	void *cb_ptr;
+};
+
+struct dpu_debug_bus_entry {
+	u32 wr_addr;
+	u32 block_id;
+	u32 test_id;
+	void (*analyzer)(void __iomem *mem_base,
+				struct dpu_debug_bus_entry *entry, u32 val);
+};
+
+struct vbif_debug_bus_entry {
+	u32 disable_bus_addr;
+	u32 block_bus_addr;
+	u32 bit_offset;
+	u32 block_cnt;
+	u32 test_pnt_start;
+	u32 test_pnt_cnt;
+};
+
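/*
 * Each vbif_debug_bus_entry spans block_cnt blocks, each exposing
 * test_pnt_cnt test points starting at test_pnt_start. The real dump
 * loop appears later in this file; the sketch below is an assumption
 * about that elided code, not part of this patch.
 */
static void example_vbif_dump_entry(void __iomem *vbif_base,
				    struct vbif_debug_bus_entry *e)
{
	u32 i, j, val;

	for (i = 0; i < e->block_cnt; i++) {
		/* select the block on the test bus */
		writel_relaxed(1 << (i + e->bit_offset),
			       vbif_base + e->block_bus_addr);
		for (j = e->test_pnt_start;
		     j < e->test_pnt_start + e->test_pnt_cnt; j++) {
			/* select the test point and sample the bus */
			writel_relaxed(j, vbif_base + e->block_bus_addr + 0x4);
			val = readl_relaxed(vbif_base +
					    MMSS_VBIF_TEST_BUS_OUT);
			pr_err("vbif 0x%x %d %d 0x%x\n",
			       e->block_bus_addr, i, j, val);
		}
	}
}
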
+struct dpu_dbg_debug_bus_common {
+	char *name;
+	u32 enable_mask;
+	bool include_in_deferred_work;
+	u32 flags;
+	u32 entries_size;
+	u32 *dumped_content;
+};
+
+struct dpu_dbg_dpu_debug_bus {
+	struct dpu_dbg_debug_bus_common cmn;
+	struct dpu_debug_bus_entry *entries;
+	u32 top_blk_off;
+};
+
+struct dpu_dbg_vbif_debug_bus {
+	struct dpu_dbg_debug_bus_common cmn;
+	struct vbif_debug_bus_entry *entries;
+};
+
+/**
+ * struct dpu_dbg_base - global dpu debug base structure
+ * @reg_base_list: list of register dumping regions
+ * @dev: device pointer
+ * @dump_work: work struct for deferring register dump work to separate thread
+ * @dbgbus_dpu: debug bus structure for the dpu
+ * @dbgbus_vbif_rt: debug bus structure for the realtime vbif
+ */
+static struct dpu_dbg_base {
+	struct list_head reg_base_list;
+	struct device *dev;
+
+	struct work_struct dump_work;
+
+	struct dpu_dbg_dpu_debug_bus dbgbus_dpu;
+	struct dpu_dbg_vbif_debug_bus dbgbus_vbif_rt;
+} dpu_dbg_base;
+
+static void _dpu_debug_bus_xbar_dump(void __iomem *mem_base,
+		struct dpu_debug_bus_entry *entry, u32 val)
+{
+	dev_err(dpu_dbg_base.dev, "xbar 0x%x %d %d 0x%x\n",
+			entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static void _dpu_debug_bus_lm_dump(void __iomem *mem_base,
+		struct dpu_debug_bus_entry *entry, u32 val)
+{
+	if (!(val & 0xFFF000))
+		return;
+
+	dev_err(dpu_dbg_base.dev, "lm 0x%x %d %d 0x%x\n",
+			entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static void _dpu_debug_bus_ppb0_dump(void __iomem *mem_base,
+		struct dpu_debug_bus_entry *entry, u32 val)
+{
+	if (!(val & BIT(15)))
+		return;
+
+	dev_err(dpu_dbg_base.dev, "ppb0 0x%x %d %d 0x%x\n",
+			entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static void _dpu_debug_bus_ppb1_dump(void __iomem *mem_base,
+		struct dpu_debug_bus_entry *entry, u32 val)
+{
+	if (!(val & BIT(15)))
+		return;
+
+	dev_err(dpu_dbg_base.dev, "ppb1 0x%x %d %d 0x%x\n",
+			entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
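/*
 * Entries in the tables below are selected by writing
 * TEST_MASK(block_id, test_id) to wr_addr and reading the result back
 * from the status register; an entry's optional analyzer callback then
 * decodes the value. The real loop appears later in this file; the
 * sketch below is an assumption about that elided code, not part of
 * this patch.
 */
static void example_dump_dpu_entry(void __iomem *mem_base,
				   struct dpu_debug_bus_entry *entry)
{
	u32 val;

	writel_relaxed(TEST_MASK(entry->block_id, entry->test_id),
		       mem_base + entry->wr_addr);
	wmb();	/* program the test point before sampling */

	val = readl_relaxed(mem_base + DBGBUS_DSPP_STATUS);
	if (entry->analyzer)
		entry->analyzer(mem_base, entry, val);
}
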
+static struct dpu_debug_bus_entry dbg_bus_dpu_8998[] = {
+
+	/* Unpack 0 sspp 0 */
+	{ DBGBUS_SSPP0, 50, 2 },
+	{ DBGBUS_SSPP0, 60, 2 },
+	{ DBGBUS_SSPP0, 70, 2 },
+	{ DBGBUS_SSPP0, 85, 2 },
+
+	/* Unpack 0 sspp 1 */
+	{ DBGBUS_SSPP1, 50, 2 },
+	{ DBGBUS_SSPP1, 60, 2 },
+	{ DBGBUS_SSPP1, 70, 2 },
+	{ DBGBUS_SSPP1, 85, 2 },
+
+	/* scheduler */
+	{ DBGBUS_DSPP, 130, 0 },
+	{ DBGBUS_DSPP, 130, 1 },
+	{ DBGBUS_DSPP, 130, 2 },
+	{ DBGBUS_DSPP, 130, 3 },
+	{ DBGBUS_DSPP, 130, 4 },
+	{ DBGBUS_DSPP, 130, 5 },
+
+	/* qseed */
+	{ DBGBUS_SSPP0, 6, 0},
+	{ DBGBUS_SSPP0, 6, 1},
+	{ DBGBUS_SSPP0, 26, 0},
+	{ DBGBUS_SSPP0, 26, 1},
+	{ DBGBUS_SSPP1, 6, 0},
+	{ DBGBUS_SSPP1, 6, 1},
+	{ DBGBUS_SSPP1, 26, 0},
+	{ DBGBUS_SSPP1, 26, 1},
+
+	/* scale */
+	{ DBGBUS_SSPP0, 16, 0},
+	{ DBGBUS_SSPP0, 16, 1},
+	{ DBGBUS_SSPP0, 36, 0},
+	{ DBGBUS_SSPP0, 36, 1},
+	{ DBGBUS_SSPP1, 16, 0},
+	{ DBGBUS_SSPP1, 16, 1},
+	{ DBGBUS_SSPP1, 36, 0},
+	{ DBGBUS_SSPP1, 36, 1},
+
+	/* fetch sspp0 */
+
+	/* vig 0 */
+	{ DBGBUS_SSPP0, 0, 0 },
+	{ DBGBUS_SSPP0, 0, 1 },
+	{ DBGBUS_SSPP0, 0, 2 },
+	{ DBGBUS_SSPP0, 0, 3 },
+	{ DBGBUS_SSPP0, 0, 4 },
+	{ DBGBUS_SSPP0, 0, 5 },
+	{ DBGBUS_SSPP0, 0, 6 },
+	{ DBGBUS_SSPP0, 0, 7 },
+
+	{ DBGBUS_SSPP0, 1, 0 },
+	{ DBGBUS_SSPP0, 1, 1 },
+	{ DBGBUS_SSPP0, 1, 2 },
+	{ DBGBUS_SSPP0, 1, 3 },
+	{ DBGBUS_SSPP0, 1, 4 },
+	{ DBGBUS_SSPP0, 1, 5 },
+	{ DBGBUS_SSPP0, 1, 6 },
+	{ DBGBUS_SSPP0, 1, 7 },
+
+	{ DBGBUS_SSPP0, 2, 0 },
+	{ DBGBUS_SSPP0, 2, 1 },
+	{ DBGBUS_SSPP0, 2, 2 },
+	{ DBGBUS_SSPP0, 2, 3 },
+	{ DBGBUS_SSPP0, 2, 4 },
+	{ DBGBUS_SSPP0, 2, 5 },
+	{ DBGBUS_SSPP0, 2, 6 },
+	{ DBGBUS_SSPP0, 2, 7 },
+
+	{ DBGBUS_SSPP0, 4, 0 },
+	{ DBGBUS_SSPP0, 4, 1 },
+	{ DBGBUS_SSPP0, 4, 2 },
+	{ DBGBUS_SSPP0, 4, 3 },
+	{ DBGBUS_SSPP0, 4, 4 },
+	{ DBGBUS_SSPP0, 4, 5 },
+	{ DBGBUS_SSPP0, 4, 6 },
+	{ DBGBUS_SSPP0, 4, 7 },
+
+	{ DBGBUS_SSPP0, 5, 0 },
+	{ DBGBUS_SSPP0, 5, 1 },
+	{ DBGBUS_SSPP0, 5, 2 },
+	{ DBGBUS_SSPP0, 5, 3 },
+	{ DBGBUS_SSPP0, 5, 4 },
+	{ DBGBUS_SSPP0, 5, 5 },
+	{ DBGBUS_SSPP0, 5, 6 },
+	{ DBGBUS_SSPP0, 5, 7 },
+
+	/* vig 2 */
+	{ DBGBUS_SSPP0, 20, 0 },
+	{ DBGBUS_SSPP0, 20, 1 },
+	{ DBGBUS_SSPP0, 20, 2 },
+	{ DBGBUS_SSPP0, 20, 3 },
+	{ DBGBUS_SSPP0, 20, 4 },
+	{ DBGBUS_SSPP0, 20, 5 },
+	{ DBGBUS_SSPP0, 20, 6 },
+	{ DBGBUS_SSPP0, 20, 7 },
+
+	{ DBGBUS_SSPP0, 21, 0 },
+	{ DBGBUS_SSPP0, 21, 1 },
+	{ DBGBUS_SSPP0, 21, 2 },
+	{ DBGBUS_SSPP0, 21, 3 },
+	{ DBGBUS_SSPP0, 21, 4 },
+	{ DBGBUS_SSPP0, 21, 5 },
+	{ DBGBUS_SSPP0, 21, 6 },
+	{ DBGBUS_SSPP0, 21, 7 },
+
+	{ DBGBUS_SSPP0, 22, 0 },
+	{ DBGBUS_SSPP0, 22, 1 },
+	{ DBGBUS_SSPP0, 22, 2 },
+	{ DBGBUS_SSPP0, 22, 3 },
+	{ DBGBUS_SSPP0, 22, 4 },
+	{ DBGBUS_SSPP0, 22, 5 },
+	{ DBGBUS_SSPP0, 22, 6 },
+	{ DBGBUS_SSPP0, 22, 7 },
+
+	{ DBGBUS_SSPP0, 24, 0 },
+	{ DBGBUS_SSPP0, 24, 1 },
+	{ DBGBUS_SSPP0, 24, 2 },
+	{ DBGBUS_SSPP0, 24, 3 },
+	{ DBGBUS_SSPP0, 24, 4 },
+	{ DBGBUS_SSPP0, 24, 5 },
+	{ DBGBUS_SSPP0, 24, 6 },
+	{ DBGBUS_SSPP0, 24, 7 },
+
+	{ DBGBUS_SSPP0, 25, 0 },
+	{ DBGBUS_SSPP0, 25, 1 },
+	{ DBGBUS_SSPP0, 25, 2 },
+	{ DBGBUS_SSPP0, 25, 3 },
+	{ DBGBUS_SSPP0, 25, 4 },
+	{ DBGBUS_SSPP0, 25, 5 },
+	{ DBGBUS_SSPP0, 25, 6 },
+	{ DBGBUS_SSPP0, 25, 7 },
+
+	/* dma 2 */
+	{ DBGBUS_SSPP0, 30, 0 },
+	{ DBGBUS_SSPP0, 30, 1 },
+	{ DBGBUS_SSPP0, 30, 2 },
+	{ DBGBUS_SSPP0, 30, 3 },
+	{ DBGBUS_SSPP0, 30, 4 },
+	{ DBGBUS_SSPP0, 30, 5 },
+	{ DBGBUS_SSPP0, 30, 6 },
+	{ DBGBUS_SSPP0, 30, 7 },
+
+	{ DBGBUS_SSPP0, 31, 0 },
+	{ DBGBUS_SSPP0, 31, 1 },
+	{ DBGBUS_SSPP0, 31, 2 },
+	{ DBGBUS_SSPP0, 31, 3 },
+	{ DBGBUS_SSPP0, 31, 4 },
+	{ DBGBUS_SSPP0, 31, 5 },
+	{ DBGBUS_SSPP0, 31, 6 },
+	{ DBGBUS_SSPP0, 31, 7 },
+
+	{ DBGBUS_SSPP0, 32, 0 },
+	{ DBGBUS_SSPP0, 32, 1 },
+	{ DBGBUS_SSPP0, 32, 2 },
+	{ DBGBUS_SSPP0, 32, 3 },
+	{ DBGBUS_SSPP0, 32, 4 },
+	{ DBGBUS_SSPP0, 32, 5 },
+	{ DBGBUS_SSPP0, 32, 6 },
+	{ DBGBUS_SSPP0, 32, 7 },
+
+	{ DBGBUS_SSPP0, 33, 0 },
+	{ DBGBUS_SSPP0, 33, 1 },
+	{ DBGBUS_SSPP0, 33, 2 },
+	{ DBGBUS_SSPP0, 33, 3 },
+	{ DBGBUS_SSPP0, 33, 4 },
+	{ DBGBUS_SSPP0, 33, 5 },
+	{ DBGBUS_SSPP0, 33, 6 },
+	{ DBGBUS_SSPP0, 33, 7 },
+
+	{ DBGBUS_SSPP0, 34, 0 },
+	{ DBGBUS_SSPP0, 34, 1 },
+	{ DBGBUS_SSPP0, 34, 2 },
+	{ DBGBUS_SSPP0, 34, 3 },
+	{ DBGBUS_SSPP0, 34, 4 },
+	{ DBGBUS_SSPP0, 34, 5 },
+	{ DBGBUS_SSPP0, 34, 6 },
+	{ DBGBUS_SSPP0, 34, 7 },
+
+	{ DBGBUS_SSPP0, 35, 0 },
+	{ DBGBUS_SSPP0, 35, 1 },
+	{ DBGBUS_SSPP0, 35, 2 },
+	{ DBGBUS_SSPP0, 35, 3 },
+
+	/* dma 0 */
+	{ DBGBUS_SSPP0, 40, 0 },
+	{ DBGBUS_SSPP0, 40, 1 },
+	{ DBGBUS_SSPP0, 40, 2 },
+	{ DBGBUS_SSPP0, 40, 3 },
+	{ DBGBUS_SSPP0, 40, 4 },
+	{ DBGBUS_SSPP0, 40, 5 },
+	{ DBGBUS_SSPP0, 40, 6 },
+	{ DBGBUS_SSPP0, 40, 7 },
+
+	{ DBGBUS_SSPP0, 41, 0 },
+	{ DBGBUS_SSPP0, 41, 1 },
+	{ DBGBUS_SSPP0, 41, 2 },
+	{ DBGBUS_SSPP0, 41, 3 },
+	{ DBGBUS_SSPP0, 41, 4 },
+	{ DBGBUS_SSPP0, 41, 5 },
+	{ DBGBUS_SSPP0, 41, 6 },
+	{ DBGBUS_SSPP0, 41, 7 },
+
+	{ DBGBUS_SSPP0, 42, 0 },
+	{ DBGBUS_SSPP0, 42, 1 },
+	{ DBGBUS_SSPP0, 42, 2 },
+	{ DBGBUS_SSPP0, 42, 3 },
+	{ DBGBUS_SSPP0, 42, 4 },
+	{ DBGBUS_SSPP0, 42, 5 },
+	{ DBGBUS_SSPP0, 42, 6 },
+	{ DBGBUS_SSPP0, 42, 7 },
+
+	{ DBGBUS_SSPP0, 44, 0 },
+	{ DBGBUS_SSPP0, 44, 1 },
+	{ DBGBUS_SSPP0, 44, 2 },
+	{ DBGBUS_SSPP0, 44, 3 },
+	{ DBGBUS_SSPP0, 44, 4 },
+	{ DBGBUS_SSPP0, 44, 5 },
+	{ DBGBUS_SSPP0, 44, 6 },
+	{ DBGBUS_SSPP0, 44, 7 },
+
+	{ DBGBUS_SSPP0, 45, 0 },
+	{ DBGBUS_SSPP0, 45, 1 },
+	{ DBGBUS_SSPP0, 45, 2 },
+	{ DBGBUS_SSPP0, 45, 3 },
+	{ DBGBUS_SSPP0, 45, 4 },
+	{ DBGBUS_SSPP0, 45, 5 },
+	{ DBGBUS_SSPP0, 45, 6 },
+	{ DBGBUS_SSPP0, 45, 7 },
+
+	/* fetch sspp1 */
+	/* vig 1 */
+	{ DBGBUS_SSPP1, 0, 0 },
+	{ DBGBUS_SSPP1, 0, 1 },
+	{ DBGBUS_SSPP1, 0, 2 },
+	{ DBGBUS_SSPP1, 0, 3 },
+	{ DBGBUS_SSPP1, 0, 4 },
+	{ DBGBUS_SSPP1, 0, 5 },
+	{ DBGBUS_SSPP1, 0, 6 },
+	{ DBGBUS_SSPP1, 0, 7 },
+
+	{ DBGBUS_SSPP1, 1, 0 },
+	{ DBGBUS_SSPP1, 1, 1 },
+	{ DBGBUS_SSPP1, 1, 2 },
+	{ DBGBUS_SSPP1, 1, 3 },
+	{ DBGBUS_SSPP1, 1, 4 },
+	{ DBGBUS_SSPP1, 1, 5 },
+	{ DBGBUS_SSPP1, 1, 6 },
+	{ DBGBUS_SSPP1, 1, 7 },
+
+	{ DBGBUS_SSPP1, 2, 0 },
+	{ DBGBUS_SSPP1, 2, 1 },
+	{ DBGBUS_SSPP1, 2, 2 },
+	{ DBGBUS_SSPP1, 2, 3 },
+	{ DBGBUS_SSPP1, 2, 4 },
+	{ DBGBUS_SSPP1, 2, 5 },
+	{ DBGBUS_SSPP1, 2, 6 },
+	{ DBGBUS_SSPP1, 2, 7 },
+
+	{ DBGBUS_SSPP1, 4, 0 },
+	{ DBGBUS_SSPP1, 4, 1 },
+	{ DBGBUS_SSPP1, 4, 2 },
+	{ DBGBUS_SSPP1, 4, 3 },
+	{ DBGBUS_SSPP1, 4, 4 },
+	{ DBGBUS_SSPP1, 4, 5 },
+	{ DBGBUS_SSPP1, 4, 6 },
+	{ DBGBUS_SSPP1, 4, 7 },
+
+	{ DBGBUS_SSPP1, 5, 0 },
+	{ DBGBUS_SSPP1, 5, 1 },
+	{ DBGBUS_SSPP1, 5, 2 },
+	{ DBGBUS_SSPP1, 5, 3 },
+	{ DBGBUS_SSPP1, 5, 4 },
+	{ DBGBUS_SSPP1, 5, 5 },
+	{ DBGBUS_SSPP1, 5, 6 },
+	{ DBGBUS_SSPP1, 5, 7 },
+
+	/* vig 3 */
+	{ DBGBUS_SSPP1, 20, 0 },
+	{ DBGBUS_SSPP1, 20, 1 },
+	{ DBGBUS_SSPP1, 20, 2 },
+	{ DBGBUS_SSPP1, 20, 3 },
+	{ DBGBUS_SSPP1, 20, 4 },
+	{ DBGBUS_SSPP1, 20, 5 },
+	{ DBGBUS_SSPP1, 20, 6 },
+	{ DBGBUS_SSPP1, 20, 7 },
+
+	{ DBGBUS_SSPP1, 21, 0 },
+	{ DBGBUS_SSPP1, 21, 1 },
+	{ DBGBUS_SSPP1, 21, 2 },
+	{ DBGBUS_SSPP1, 21, 3 },
+	{ DBGBUS_SSPP1, 21, 4 },
+	{ DBGBUS_SSPP1, 21, 5 },
+	{ DBGBUS_SSPP1, 21, 6 },
+	{ DBGBUS_SSPP1, 21, 7 },
+
+	{ DBGBUS_SSPP1, 22, 0 },
+	{ DBGBUS_SSPP1, 22, 1 },
+	{ DBGBUS_SSPP1, 22, 2 },
+	{ DBGBUS_SSPP1, 22, 3 },
+	{ DBGBUS_SSPP1, 22, 4 },
+	{ DBGBUS_SSPP1, 22, 5 },
+	{ DBGBUS_SSPP1, 22, 6 },
+	{ DBGBUS_SSPP1, 22, 7 },
+
+	{ DBGBUS_SSPP1, 24, 0 },
+	{ DBGBUS_SSPP1, 24, 1 },
+	{ DBGBUS_SSPP1, 24, 2 },
+	{ DBGBUS_SSPP1, 24, 3 },
+	{ DBGBUS_SSPP1, 24, 4 },
+	{ DBGBUS_SSPP1, 24, 5 },
+	{ DBGBUS_SSPP1, 24, 6 },
+	{ DBGBUS_SSPP1, 24, 7 },
+
+	{ DBGBUS_SSPP1, 25, 0 },
+	{ DBGBUS_SSPP1, 25, 1 },
+	{ DBGBUS_SSPP1, 25, 2 },
+	{ DBGBUS_SSPP1, 25, 3 },
+	{ DBGBUS_SSPP1, 25, 4 },
+	{ DBGBUS_SSPP1, 25, 5 },
+	{ DBGBUS_SSPP1, 25, 6 },
+	{ DBGBUS_SSPP1, 25, 7 },
+
+	/* dma 3 */
+	{ DBGBUS_SSPP1, 30, 0 },
+	{ DBGBUS_SSPP1, 30, 1 },
+	{ DBGBUS_SSPP1, 30, 2 },
+	{ DBGBUS_SSPP1, 30, 3 },
+	{ DBGBUS_SSPP1, 30, 4 },
+	{ DBGBUS_SSPP1, 30, 5 },
+	{ DBGBUS_SSPP1, 30, 6 },
+	{ DBGBUS_SSPP1, 30, 7 },
+
+	{ DBGBUS_SSPP1, 31, 0 },
+	{ DBGBUS_SSPP1, 31, 1 },
+	{ DBGBUS_SSPP1, 31, 2 },
+	{ DBGBUS_SSPP1, 31, 3 },
+	{ DBGBUS_SSPP1, 31, 4 },
+	{ DBGBUS_SSPP1, 31, 5 },
+	{ DBGBUS_SSPP1, 31, 6 },
+	{ DBGBUS_SSPP1, 31, 7 },
+
+	{ DBGBUS_SSPP1, 32, 0 },
+	{ DBGBUS_SSPP1, 32, 1 },
+	{ DBGBUS_SSPP1, 32, 2 },
+	{ DBGBUS_SSPP1, 32, 3 },
+	{ DBGBUS_SSPP1, 32, 4 },
+	{ DBGBUS_SSPP1, 32, 5 },
+	{ DBGBUS_SSPP1, 32, 6 },
+	{ DBGBUS_SSPP1, 32, 7 },
+
+	{ DBGBUS_SSPP1, 33, 0 },
+	{ DBGBUS_SSPP1, 33, 1 },
+	{ DBGBUS_SSPP1, 33, 2 },
+	{ DBGBUS_SSPP1, 33, 3 },
+	{ DBGBUS_SSPP1, 33, 4 },
+	{ DBGBUS_SSPP1, 33, 5 },
+	{ DBGBUS_SSPP1, 33, 6 },
+	{ DBGBUS_SSPP1, 33, 7 },
+
+	{ DBGBUS_SSPP1, 34, 0 },
+	{ DBGBUS_SSPP1, 34, 1 },
+	{ DBGBUS_SSPP1, 34, 2 },
+	{ DBGBUS_SSPP1, 34, 3 },
+	{ DBGBUS_SSPP1, 34, 4 },
+	{ DBGBUS_SSPP1, 34, 5 },
+	{ DBGBUS_SSPP1, 34, 6 },
+	{ DBGBUS_SSPP1, 34, 7 },
+
+	{ DBGBUS_SSPP1, 35, 0 },
+	{ DBGBUS_SSPP1, 35, 1 },
+	{ DBGBUS_SSPP1, 35, 2 },
+
+	/* dma 1 */
+	{ DBGBUS_SSPP1, 40, 0 },
+	{ DBGBUS_SSPP1, 40, 1 },
+	{ DBGBUS_SSPP1, 40, 2 },
+	{ DBGBUS_SSPP1, 40, 3 },
+	{ DBGBUS_SSPP1, 40, 4 },
+	{ DBGBUS_SSPP1, 40, 5 },
+	{ DBGBUS_SSPP1, 40, 6 },
+	{ DBGBUS_SSPP1, 40, 7 },
+
+	{ DBGBUS_SSPP1, 41, 0 },
+	{ DBGBUS_SSPP1, 41, 1 },
+	{ DBGBUS_SSPP1, 41, 2 },
+	{ DBGBUS_SSPP1, 41, 3 },
+	{ DBGBUS_SSPP1, 41, 4 },
+	{ DBGBUS_SSPP1, 41, 5 },
+	{ DBGBUS_SSPP1, 41, 6 },
+	{ DBGBUS_SSPP1, 41, 7 },
+
+	{ DBGBUS_SSPP1, 42, 0 },
+	{ DBGBUS_SSPP1, 42, 1 },
+	{ DBGBUS_SSPP1, 42, 2 },
+	{ DBGBUS_SSPP1, 42, 3 },
+	{ DBGBUS_SSPP1, 42, 4 },
+	{ DBGBUS_SSPP1, 42, 5 },
+	{ DBGBUS_SSPP1, 42, 6 },
+	{ DBGBUS_SSPP1, 42, 7 },
+
+	{ DBGBUS_SSPP1, 44, 0 },
+	{ DBGBUS_SSPP1, 44, 1 },
+	{ DBGBUS_SSPP1, 44, 2 },
+	{ DBGBUS_SSPP1, 44, 3 },
+	{ DBGBUS_SSPP1, 44, 4 },
+	{ DBGBUS_SSPP1, 44, 5 },
+	{ DBGBUS_SSPP1, 44, 6 },
+	{ DBGBUS_SSPP1, 44, 7 },
+
+	{ DBGBUS_SSPP1, 45, 0 },
+	{ DBGBUS_SSPP1, 45, 1 },
+	{ DBGBUS_SSPP1, 45, 2 },
+	{ DBGBUS_SSPP1, 45, 3 },
+	{ DBGBUS_SSPP1, 45, 4 },
+	{ DBGBUS_SSPP1, 45, 5 },
+	{ DBGBUS_SSPP1, 45, 6 },
+	{ DBGBUS_SSPP1, 45, 7 },
+
+	/* cursor 1 */
+	{ DBGBUS_SSPP1, 80, 0 },
+	{ DBGBUS_SSPP1, 80, 1 },
+	{ DBGBUS_SSPP1, 80, 2 },
+	{ DBGBUS_SSPP1, 80, 3 },
+	{ DBGBUS_SSPP1, 80, 4 },
+	{ DBGBUS_SSPP1, 80, 5 },
+	{ DBGBUS_SSPP1, 80, 6 },
+	{ DBGBUS_SSPP1, 80, 7 },
+
+	{ DBGBUS_SSPP1, 81, 0 },
+	{ DBGBUS_SSPP1, 81, 1 },
+	{ DBGBUS_SSPP1, 81, 2 },
+	{ DBGBUS_SSPP1, 81, 3 },
+	{ DBGBUS_SSPP1, 81, 4 },
+	{ DBGBUS_SSPP1, 81, 5 },
+	{ DBGBUS_SSPP1, 81, 6 },
+	{ DBGBUS_SSPP1, 81, 7 },
+
+	{ DBGBUS_SSPP1, 82, 0 },
+	{ DBGBUS_SSPP1, 82, 1 },
+	{ DBGBUS_SSPP1, 82, 2 },
+	{ DBGBUS_SSPP1, 82, 3 },
+	{ DBGBUS_SSPP1, 82, 4 },
+	{ DBGBUS_SSPP1, 82, 5 },
+	{ DBGBUS_SSPP1, 82, 6 },
+	{ DBGBUS_SSPP1, 82, 7 },
+
+	{ DBGBUS_SSPP1, 83, 0 },
+	{ DBGBUS_SSPP1, 83, 1 },
+	{ DBGBUS_SSPP1, 83, 2 },
+	{ DBGBUS_SSPP1, 83, 3 },
+	{ DBGBUS_SSPP1, 83, 4 },
+	{ DBGBUS_SSPP1, 83, 5 },
+	{ DBGBUS_SSPP1, 83, 6 },
+	{ DBGBUS_SSPP1, 83, 7 },
+
+	{ DBGBUS_SSPP1, 84, 0 },
+	{ DBGBUS_SSPP1, 84, 1 },
+	{ DBGBUS_SSPP1, 84, 2 },
+	{ DBGBUS_SSPP1, 84, 3 },
+	{ DBGBUS_SSPP1, 84, 4 },
+	{ DBGBUS_SSPP1, 84, 5 },
+	{ DBGBUS_SSPP1, 84, 6 },
+	{ DBGBUS_SSPP1, 84, 7 },
+
+	/* dspp */
+	{ DBGBUS_DSPP, 13, 0 },
+	{ DBGBUS_DSPP, 19, 0 },
+	{ DBGBUS_DSPP, 14, 0 },
+	{ DBGBUS_DSPP, 14, 1 },
+	{ DBGBUS_DSPP, 14, 3 },
+	{ DBGBUS_DSPP, 20, 0 },
+	{ DBGBUS_DSPP, 20, 1 },
+	{ DBGBUS_DSPP, 20, 3 },
+
+	/* ppb_0 */
+	{ DBGBUS_DSPP, 31, 0, _dpu_debug_bus_ppb0_dump },
+	{ DBGBUS_DSPP, 33, 0, _dpu_debug_bus_ppb0_dump },
+	{ DBGBUS_DSPP, 35, 0, _dpu_debug_bus_ppb0_dump },
+	{ DBGBUS_DSPP, 42, 0, _dpu_debug_bus_ppb0_dump },
+
+	/* ppb_1 */
+	{ DBGBUS_DSPP, 32, 0, _dpu_debug_bus_ppb1_dump },
+	{ DBGBUS_DSPP, 34, 0, _dpu_debug_bus_ppb1_dump },
+	{ DBGBUS_DSPP, 36, 0, _dpu_debug_bus_ppb1_dump },
+	{ DBGBUS_DSPP, 43, 0, _dpu_debug_bus_ppb1_dump },
+
+	/* lm_lut */
+	{ DBGBUS_DSPP, 109, 0 },
+	{ DBGBUS_DSPP, 105, 0 },
+	{ DBGBUS_DSPP, 103, 0 },
+
+	/* tear-check */
+	{ DBGBUS_PERIPH, 63, 0 },
+	{ DBGBUS_PERIPH, 64, 0 },
+	{ DBGBUS_PERIPH, 65, 0 },
+	{ DBGBUS_PERIPH, 73, 0 },
+	{ DBGBUS_PERIPH, 74, 0 },
+
+	/* crossbar */
+	{ DBGBUS_DSPP, 0, 0, _dpu_debug_bus_xbar_dump },
+
+	/* rotator */
+	{ DBGBUS_DSPP, 9, 0},
+
+	/* blend */
+	/* LM0 */
+	{ DBGBUS_DSPP, 63, 0},
+	{ DBGBUS_DSPP, 63, 1},
+	{ DBGBUS_DSPP, 63, 2},
+	{ DBGBUS_DSPP, 63, 3},
+	{ DBGBUS_DSPP, 63, 4},
+	{ DBGBUS_DSPP, 63, 5},
+	{ DBGBUS_DSPP, 63, 6},
+	{ DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 64, 0},
+	{ DBGBUS_DSPP, 64, 1},
+	{ DBGBUS_DSPP, 64, 2},
+	{ DBGBUS_DSPP, 64, 3},
+	{ DBGBUS_DSPP, 64, 4},
+	{ DBGBUS_DSPP, 64, 5},
+	{ DBGBUS_DSPP, 64, 6},
+	{ DBGBUS_DSPP, 64, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 65, 0},
+	{ DBGBUS_DSPP, 65, 1},
+	{ DBGBUS_DSPP, 65, 2},
+	{ DBGBUS_DSPP, 65, 3},
+	{ DBGBUS_DSPP, 65, 4},
+	{ DBGBUS_DSPP, 65, 5},
+	{ DBGBUS_DSPP, 65, 6},
+	{ DBGBUS_DSPP, 65, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 66, 0},
+	{ DBGBUS_DSPP, 66, 1},
+	{ DBGBUS_DSPP, 66, 2},
+	{ DBGBUS_DSPP, 66, 3},
+	{ DBGBUS_DSPP, 66, 4},
+	{ DBGBUS_DSPP, 66, 5},
+	{ DBGBUS_DSPP, 66, 6},
+	{ DBGBUS_DSPP, 66, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 67, 0},
+	{ DBGBUS_DSPP, 67, 1},
+	{ DBGBUS_DSPP, 67, 2},
+	{ DBGBUS_DSPP, 67, 3},
+	{ DBGBUS_DSPP, 67, 4},
+	{ DBGBUS_DSPP, 67, 5},
+	{ DBGBUS_DSPP, 67, 6},
+	{ DBGBUS_DSPP, 67, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 68, 0},
+	{ DBGBUS_DSPP, 68, 1},
+	{ DBGBUS_DSPP, 68, 2},
+	{ DBGBUS_DSPP, 68, 3},
+	{ DBGBUS_DSPP, 68, 4},
+	{ DBGBUS_DSPP, 68, 5},
+	{ DBGBUS_DSPP, 68, 6},
+	{ DBGBUS_DSPP, 68, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 69, 0},
+	{ DBGBUS_DSPP, 69, 1},
+	{ DBGBUS_DSPP, 69, 2},
+	{ DBGBUS_DSPP, 69, 3},
+	{ DBGBUS_DSPP, 69, 4},
+	{ DBGBUS_DSPP, 69, 5},
+	{ DBGBUS_DSPP, 69, 6},
+	{ DBGBUS_DSPP, 69, 7, _dpu_debug_bus_lm_dump },
+
+	/* LM1 */
+	{ DBGBUS_DSPP, 70, 0},
+	{ DBGBUS_DSPP, 70, 1},
+	{ DBGBUS_DSPP, 70, 2},
+	{ DBGBUS_DSPP, 70, 3},
+	{ DBGBUS_DSPP, 70, 4},
+	{ DBGBUS_DSPP, 70, 5},
+	{ DBGBUS_DSPP, 70, 6},
+	{ DBGBUS_DSPP, 70, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 71, 0},
+	{ DBGBUS_DSPP, 71, 1},
+	{ DBGBUS_DSPP, 71, 2},
+	{ DBGBUS_DSPP, 71, 3},
+	{ DBGBUS_DSPP, 71, 4},
+	{ DBGBUS_DSPP, 71, 5},
+	{ DBGBUS_DSPP, 71, 6},
+	{ DBGBUS_DSPP, 71, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 72, 0},
+	{ DBGBUS_DSPP, 72, 1},
+	{ DBGBUS_DSPP, 72, 2},
+	{ DBGBUS_DSPP, 72, 3},
+	{ DBGBUS_DSPP, 72, 4},
+	{ DBGBUS_DSPP, 72, 5},
+	{ DBGBUS_DSPP, 72, 6},
+	{ DBGBUS_DSPP, 72, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 73, 0},
+	{ DBGBUS_DSPP, 73, 1},
+	{ DBGBUS_DSPP, 73, 2},
+	{ DBGBUS_DSPP, 73, 3},
+	{ DBGBUS_DSPP, 73, 4},
+	{ DBGBUS_DSPP, 73, 5},
+	{ DBGBUS_DSPP, 73, 6},
+	{ DBGBUS_DSPP, 73, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 74, 0},
+	{ DBGBUS_DSPP, 74, 1},
+	{ DBGBUS_DSPP, 74, 2},
+	{ DBGBUS_DSPP, 74, 3},
+	{ DBGBUS_DSPP, 74, 4},
+	{ DBGBUS_DSPP, 74, 5},
+	{ DBGBUS_DSPP, 74, 6},
+	{ DBGBUS_DSPP, 74, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 75, 0},
+	{ DBGBUS_DSPP, 75, 1},
+	{ DBGBUS_DSPP, 75, 2},
+	{ DBGBUS_DSPP, 75, 3},
+	{ DBGBUS_DSPP, 75, 4},
+	{ DBGBUS_DSPP, 75, 5},
+	{ DBGBUS_DSPP, 75, 6},
+	{ DBGBUS_DSPP, 75, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 76, 0},
+	{ DBGBUS_DSPP, 76, 1},
+	{ DBGBUS_DSPP, 76, 2},
+	{ DBGBUS_DSPP, 76, 3},
+	{ DBGBUS_DSPP, 76, 4},
+	{ DBGBUS_DSPP, 76, 5},
+	{ DBGBUS_DSPP, 76, 6},
+	{ DBGBUS_DSPP, 76, 7, _dpu_debug_bus_lm_dump },
+
+	/* LM2 */
+	{ DBGBUS_DSPP, 77, 0},
+	{ DBGBUS_DSPP, 77, 1},
+	{ DBGBUS_DSPP, 77, 2},
+	{ DBGBUS_DSPP, 77, 3},
+	{ DBGBUS_DSPP, 77, 4},
+	{ DBGBUS_DSPP, 77, 5},
+	{ DBGBUS_DSPP, 77, 6},
+	{ DBGBUS_DSPP, 77, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 78, 0},
+	{ DBGBUS_DSPP, 78, 1},
+	{ DBGBUS_DSPP, 78, 2},
+	{ DBGBUS_DSPP, 78, 3},
+	{ DBGBUS_DSPP, 78, 4},
+	{ DBGBUS_DSPP, 78, 5},
+	{ DBGBUS_DSPP, 78, 6},
+	{ DBGBUS_DSPP, 78, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 79, 0},
+	{ DBGBUS_DSPP, 79, 1},
+	{ DBGBUS_DSPP, 79, 2},
+	{ DBGBUS_DSPP, 79, 3},
+	{ DBGBUS_DSPP, 79, 4},
+	{ DBGBUS_DSPP, 79, 5},
+	{ DBGBUS_DSPP, 79, 6},
+	{ DBGBUS_DSPP, 79, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 80, 0},
+	{ DBGBUS_DSPP, 80, 1},
+	{ DBGBUS_DSPP, 80, 2},
+	{ DBGBUS_DSPP, 80, 3},
+	{ DBGBUS_DSPP, 80, 4},
+	{ DBGBUS_DSPP, 80, 5},
+	{ DBGBUS_DSPP, 80, 6},
+	{ DBGBUS_DSPP, 80, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 81, 0},
+	{ DBGBUS_DSPP, 81, 1},
+	{ DBGBUS_DSPP, 81, 2},
+	{ DBGBUS_DSPP, 81, 3},
+	{ DBGBUS_DSPP, 81, 4},
+	{ DBGBUS_DSPP, 81, 5},
+	{ DBGBUS_DSPP, 81, 6},
+	{ DBGBUS_DSPP, 81, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 82, 0},
+	{ DBGBUS_DSPP, 82, 1},
+	{ DBGBUS_DSPP, 82, 2},
+	{ DBGBUS_DSPP, 82, 3},
+	{ DBGBUS_DSPP, 82, 4},
+	{ DBGBUS_DSPP, 82, 5},
+	{ DBGBUS_DSPP, 82, 6},
+	{ DBGBUS_DSPP, 82, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 83, 0},
+	{ DBGBUS_DSPP, 83, 1},
+	{ DBGBUS_DSPP, 83, 2},
+	{ DBGBUS_DSPP, 83, 3},
+	{ DBGBUS_DSPP, 83, 4},
+	{ DBGBUS_DSPP, 83, 5},
+	{ DBGBUS_DSPP, 83, 6},
+	{ DBGBUS_DSPP, 83, 7, _dpu_debug_bus_lm_dump },
+
+	/* csc */
+	{ DBGBUS_SSPP0, 7, 0},
+	{ DBGBUS_SSPP0, 7, 1},
+	{ DBGBUS_SSPP0, 27, 0},
+	{ DBGBUS_SSPP0, 27, 1},
+	{ DBGBUS_SSPP1, 7, 0},
+	{ DBGBUS_SSPP1, 7, 1},
+	{ DBGBUS_SSPP1, 27, 0},
+	{ DBGBUS_SSPP1, 27, 1},
+
+	/* pcc */
+	{ DBGBUS_SSPP0, 3,  3},
+	{ DBGBUS_SSPP0, 23, 3},
+	{ DBGBUS_SSPP0, 33, 3},
+	{ DBGBUS_SSPP0, 43, 3},
+	{ DBGBUS_SSPP1, 3,  3},
+	{ DBGBUS_SSPP1, 23, 3},
+	{ DBGBUS_SSPP1, 33, 3},
+	{ DBGBUS_SSPP1, 43, 3},
+
+	/* spa */
+	{ DBGBUS_SSPP0, 8,  0},
+	{ DBGBUS_SSPP0, 28, 0},
+	{ DBGBUS_SSPP1, 8,  0},
+	{ DBGBUS_SSPP1, 28, 0},
+	{ DBGBUS_DSPP, 13, 0},
+	{ DBGBUS_DSPP, 19, 0},
+
+	/* igc */
+	{ DBGBUS_SSPP0, 9,  0},
+	{ DBGBUS_SSPP0, 9,  1},
+	{ DBGBUS_SSPP0, 9,  3},
+	{ DBGBUS_SSPP0, 29, 0},
+	{ DBGBUS_SSPP0, 29, 1},
+	{ DBGBUS_SSPP0, 29, 3},
+	{ DBGBUS_SSPP0, 17, 0},
+	{ DBGBUS_SSPP0, 17, 1},
+	{ DBGBUS_SSPP0, 17, 3},
+	{ DBGBUS_SSPP0, 37, 0},
+	{ DBGBUS_SSPP0, 37, 1},
+	{ DBGBUS_SSPP0, 37, 3},
+	{ DBGBUS_SSPP0, 46, 0},
+	{ DBGBUS_SSPP0, 46, 1},
+	{ DBGBUS_SSPP0, 46, 3},
+
+	{ DBGBUS_SSPP1, 9,  0},
+	{ DBGBUS_SSPP1, 9,  1},
+	{ DBGBUS_SSPP1, 9,  3},
+	{ DBGBUS_SSPP1, 29, 0},
+	{ DBGBUS_SSPP1, 29, 1},
+	{ DBGBUS_SSPP1, 29, 3},
+	{ DBGBUS_SSPP1, 17, 0},
+	{ DBGBUS_SSPP1, 17, 1},
+	{ DBGBUS_SSPP1, 17, 3},
+	{ DBGBUS_SSPP1, 37, 0},
+	{ DBGBUS_SSPP1, 37, 1},
+	{ DBGBUS_SSPP1, 37, 3},
+	{ DBGBUS_SSPP1, 46, 0},
+	{ DBGBUS_SSPP1, 46, 1},
+	{ DBGBUS_SSPP1, 46, 3},
+
+	{ DBGBUS_DSPP, 14, 0},
+	{ DBGBUS_DSPP, 14, 1},
+	{ DBGBUS_DSPP, 14, 3},
+	{ DBGBUS_DSPP, 20, 0},
+	{ DBGBUS_DSPP, 20, 1},
+	{ DBGBUS_DSPP, 20, 3},
+
+	{ DBGBUS_PERIPH, 60, 0},
+};
+
+static struct dpu_debug_bus_entry dbg_bus_dpu_sdm845[] = {
+
+	/* Unpack 0 sspp 0 */
+	{ DBGBUS_SSPP0, 50, 2 },
+	{ DBGBUS_SSPP0, 60, 2 },
+	{ DBGBUS_SSPP0, 70, 2 },
+
+	/* Unpack 0 sspp 1 */
+	{ DBGBUS_SSPP1, 50, 2 },
+	{ DBGBUS_SSPP1, 60, 2 },
+	{ DBGBUS_SSPP1, 70, 2 },
+
+	/* scheduler */
+	{ DBGBUS_DSPP, 130, 0 },
+	{ DBGBUS_DSPP, 130, 1 },
+	{ DBGBUS_DSPP, 130, 2 },
+	{ DBGBUS_DSPP, 130, 3 },
+	{ DBGBUS_DSPP, 130, 4 },
+	{ DBGBUS_DSPP, 130, 5 },
+
+	/* qseed */
+	{ DBGBUS_SSPP0, 6, 0},
+	{ DBGBUS_SSPP0, 6, 1},
+	{ DBGBUS_SSPP0, 26, 0},
+	{ DBGBUS_SSPP0, 26, 1},
+	{ DBGBUS_SSPP1, 6, 0},
+	{ DBGBUS_SSPP1, 6, 1},
+	{ DBGBUS_SSPP1, 26, 0},
+	{ DBGBUS_SSPP1, 26, 1},
+
+	/* scale */
+	{ DBGBUS_SSPP0, 16, 0},
+	{ DBGBUS_SSPP0, 16, 1},
+	{ DBGBUS_SSPP0, 36, 0},
+	{ DBGBUS_SSPP0, 36, 1},
+	{ DBGBUS_SSPP1, 16, 0},
+	{ DBGBUS_SSPP1, 16, 1},
+	{ DBGBUS_SSPP1, 36, 0},
+	{ DBGBUS_SSPP1, 36, 1},
+
+	/* fetch sspp0 */
+
+	/* vig 0 */
+	{ DBGBUS_SSPP0, 0, 0 },
+	{ DBGBUS_SSPP0, 0, 1 },
+	{ DBGBUS_SSPP0, 0, 2 },
+	{ DBGBUS_SSPP0, 0, 3 },
+	{ DBGBUS_SSPP0, 0, 4 },
+	{ DBGBUS_SSPP0, 0, 5 },
+	{ DBGBUS_SSPP0, 0, 6 },
+	{ DBGBUS_SSPP0, 0, 7 },
+
+	{ DBGBUS_SSPP0, 1, 0 },
+	{ DBGBUS_SSPP0, 1, 1 },
+	{ DBGBUS_SSPP0, 1, 2 },
+	{ DBGBUS_SSPP0, 1, 3 },
+	{ DBGBUS_SSPP0, 1, 4 },
+	{ DBGBUS_SSPP0, 1, 5 },
+	{ DBGBUS_SSPP0, 1, 6 },
+	{ DBGBUS_SSPP0, 1, 7 },
+
+	{ DBGBUS_SSPP0, 2, 0 },
+	{ DBGBUS_SSPP0, 2, 1 },
+	{ DBGBUS_SSPP0, 2, 2 },
+	{ DBGBUS_SSPP0, 2, 3 },
+	{ DBGBUS_SSPP0, 2, 4 },
+	{ DBGBUS_SSPP0, 2, 5 },
+	{ DBGBUS_SSPP0, 2, 6 },
+	{ DBGBUS_SSPP0, 2, 7 },
+
+	{ DBGBUS_SSPP0, 4, 0 },
+	{ DBGBUS_SSPP0, 4, 1 },
+	{ DBGBUS_SSPP0, 4, 2 },
+	{ DBGBUS_SSPP0, 4, 3 },
+	{ DBGBUS_SSPP0, 4, 4 },
+	{ DBGBUS_SSPP0, 4, 5 },
+	{ DBGBUS_SSPP0, 4, 6 },
+	{ DBGBUS_SSPP0, 4, 7 },
+
+	{ DBGBUS_SSPP0, 5, 0 },
+	{ DBGBUS_SSPP0, 5, 1 },
+	{ DBGBUS_SSPP0, 5, 2 },
+	{ DBGBUS_SSPP0, 5, 3 },
+	{ DBGBUS_SSPP0, 5, 4 },
+	{ DBGBUS_SSPP0, 5, 5 },
+	{ DBGBUS_SSPP0, 5, 6 },
+	{ DBGBUS_SSPP0, 5, 7 },
+
+	/* vig 2 */
+	{ DBGBUS_SSPP0, 20, 0 },
+	{ DBGBUS_SSPP0, 20, 1 },
+	{ DBGBUS_SSPP0, 20, 2 },
+	{ DBGBUS_SSPP0, 20, 3 },
+	{ DBGBUS_SSPP0, 20, 4 },
+	{ DBGBUS_SSPP0, 20, 5 },
+	{ DBGBUS_SSPP0, 20, 6 },
+	{ DBGBUS_SSPP0, 20, 7 },
+
+	{ DBGBUS_SSPP0, 21, 0 },
+	{ DBGBUS_SSPP0, 21, 1 },
+	{ DBGBUS_SSPP0, 21, 2 },
+	{ DBGBUS_SSPP0, 21, 3 },
+	{ DBGBUS_SSPP0, 21, 4 },
+	{ DBGBUS_SSPP0, 21, 5 },
+	{ DBGBUS_SSPP0, 21, 6 },
+	{ DBGBUS_SSPP0, 21, 7 },
+
+	{ DBGBUS_SSPP0, 22, 0 },
+	{ DBGBUS_SSPP0, 22, 1 },
+	{ DBGBUS_SSPP0, 22, 2 },
+	{ DBGBUS_SSPP0, 22, 3 },
+	{ DBGBUS_SSPP0, 22, 4 },
+	{ DBGBUS_SSPP0, 22, 5 },
+	{ DBGBUS_SSPP0, 22, 6 },
+	{ DBGBUS_SSPP0, 22, 7 },
+
+	{ DBGBUS_SSPP0, 24, 0 },
+	{ DBGBUS_SSPP0, 24, 1 },
+	{ DBGBUS_SSPP0, 24, 2 },
+	{ DBGBUS_SSPP0, 24, 3 },
+	{ DBGBUS_SSPP0, 24, 4 },
+	{ DBGBUS_SSPP0, 24, 5 },
+	{ DBGBUS_SSPP0, 24, 6 },
+	{ DBGBUS_SSPP0, 24, 7 },
+
+	{ DBGBUS_SSPP0, 25, 0 },
+	{ DBGBUS_SSPP0, 25, 1 },
+	{ DBGBUS_SSPP0, 25, 2 },
+	{ DBGBUS_SSPP0, 25, 3 },
+	{ DBGBUS_SSPP0, 25, 4 },
+	{ DBGBUS_SSPP0, 25, 5 },
+	{ DBGBUS_SSPP0, 25, 6 },
+	{ DBGBUS_SSPP0, 25, 7 },
+
+	/* dma 2 */
+	{ DBGBUS_SSPP0, 30, 0 },
+	{ DBGBUS_SSPP0, 30, 1 },
+	{ DBGBUS_SSPP0, 30, 2 },
+	{ DBGBUS_SSPP0, 30, 3 },
+	{ DBGBUS_SSPP0, 30, 4 },
+	{ DBGBUS_SSPP0, 30, 5 },
+	{ DBGBUS_SSPP0, 30, 6 },
+	{ DBGBUS_SSPP0, 30, 7 },
+
+	{ DBGBUS_SSPP0, 31, 0 },
+	{ DBGBUS_SSPP0, 31, 1 },
+	{ DBGBUS_SSPP0, 31, 2 },
+	{ DBGBUS_SSPP0, 31, 3 },
+	{ DBGBUS_SSPP0, 31, 4 },
+	{ DBGBUS_SSPP0, 31, 5 },
+	{ DBGBUS_SSPP0, 31, 6 },
+	{ DBGBUS_SSPP0, 31, 7 },
+
+	{ DBGBUS_SSPP0, 32, 0 },
+	{ DBGBUS_SSPP0, 32, 1 },
+	{ DBGBUS_SSPP0, 32, 2 },
+	{ DBGBUS_SSPP0, 32, 3 },
+	{ DBGBUS_SSPP0, 32, 4 },
+	{ DBGBUS_SSPP0, 32, 5 },
+	{ DBGBUS_SSPP0, 32, 6 },
+	{ DBGBUS_SSPP0, 32, 7 },
+
+	{ DBGBUS_SSPP0, 33, 0 },
+	{ DBGBUS_SSPP0, 33, 1 },
+	{ DBGBUS_SSPP0, 33, 2 },
+	{ DBGBUS_SSPP0, 33, 3 },
+	{ DBGBUS_SSPP0, 33, 4 },
+	{ DBGBUS_SSPP0, 33, 5 },
+	{ DBGBUS_SSPP0, 33, 6 },
+	{ DBGBUS_SSPP0, 33, 7 },
+
+	{ DBGBUS_SSPP0, 34, 0 },
+	{ DBGBUS_SSPP0, 34, 1 },
+	{ DBGBUS_SSPP0, 34, 2 },
+	{ DBGBUS_SSPP0, 34, 3 },
+	{ DBGBUS_SSPP0, 34, 4 },
+	{ DBGBUS_SSPP0, 34, 5 },
+	{ DBGBUS_SSPP0, 34, 6 },
+	{ DBGBUS_SSPP0, 34, 7 },
+
+	{ DBGBUS_SSPP0, 35, 0 },
+	{ DBGBUS_SSPP0, 35, 1 },
+	{ DBGBUS_SSPP0, 35, 2 },
+	{ DBGBUS_SSPP0, 35, 3 },
+
+	/* dma 0 */
+	{ DBGBUS_SSPP0, 40, 0 },
+	{ DBGBUS_SSPP0, 40, 1 },
+	{ DBGBUS_SSPP0, 40, 2 },
+	{ DBGBUS_SSPP0, 40, 3 },
+	{ DBGBUS_SSPP0, 40, 4 },
+	{ DBGBUS_SSPP0, 40, 5 },
+	{ DBGBUS_SSPP0, 40, 6 },
+	{ DBGBUS_SSPP0, 40, 7 },
+
+	{ DBGBUS_SSPP0, 41, 0 },
+	{ DBGBUS_SSPP0, 41, 1 },
+	{ DBGBUS_SSPP0, 41, 2 },
+	{ DBGBUS_SSPP0, 41, 3 },
+	{ DBGBUS_SSPP0, 41, 4 },
+	{ DBGBUS_SSPP0, 41, 5 },
+	{ DBGBUS_SSPP0, 41, 6 },
+	{ DBGBUS_SSPP0, 41, 7 },
+
+	{ DBGBUS_SSPP0, 42, 0 },
+	{ DBGBUS_SSPP0, 42, 1 },
+	{ DBGBUS_SSPP0, 42, 2 },
+	{ DBGBUS_SSPP0, 42, 3 },
+	{ DBGBUS_SSPP0, 42, 4 },
+	{ DBGBUS_SSPP0, 42, 5 },
+	{ DBGBUS_SSPP0, 42, 6 },
+	{ DBGBUS_SSPP0, 42, 7 },
+
+	{ DBGBUS_SSPP0, 44, 0 },
+	{ DBGBUS_SSPP0, 44, 1 },
+	{ DBGBUS_SSPP0, 44, 2 },
+	{ DBGBUS_SSPP0, 44, 3 },
+	{ DBGBUS_SSPP0, 44, 4 },
+	{ DBGBUS_SSPP0, 44, 5 },
+	{ DBGBUS_SSPP0, 44, 6 },
+	{ DBGBUS_SSPP0, 44, 7 },
+
+	{ DBGBUS_SSPP0, 45, 0 },
+	{ DBGBUS_SSPP0, 45, 1 },
+	{ DBGBUS_SSPP0, 45, 2 },
+	{ DBGBUS_SSPP0, 45, 3 },
+	{ DBGBUS_SSPP0, 45, 4 },
+	{ DBGBUS_SSPP0, 45, 5 },
+	{ DBGBUS_SSPP0, 45, 6 },
+	{ DBGBUS_SSPP0, 45, 7 },
+
+	/* fetch sspp1 */
+	/* vig 1 */
+	{ DBGBUS_SSPP1, 0, 0 },
+	{ DBGBUS_SSPP1, 0, 1 },
+	{ DBGBUS_SSPP1, 0, 2 },
+	{ DBGBUS_SSPP1, 0, 3 },
+	{ DBGBUS_SSPP1, 0, 4 },
+	{ DBGBUS_SSPP1, 0, 5 },
+	{ DBGBUS_SSPP1, 0, 6 },
+	{ DBGBUS_SSPP1, 0, 7 },
+
+	{ DBGBUS_SSPP1, 1, 0 },
+	{ DBGBUS_SSPP1, 1, 1 },
+	{ DBGBUS_SSPP1, 1, 2 },
+	{ DBGBUS_SSPP1, 1, 3 },
+	{ DBGBUS_SSPP1, 1, 4 },
+	{ DBGBUS_SSPP1, 1, 5 },
+	{ DBGBUS_SSPP1, 1, 6 },
+	{ DBGBUS_SSPP1, 1, 7 },
+
+	{ DBGBUS_SSPP1, 2, 0 },
+	{ DBGBUS_SSPP1, 2, 1 },
+	{ DBGBUS_SSPP1, 2, 2 },
+	{ DBGBUS_SSPP1, 2, 3 },
+	{ DBGBUS_SSPP1, 2, 4 },
+	{ DBGBUS_SSPP1, 2, 5 },
+	{ DBGBUS_SSPP1, 2, 6 },
+	{ DBGBUS_SSPP1, 2, 7 },
+
+	{ DBGBUS_SSPP1, 4, 0 },
+	{ DBGBUS_SSPP1, 4, 1 },
+	{ DBGBUS_SSPP1, 4, 2 },
+	{ DBGBUS_SSPP1, 4, 3 },
+	{ DBGBUS_SSPP1, 4, 4 },
+	{ DBGBUS_SSPP1, 4, 5 },
+	{ DBGBUS_SSPP1, 4, 6 },
+	{ DBGBUS_SSPP1, 4, 7 },
+
+	{ DBGBUS_SSPP1, 5, 0 },
+	{ DBGBUS_SSPP1, 5, 1 },
+	{ DBGBUS_SSPP1, 5, 2 },
+	{ DBGBUS_SSPP1, 5, 3 },
+	{ DBGBUS_SSPP1, 5, 4 },
+	{ DBGBUS_SSPP1, 5, 5 },
+	{ DBGBUS_SSPP1, 5, 6 },
+	{ DBGBUS_SSPP1, 5, 7 },
+
+	/* vig 3 */
+	{ DBGBUS_SSPP1, 20, 0 },
+	{ DBGBUS_SSPP1, 20, 1 },
+	{ DBGBUS_SSPP1, 20, 2 },
+	{ DBGBUS_SSPP1, 20, 3 },
+	{ DBGBUS_SSPP1, 20, 4 },
+	{ DBGBUS_SSPP1, 20, 5 },
+	{ DBGBUS_SSPP1, 20, 6 },
+	{ DBGBUS_SSPP1, 20, 7 },
+
+	{ DBGBUS_SSPP1, 21, 0 },
+	{ DBGBUS_SSPP1, 21, 1 },
+	{ DBGBUS_SSPP1, 21, 2 },
+	{ DBGBUS_SSPP1, 21, 3 },
+	{ DBGBUS_SSPP1, 21, 4 },
+	{ DBGBUS_SSPP1, 21, 5 },
+	{ DBGBUS_SSPP1, 21, 6 },
+	{ DBGBUS_SSPP1, 21, 7 },
+
+	{ DBGBUS_SSPP1, 22, 0 },
+	{ DBGBUS_SSPP1, 22, 1 },
+	{ DBGBUS_SSPP1, 22, 2 },
+	{ DBGBUS_SSPP1, 22, 3 },
+	{ DBGBUS_SSPP1, 22, 4 },
+	{ DBGBUS_SSPP1, 22, 5 },
+	{ DBGBUS_SSPP1, 22, 6 },
+	{ DBGBUS_SSPP1, 22, 7 },
+
+	{ DBGBUS_SSPP1, 24, 0 },
+	{ DBGBUS_SSPP1, 24, 1 },
+	{ DBGBUS_SSPP1, 24, 2 },
+	{ DBGBUS_SSPP1, 24, 3 },
+	{ DBGBUS_SSPP1, 24, 4 },
+	{ DBGBUS_SSPP1, 24, 5 },
+	{ DBGBUS_SSPP1, 24, 6 },
+	{ DBGBUS_SSPP1, 24, 7 },
+
+	{ DBGBUS_SSPP1, 25, 0 },
+	{ DBGBUS_SSPP1, 25, 1 },
+	{ DBGBUS_SSPP1, 25, 2 },
+	{ DBGBUS_SSPP1, 25, 3 },
+	{ DBGBUS_SSPP1, 25, 4 },
+	{ DBGBUS_SSPP1, 25, 5 },
+	{ DBGBUS_SSPP1, 25, 6 },
+	{ DBGBUS_SSPP1, 25, 7 },
+
+	/* dma 3 */
+	{ DBGBUS_SSPP1, 30, 0 },
+	{ DBGBUS_SSPP1, 30, 1 },
+	{ DBGBUS_SSPP1, 30, 2 },
+	{ DBGBUS_SSPP1, 30, 3 },
+	{ DBGBUS_SSPP1, 30, 4 },
+	{ DBGBUS_SSPP1, 30, 5 },
+	{ DBGBUS_SSPP1, 30, 6 },
+	{ DBGBUS_SSPP1, 30, 7 },
+
+	{ DBGBUS_SSPP1, 31, 0 },
+	{ DBGBUS_SSPP1, 31, 1 },
+	{ DBGBUS_SSPP1, 31, 2 },
+	{ DBGBUS_SSPP1, 31, 3 },
+	{ DBGBUS_SSPP1, 31, 4 },
+	{ DBGBUS_SSPP1, 31, 5 },
+	{ DBGBUS_SSPP1, 31, 6 },
+	{ DBGBUS_SSPP1, 31, 7 },
+
+	{ DBGBUS_SSPP1, 32, 0 },
+	{ DBGBUS_SSPP1, 32, 1 },
+	{ DBGBUS_SSPP1, 32, 2 },
+	{ DBGBUS_SSPP1, 32, 3 },
+	{ DBGBUS_SSPP1, 32, 4 },
+	{ DBGBUS_SSPP1, 32, 5 },
+	{ DBGBUS_SSPP1, 32, 6 },
+	{ DBGBUS_SSPP1, 32, 7 },
+
+	{ DBGBUS_SSPP1, 33, 0 },
+	{ DBGBUS_SSPP1, 33, 1 },
+	{ DBGBUS_SSPP1, 33, 2 },
+	{ DBGBUS_SSPP1, 33, 3 },
+	{ DBGBUS_SSPP1, 33, 4 },
+	{ DBGBUS_SSPP1, 33, 5 },
+	{ DBGBUS_SSPP1, 33, 6 },
+	{ DBGBUS_SSPP1, 33, 7 },
+
+	{ DBGBUS_SSPP1, 34, 0 },
+	{ DBGBUS_SSPP1, 34, 1 },
+	{ DBGBUS_SSPP1, 34, 2 },
+	{ DBGBUS_SSPP1, 34, 3 },
+	{ DBGBUS_SSPP1, 34, 4 },
+	{ DBGBUS_SSPP1, 34, 5 },
+	{ DBGBUS_SSPP1, 34, 6 },
+	{ DBGBUS_SSPP1, 34, 7 },
+
+	{ DBGBUS_SSPP1, 35, 0 },
+	{ DBGBUS_SSPP1, 35, 1 },
+	{ DBGBUS_SSPP1, 35, 2 },
+
+	/* dma 1 */
+	{ DBGBUS_SSPP1, 40, 0 },
+	{ DBGBUS_SSPP1, 40, 1 },
+	{ DBGBUS_SSPP1, 40, 2 },
+	{ DBGBUS_SSPP1, 40, 3 },
+	{ DBGBUS_SSPP1, 40, 4 },
+	{ DBGBUS_SSPP1, 40, 5 },
+	{ DBGBUS_SSPP1, 40, 6 },
+	{ DBGBUS_SSPP1, 40, 7 },
+
+	{ DBGBUS_SSPP1, 41, 0 },
+	{ DBGBUS_SSPP1, 41, 1 },
+	{ DBGBUS_SSPP1, 41, 2 },
+	{ DBGBUS_SSPP1, 41, 3 },
+	{ DBGBUS_SSPP1, 41, 4 },
+	{ DBGBUS_SSPP1, 41, 5 },
+	{ DBGBUS_SSPP1, 41, 6 },
+	{ DBGBUS_SSPP1, 41, 7 },
+
+	{ DBGBUS_SSPP1, 42, 0 },
+	{ DBGBUS_SSPP1, 42, 1 },
+	{ DBGBUS_SSPP1, 42, 2 },
+	{ DBGBUS_SSPP1, 42, 3 },
+	{ DBGBUS_SSPP1, 42, 4 },
+	{ DBGBUS_SSPP1, 42, 5 },
+	{ DBGBUS_SSPP1, 42, 6 },
+	{ DBGBUS_SSPP1, 42, 7 },
+
+	{ DBGBUS_SSPP1, 44, 0 },
+	{ DBGBUS_SSPP1, 44, 1 },
+	{ DBGBUS_SSPP1, 44, 2 },
+	{ DBGBUS_SSPP1, 44, 3 },
+	{ DBGBUS_SSPP1, 44, 4 },
+	{ DBGBUS_SSPP1, 44, 5 },
+	{ DBGBUS_SSPP1, 44, 6 },
+	{ DBGBUS_SSPP1, 44, 7 },
+
+	{ DBGBUS_SSPP1, 45, 0 },
+	{ DBGBUS_SSPP1, 45, 1 },
+	{ DBGBUS_SSPP1, 45, 2 },
+	{ DBGBUS_SSPP1, 45, 3 },
+	{ DBGBUS_SSPP1, 45, 4 },
+	{ DBGBUS_SSPP1, 45, 5 },
+	{ DBGBUS_SSPP1, 45, 6 },
+	{ DBGBUS_SSPP1, 45, 7 },
+
+	/* dspp */
+	{ DBGBUS_DSPP, 13, 0 },
+	{ DBGBUS_DSPP, 19, 0 },
+	{ DBGBUS_DSPP, 14, 0 },
+	{ DBGBUS_DSPP, 14, 1 },
+	{ DBGBUS_DSPP, 14, 3 },
+	{ DBGBUS_DSPP, 20, 0 },
+	{ DBGBUS_DSPP, 20, 1 },
+	{ DBGBUS_DSPP, 20, 3 },
+
+	/* ppb_0 */
+	{ DBGBUS_DSPP, 31, 0, _dpu_debug_bus_ppb0_dump },
+	{ DBGBUS_DSPP, 33, 0, _dpu_debug_bus_ppb0_dump },
+	{ DBGBUS_DSPP, 35, 0, _dpu_debug_bus_ppb0_dump },
+	{ DBGBUS_DSPP, 42, 0, _dpu_debug_bus_ppb0_dump },
+
+	/* ppb_1 */
+	{ DBGBUS_DSPP, 32, 0, _dpu_debug_bus_ppb1_dump },
+	{ DBGBUS_DSPP, 34, 0, _dpu_debug_bus_ppb1_dump },
+	{ DBGBUS_DSPP, 36, 0, _dpu_debug_bus_ppb1_dump },
+	{ DBGBUS_DSPP, 43, 0, _dpu_debug_bus_ppb1_dump },
+
+	/* lm_lut */
+	{ DBGBUS_DSPP, 109, 0 },
+	{ DBGBUS_DSPP, 105, 0 },
+	{ DBGBUS_DSPP, 103, 0 },
+
+	/* crossbar */
+	{ DBGBUS_DSPP, 0, 0, _dpu_debug_bus_xbar_dump },
+
+	/* rotator */
+	{ DBGBUS_DSPP, 9, 0},
+
+	/* blend */
+	/* LM0 */
+	{ DBGBUS_DSPP, 63, 1},
+	{ DBGBUS_DSPP, 63, 2},
+	{ DBGBUS_DSPP, 63, 3},
+	{ DBGBUS_DSPP, 63, 4},
+	{ DBGBUS_DSPP, 63, 5},
+	{ DBGBUS_DSPP, 63, 6},
+	{ DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 64, 1},
+	{ DBGBUS_DSPP, 64, 2},
+	{ DBGBUS_DSPP, 64, 3},
+	{ DBGBUS_DSPP, 64, 4},
+	{ DBGBUS_DSPP, 64, 5},
+	{ DBGBUS_DSPP, 64, 6},
+	{ DBGBUS_DSPP, 64, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 65, 1},
+	{ DBGBUS_DSPP, 65, 2},
+	{ DBGBUS_DSPP, 65, 3},
+	{ DBGBUS_DSPP, 65, 4},
+	{ DBGBUS_DSPP, 65, 5},
+	{ DBGBUS_DSPP, 65, 6},
+	{ DBGBUS_DSPP, 65, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 66, 1},
+	{ DBGBUS_DSPP, 66, 2},
+	{ DBGBUS_DSPP, 66, 3},
+	{ DBGBUS_DSPP, 66, 4},
+	{ DBGBUS_DSPP, 66, 5},
+	{ DBGBUS_DSPP, 66, 6},
+	{ DBGBUS_DSPP, 66, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 67, 1},
+	{ DBGBUS_DSPP, 67, 2},
+	{ DBGBUS_DSPP, 67, 3},
+	{ DBGBUS_DSPP, 67, 4},
+	{ DBGBUS_DSPP, 67, 5},
+	{ DBGBUS_DSPP, 67, 6},
+	{ DBGBUS_DSPP, 67, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 68, 1},
+	{ DBGBUS_DSPP, 68, 2},
+	{ DBGBUS_DSPP, 68, 3},
+	{ DBGBUS_DSPP, 68, 4},
+	{ DBGBUS_DSPP, 68, 5},
+	{ DBGBUS_DSPP, 68, 6},
+	{ DBGBUS_DSPP, 68, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 69, 1},
+	{ DBGBUS_DSPP, 69, 2},
+	{ DBGBUS_DSPP, 69, 3},
+	{ DBGBUS_DSPP, 69, 4},
+	{ DBGBUS_DSPP, 69, 5},
+	{ DBGBUS_DSPP, 69, 6},
+	{ DBGBUS_DSPP, 69, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 84, 1},
+	{ DBGBUS_DSPP, 84, 2},
+	{ DBGBUS_DSPP, 84, 3},
+	{ DBGBUS_DSPP, 84, 4},
+	{ DBGBUS_DSPP, 84, 5},
+	{ DBGBUS_DSPP, 84, 6},
+	{ DBGBUS_DSPP, 84, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 85, 1},
+	{ DBGBUS_DSPP, 85, 2},
+	{ DBGBUS_DSPP, 85, 3},
+	{ DBGBUS_DSPP, 85, 4},
+	{ DBGBUS_DSPP, 85, 5},
+	{ DBGBUS_DSPP, 85, 6},
+	{ DBGBUS_DSPP, 85, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 86, 1},
+	{ DBGBUS_DSPP, 86, 2},
+	{ DBGBUS_DSPP, 86, 3},
+	{ DBGBUS_DSPP, 86, 4},
+	{ DBGBUS_DSPP, 86, 5},
+	{ DBGBUS_DSPP, 86, 6},
+	{ DBGBUS_DSPP, 86, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 87, 1},
+	{ DBGBUS_DSPP, 87, 2},
+	{ DBGBUS_DSPP, 87, 3},
+	{ DBGBUS_DSPP, 87, 4},
+	{ DBGBUS_DSPP, 87, 5},
+	{ DBGBUS_DSPP, 87, 6},
+	{ DBGBUS_DSPP, 87, 7, _dpu_debug_bus_lm_dump },
+
+	/* LM1 */
+	{ DBGBUS_DSPP, 70, 1},
+	{ DBGBUS_DSPP, 70, 2},
+	{ DBGBUS_DSPP, 70, 3},
+	{ DBGBUS_DSPP, 70, 4},
+	{ DBGBUS_DSPP, 70, 5},
+	{ DBGBUS_DSPP, 70, 6},
+	{ DBGBUS_DSPP, 70, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 71, 1},
+	{ DBGBUS_DSPP, 71, 2},
+	{ DBGBUS_DSPP, 71, 3},
+	{ DBGBUS_DSPP, 71, 4},
+	{ DBGBUS_DSPP, 71, 5},
+	{ DBGBUS_DSPP, 71, 6},
+	{ DBGBUS_DSPP, 71, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 72, 1},
+	{ DBGBUS_DSPP, 72, 2},
+	{ DBGBUS_DSPP, 72, 3},
+	{ DBGBUS_DSPP, 72, 4},
+	{ DBGBUS_DSPP, 72, 5},
+	{ DBGBUS_DSPP, 72, 6},
+	{ DBGBUS_DSPP, 72, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 73, 1},
+	{ DBGBUS_DSPP, 73, 2},
+	{ DBGBUS_DSPP, 73, 3},
+	{ DBGBUS_DSPP, 73, 4},
+	{ DBGBUS_DSPP, 73, 5},
+	{ DBGBUS_DSPP, 73, 6},
+	{ DBGBUS_DSPP, 73, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 74, 1},
+	{ DBGBUS_DSPP, 74, 2},
+	{ DBGBUS_DSPP, 74, 3},
+	{ DBGBUS_DSPP, 74, 4},
+	{ DBGBUS_DSPP, 74, 5},
+	{ DBGBUS_DSPP, 74, 6},
+	{ DBGBUS_DSPP, 74, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 75, 1},
+	{ DBGBUS_DSPP, 75, 2},
+	{ DBGBUS_DSPP, 75, 3},
+	{ DBGBUS_DSPP, 75, 4},
+	{ DBGBUS_DSPP, 75, 5},
+	{ DBGBUS_DSPP, 75, 6},
+	{ DBGBUS_DSPP, 75, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 76, 1},
+	{ DBGBUS_DSPP, 76, 2},
+	{ DBGBUS_DSPP, 76, 3},
+	{ DBGBUS_DSPP, 76, 4},
+	{ DBGBUS_DSPP, 76, 5},
+	{ DBGBUS_DSPP, 76, 6},
+	{ DBGBUS_DSPP, 76, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 88, 1},
+	{ DBGBUS_DSPP, 88, 2},
+	{ DBGBUS_DSPP, 88, 3},
+	{ DBGBUS_DSPP, 88, 4},
+	{ DBGBUS_DSPP, 88, 5},
+	{ DBGBUS_DSPP, 88, 6},
+	{ DBGBUS_DSPP, 88, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 89, 1},
+	{ DBGBUS_DSPP, 89, 2},
+	{ DBGBUS_DSPP, 89, 3},
+	{ DBGBUS_DSPP, 89, 4},
+	{ DBGBUS_DSPP, 89, 5},
+	{ DBGBUS_DSPP, 89, 6},
+	{ DBGBUS_DSPP, 89, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 90, 1},
+	{ DBGBUS_DSPP, 90, 2},
+	{ DBGBUS_DSPP, 90, 3},
+	{ DBGBUS_DSPP, 90, 4},
+	{ DBGBUS_DSPP, 90, 5},
+	{ DBGBUS_DSPP, 90, 6},
+	{ DBGBUS_DSPP, 90, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 91, 1},
+	{ DBGBUS_DSPP, 91, 2},
+	{ DBGBUS_DSPP, 91, 3},
+	{ DBGBUS_DSPP, 91, 4},
+	{ DBGBUS_DSPP, 91, 5},
+	{ DBGBUS_DSPP, 91, 6},
+	{ DBGBUS_DSPP, 91, 7, _dpu_debug_bus_lm_dump },
+
+	/* LM2 */
+	{ DBGBUS_DSPP, 77, 0},
+	{ DBGBUS_DSPP, 77, 1},
+	{ DBGBUS_DSPP, 77, 2},
+	{ DBGBUS_DSPP, 77, 3},
+	{ DBGBUS_DSPP, 77, 4},
+	{ DBGBUS_DSPP, 77, 5},
+	{ DBGBUS_DSPP, 77, 6},
+	{ DBGBUS_DSPP, 77, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 78, 0},
+	{ DBGBUS_DSPP, 78, 1},
+	{ DBGBUS_DSPP, 78, 2},
+	{ DBGBUS_DSPP, 78, 3},
+	{ DBGBUS_DSPP, 78, 4},
+	{ DBGBUS_DSPP, 78, 5},
+	{ DBGBUS_DSPP, 78, 6},
+	{ DBGBUS_DSPP, 78, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 79, 0},
+	{ DBGBUS_DSPP, 79, 1},
+	{ DBGBUS_DSPP, 79, 2},
+	{ DBGBUS_DSPP, 79, 3},
+	{ DBGBUS_DSPP, 79, 4},
+	{ DBGBUS_DSPP, 79, 5},
+	{ DBGBUS_DSPP, 79, 6},
+	{ DBGBUS_DSPP, 79, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 80, 0},
+	{ DBGBUS_DSPP, 80, 1},
+	{ DBGBUS_DSPP, 80, 2},
+	{ DBGBUS_DSPP, 80, 3},
+	{ DBGBUS_DSPP, 80, 4},
+	{ DBGBUS_DSPP, 80, 5},
+	{ DBGBUS_DSPP, 80, 6},
+	{ DBGBUS_DSPP, 80, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 81, 0},
+	{ DBGBUS_DSPP, 81, 1},
+	{ DBGBUS_DSPP, 81, 2},
+	{ DBGBUS_DSPP, 81, 3},
+	{ DBGBUS_DSPP, 81, 4},
+	{ DBGBUS_DSPP, 81, 5},
+	{ DBGBUS_DSPP, 81, 6},
+	{ DBGBUS_DSPP, 81, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 82, 0},
+	{ DBGBUS_DSPP, 82, 1},
+	{ DBGBUS_DSPP, 82, 2},
+	{ DBGBUS_DSPP, 82, 3},
+	{ DBGBUS_DSPP, 82, 4},
+	{ DBGBUS_DSPP, 82, 5},
+	{ DBGBUS_DSPP, 82, 6},
+	{ DBGBUS_DSPP, 82, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 83, 0},
+	{ DBGBUS_DSPP, 83, 1},
+	{ DBGBUS_DSPP, 83, 2},
+	{ DBGBUS_DSPP, 83, 3},
+	{ DBGBUS_DSPP, 83, 4},
+	{ DBGBUS_DSPP, 83, 5},
+	{ DBGBUS_DSPP, 83, 6},
+	{ DBGBUS_DSPP, 83, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 92, 1},
+	{ DBGBUS_DSPP, 92, 2},
+	{ DBGBUS_DSPP, 92, 3},
+	{ DBGBUS_DSPP, 92, 4},
+	{ DBGBUS_DSPP, 92, 5},
+	{ DBGBUS_DSPP, 92, 6},
+	{ DBGBUS_DSPP, 92, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 93, 1},
+	{ DBGBUS_DSPP, 93, 2},
+	{ DBGBUS_DSPP, 93, 3},
+	{ DBGBUS_DSPP, 93, 4},
+	{ DBGBUS_DSPP, 93, 5},
+	{ DBGBUS_DSPP, 93, 6},
+	{ DBGBUS_DSPP, 93, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 94, 1},
+	{ DBGBUS_DSPP, 94, 2},
+	{ DBGBUS_DSPP, 94, 3},
+	{ DBGBUS_DSPP, 94, 4},
+	{ DBGBUS_DSPP, 94, 5},
+	{ DBGBUS_DSPP, 94, 6},
+	{ DBGBUS_DSPP, 94, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 95, 1},
+	{ DBGBUS_DSPP, 95, 2},
+	{ DBGBUS_DSPP, 95, 3},
+	{ DBGBUS_DSPP, 95, 4},
+	{ DBGBUS_DSPP, 95, 5},
+	{ DBGBUS_DSPP, 95, 6},
+	{ DBGBUS_DSPP, 95, 7, _dpu_debug_bus_lm_dump },
+
+	/* LM5 */
+	{ DBGBUS_DSPP, 110, 1},
+	{ DBGBUS_DSPP, 110, 2},
+	{ DBGBUS_DSPP, 110, 3},
+	{ DBGBUS_DSPP, 110, 4},
+	{ DBGBUS_DSPP, 110, 5},
+	{ DBGBUS_DSPP, 110, 6},
+	{ DBGBUS_DSPP, 110, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 111, 1},
+	{ DBGBUS_DSPP, 111, 2},
+	{ DBGBUS_DSPP, 111, 3},
+	{ DBGBUS_DSPP, 111, 4},
+	{ DBGBUS_DSPP, 111, 5},
+	{ DBGBUS_DSPP, 111, 6},
+	{ DBGBUS_DSPP, 111, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 112, 1},
+	{ DBGBUS_DSPP, 112, 2},
+	{ DBGBUS_DSPP, 112, 3},
+	{ DBGBUS_DSPP, 112, 4},
+	{ DBGBUS_DSPP, 112, 5},
+	{ DBGBUS_DSPP, 112, 6},
+	{ DBGBUS_DSPP, 112, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 113, 1},
+	{ DBGBUS_DSPP, 113, 2},
+	{ DBGBUS_DSPP, 113, 3},
+	{ DBGBUS_DSPP, 113, 4},
+	{ DBGBUS_DSPP, 113, 5},
+	{ DBGBUS_DSPP, 113, 6},
+	{ DBGBUS_DSPP, 113, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 114, 1},
+	{ DBGBUS_DSPP, 114, 2},
+	{ DBGBUS_DSPP, 114, 3},
+	{ DBGBUS_DSPP, 114, 4},
+	{ DBGBUS_DSPP, 114, 5},
+	{ DBGBUS_DSPP, 114, 6},
+	{ DBGBUS_DSPP, 114, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 115, 1},
+	{ DBGBUS_DSPP, 115, 2},
+	{ DBGBUS_DSPP, 115, 3},
+	{ DBGBUS_DSPP, 115, 4},
+	{ DBGBUS_DSPP, 115, 5},
+	{ DBGBUS_DSPP, 115, 6},
+	{ DBGBUS_DSPP, 115, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 116, 1},
+	{ DBGBUS_DSPP, 116, 2},
+	{ DBGBUS_DSPP, 116, 3},
+	{ DBGBUS_DSPP, 116, 4},
+	{ DBGBUS_DSPP, 116, 5},
+	{ DBGBUS_DSPP, 116, 6},
+	{ DBGBUS_DSPP, 116, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 117, 1},
+	{ DBGBUS_DSPP, 117, 2},
+	{ DBGBUS_DSPP, 117, 3},
+	{ DBGBUS_DSPP, 117, 4},
+	{ DBGBUS_DSPP, 117, 5},
+	{ DBGBUS_DSPP, 117, 6},
+	{ DBGBUS_DSPP, 117, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 118, 1},
+	{ DBGBUS_DSPP, 118, 2},
+	{ DBGBUS_DSPP, 118, 3},
+	{ DBGBUS_DSPP, 118, 4},
+	{ DBGBUS_DSPP, 118, 5},
+	{ DBGBUS_DSPP, 118, 6},
+	{ DBGBUS_DSPP, 118, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 119, 1},
+	{ DBGBUS_DSPP, 119, 2},
+	{ DBGBUS_DSPP, 119, 3},
+	{ DBGBUS_DSPP, 119, 4},
+	{ DBGBUS_DSPP, 119, 5},
+	{ DBGBUS_DSPP, 119, 6},
+	{ DBGBUS_DSPP, 119, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 120, 1},
+	{ DBGBUS_DSPP, 120, 2},
+	{ DBGBUS_DSPP, 120, 3},
+	{ DBGBUS_DSPP, 120, 4},
+	{ DBGBUS_DSPP, 120, 5},
+	{ DBGBUS_DSPP, 120, 6},
+	{ DBGBUS_DSPP, 120, 7, _dpu_debug_bus_lm_dump },
+
+	/* csc */
+	{ DBGBUS_SSPP0, 7, 0},
+	{ DBGBUS_SSPP0, 7, 1},
+	{ DBGBUS_SSPP0, 27, 0},
+	{ DBGBUS_SSPP0, 27, 1},
+	{ DBGBUS_SSPP1, 7, 0},
+	{ DBGBUS_SSPP1, 7, 1},
+	{ DBGBUS_SSPP1, 27, 0},
+	{ DBGBUS_SSPP1, 27, 1},
+
+	/* pcc */
+	{ DBGBUS_SSPP0, 3,  3},
+	{ DBGBUS_SSPP0, 23, 3},
+	{ DBGBUS_SSPP0, 33, 3},
+	{ DBGBUS_SSPP0, 43, 3},
+	{ DBGBUS_SSPP1, 3,  3},
+	{ DBGBUS_SSPP1, 23, 3},
+	{ DBGBUS_SSPP1, 33, 3},
+	{ DBGBUS_SSPP1, 43, 3},
+
+	/* spa */
+	{ DBGBUS_SSPP0, 8,  0},
+	{ DBGBUS_SSPP0, 28, 0},
+	{ DBGBUS_SSPP1, 8,  0},
+	{ DBGBUS_SSPP1, 28, 0},
+	{ DBGBUS_DSPP, 13, 0},
+	{ DBGBUS_DSPP, 19, 0},
+
+	/* igc */
+	{ DBGBUS_SSPP0, 17, 0},
+	{ DBGBUS_SSPP0, 17, 1},
+	{ DBGBUS_SSPP0, 17, 3},
+	{ DBGBUS_SSPP0, 37, 0},
+	{ DBGBUS_SSPP0, 37, 1},
+	{ DBGBUS_SSPP0, 37, 3},
+	{ DBGBUS_SSPP0, 46, 0},
+	{ DBGBUS_SSPP0, 46, 1},
+	{ DBGBUS_SSPP0, 46, 3},
+
+	{ DBGBUS_SSPP1, 17, 0},
+	{ DBGBUS_SSPP1, 17, 1},
+	{ DBGBUS_SSPP1, 17, 3},
+	{ DBGBUS_SSPP1, 37, 0},
+	{ DBGBUS_SSPP1, 37, 1},
+	{ DBGBUS_SSPP1, 37, 3},
+	{ DBGBUS_SSPP1, 46, 0},
+	{ DBGBUS_SSPP1, 46, 1},
+	{ DBGBUS_SSPP1, 46, 3},
+
+	{ DBGBUS_DSPP, 14, 0},
+	{ DBGBUS_DSPP, 14, 1},
+	{ DBGBUS_DSPP, 14, 3},
+	{ DBGBUS_DSPP, 20, 0},
+	{ DBGBUS_DSPP, 20, 1},
+	{ DBGBUS_DSPP, 20, 3},
+
+	/* intf0-3 */
+	{ DBGBUS_PERIPH, 0, 0},
+	{ DBGBUS_PERIPH, 1, 0},
+	{ DBGBUS_PERIPH, 2, 0},
+	{ DBGBUS_PERIPH, 3, 0},
+
+	/* te counter wrapper */
+	{ DBGBUS_PERIPH, 60, 0},
+
+	/* dsc0 */
+	{ DBGBUS_PERIPH, 47, 0},
+	{ DBGBUS_PERIPH, 47, 1},
+	{ DBGBUS_PERIPH, 47, 2},
+	{ DBGBUS_PERIPH, 47, 3},
+	{ DBGBUS_PERIPH, 47, 4},
+	{ DBGBUS_PERIPH, 47, 5},
+	{ DBGBUS_PERIPH, 47, 6},
+	{ DBGBUS_PERIPH, 47, 7},
+
+	/* dsc1 */
+	{ DBGBUS_PERIPH, 48, 0},
+	{ DBGBUS_PERIPH, 48, 1},
+	{ DBGBUS_PERIPH, 48, 2},
+	{ DBGBUS_PERIPH, 48, 3},
+	{ DBGBUS_PERIPH, 48, 4},
+	{ DBGBUS_PERIPH, 48, 5},
+	{ DBGBUS_PERIPH, 48, 6},
+	{ DBGBUS_PERIPH, 48, 7},
+
+	/* dsc2 */
+	{ DBGBUS_PERIPH, 51, 0},
+	{ DBGBUS_PERIPH, 51, 1},
+	{ DBGBUS_PERIPH, 51, 2},
+	{ DBGBUS_PERIPH, 51, 3},
+	{ DBGBUS_PERIPH, 51, 4},
+	{ DBGBUS_PERIPH, 51, 5},
+	{ DBGBUS_PERIPH, 51, 6},
+	{ DBGBUS_PERIPH, 51, 7},
+
+	/* dsc3 */
+	{ DBGBUS_PERIPH, 52, 0},
+	{ DBGBUS_PERIPH, 52, 1},
+	{ DBGBUS_PERIPH, 52, 2},
+	{ DBGBUS_PERIPH, 52, 3},
+	{ DBGBUS_PERIPH, 52, 4},
+	{ DBGBUS_PERIPH, 52, 5},
+	{ DBGBUS_PERIPH, 52, 6},
+	{ DBGBUS_PERIPH, 52, 7},
+
+	/* tear-check */
+	{ DBGBUS_PERIPH, 63, 0 },
+	{ DBGBUS_PERIPH, 64, 0 },
+	{ DBGBUS_PERIPH, 65, 0 },
+	{ DBGBUS_PERIPH, 73, 0 },
+	{ DBGBUS_PERIPH, 74, 0 },
+
+	/* cdwn */
+	{ DBGBUS_PERIPH, 80, 0},
+	{ DBGBUS_PERIPH, 80, 1},
+	{ DBGBUS_PERIPH, 80, 2},
+
+	{ DBGBUS_PERIPH, 81, 0},
+	{ DBGBUS_PERIPH, 81, 1},
+	{ DBGBUS_PERIPH, 81, 2},
+
+	{ DBGBUS_PERIPH, 82, 0},
+	{ DBGBUS_PERIPH, 82, 1},
+	{ DBGBUS_PERIPH, 82, 2},
+	{ DBGBUS_PERIPH, 82, 3},
+	{ DBGBUS_PERIPH, 82, 4},
+	{ DBGBUS_PERIPH, 82, 5},
+	{ DBGBUS_PERIPH, 82, 6},
+	{ DBGBUS_PERIPH, 82, 7},
+
+	/* hdmi */
+	{ DBGBUS_PERIPH, 68, 0},
+	{ DBGBUS_PERIPH, 68, 1},
+	{ DBGBUS_PERIPH, 68, 2},
+	{ DBGBUS_PERIPH, 68, 3},
+	{ DBGBUS_PERIPH, 68, 4},
+	{ DBGBUS_PERIPH, 68, 5},
+
+	/* edp */
+	{ DBGBUS_PERIPH, 69, 0},
+	{ DBGBUS_PERIPH, 69, 1},
+	{ DBGBUS_PERIPH, 69, 2},
+	{ DBGBUS_PERIPH, 69, 3},
+	{ DBGBUS_PERIPH, 69, 4},
+	{ DBGBUS_PERIPH, 69, 5},
+
+	/* dsi0 */
+	{ DBGBUS_PERIPH, 70, 0},
+	{ DBGBUS_PERIPH, 70, 1},
+	{ DBGBUS_PERIPH, 70, 2},
+	{ DBGBUS_PERIPH, 70, 3},
+	{ DBGBUS_PERIPH, 70, 4},
+	{ DBGBUS_PERIPH, 70, 5},
+
+	/* dsi1 */
+	{ DBGBUS_PERIPH, 71, 0},
+	{ DBGBUS_PERIPH, 71, 1},
+	{ DBGBUS_PERIPH, 71, 2},
+	{ DBGBUS_PERIPH, 71, 3},
+	{ DBGBUS_PERIPH, 71, 4},
+	{ DBGBUS_PERIPH, 71, 5},
+};
+
+static struct vbif_debug_bus_entry vbif_dbg_bus_msm8998[] = {
+	{0x214, 0x21c, 16, 2, 0x0, 0xd},     /* arb clients */
+	{0x214, 0x21c, 16, 2, 0x80, 0xc0},   /* arb clients */
+	{0x214, 0x21c, 16, 2, 0x100, 0x140}, /* arb clients */
+	{0x214, 0x21c, 0, 16, 0x0, 0xf},     /* xin blocks - axi side */
+	{0x214, 0x21c, 0, 16, 0x80, 0xa4},   /* xin blocks - axi side */
+	{0x214, 0x21c, 0, 15, 0x100, 0x124}, /* xin blocks - axi side */
+	{0x21c, 0x214, 0, 14, 0, 0xc}, /* xin blocks - clock side */
+};
+
+/**
+ * _dpu_dbg_enable_power - turn power on or off for hw register access
+ * @enable: whether to turn power on or off
+ */
+static inline void _dpu_dbg_enable_power(int enable)
+{
+	if (enable)
+		pm_runtime_get_sync(dpu_dbg_base.dev);
+	else
+		pm_runtime_put_sync(dpu_dbg_base.dev);
+}
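+
+/*
+ * Illustrative pattern only (mirrors the dump paths below, not new driver
+ * code): callers bracket a block of register accesses with a power vote.
+ *
+ *	_dpu_dbg_enable_power(true);
+ *	... readl_relaxed()/writel_relaxed() against mem_base ...
+ *	_dpu_dbg_enable_power(false);
+ */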
+
+static void _dpu_dbg_dump_dpu_dbg_bus(struct dpu_dbg_dpu_debug_bus *bus)
+{
+	bool in_log, in_mem;
+	u32 **dump_mem = NULL;
+	u32 *dump_addr = NULL;
+	u32 status = 0;
+	struct dpu_debug_bus_entry *head;
+	dma_addr_t dma = 0;
+	int list_size;
+	int i;
+	u32 offset;
+	void __iomem *mem_base = NULL;
+	struct dpu_dbg_reg_base *reg_base;
+
+	if (!bus || !bus->cmn.entries_size)
+		return;
+
+	list_for_each_entry(reg_base, &dpu_dbg_base.reg_base_list,
+			reg_base_head)
+		if (strlen(reg_base->name) &&
+			!strcmp(reg_base->name, bus->cmn.name))
+			mem_base = reg_base->base + bus->top_blk_off;
+
+	if (!mem_base) {
+		pr_err("unable to find mem_base for %s\n", bus->cmn.name);
+		return;
+	}
+
+	dump_mem = &bus->cmn.dumped_content;
+
+	/* each bus entry is dumped as 4 words of 4 bytes each */
+	list_size = (bus->cmn.entries_size * 4 * 4);
+
+	in_log = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_LOG);
+	in_mem = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_MEM);
+
+	if (!in_log && !in_mem)
+		return;
+
+	dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
+			bus->cmn.name);
+
+	if (in_mem) {
+		if (!(*dump_mem))
+			*dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
+				list_size, &dma, GFP_KERNEL);
+
+		if (*dump_mem) {
+			dump_addr = *dump_mem;
+			dev_info(dpu_dbg_base.dev,
+				"%s: start_addr:0x%pK len:0x%x\n",
+				__func__, dump_addr, list_size);
+		} else {
+			in_mem = false;
+			pr_err("dump_mem: allocation fails\n");
+		}
+	}
+
+	_dpu_dbg_enable_power(true);
+	for (i = 0; i < bus->cmn.entries_size; i++) {
+		head = bus->entries + i;
+		writel_relaxed(TEST_MASK(head->block_id, head->test_id),
+				mem_base + head->wr_addr);
+		wmb(); /* make sure test bits were written */
+
+		if (bus->cmn.flags & DBGBUS_FLAGS_DSPP) {
+			offset = DBGBUS_DSPP_STATUS;
+			/* keep DSPP test point enabled */
+			if (head->wr_addr != DBGBUS_DSPP)
+				writel_relaxed(0xF, mem_base + DBGBUS_DSPP);
+		} else {
+			offset = head->wr_addr + 0x4;
+		}
+
+		status = readl_relaxed(mem_base + offset);
+
+		if (in_log)
+			dev_info(dpu_dbg_base.dev,
+					"waddr=0x%x blk=%d tst=%d val=0x%x\n",
+					head->wr_addr, head->block_id,
+					head->test_id, status);
+
+		if (dump_addr && in_mem) {
+			dump_addr[i*4]     = head->wr_addr;
+			dump_addr[i*4 + 1] = head->block_id;
+			dump_addr[i*4 + 2] = head->test_id;
+			dump_addr[i*4 + 3] = status;
+		}
+
+		if (head->analyzer)
+			head->analyzer(mem_base, head, status);
+
+		/* Disable debug bus once we are done */
+		writel_relaxed(0, mem_base + head->wr_addr);
+		if (bus->cmn.flags & DBGBUS_FLAGS_DSPP &&
+						head->wr_addr != DBGBUS_DSPP)
+			writel_relaxed(0x0, mem_base + DBGBUS_DSPP);
+	}
+	_dpu_dbg_enable_power(false);
+
+	dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
+			bus->cmn.name);
+}
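+
+/*
+ * Decoding sketch (hypothetical, not part of this patch): each bus entry
+ * lands in the dump buffer as four u32 words in the order written above:
+ * wr_addr, block_id, test_id, status.
+ *
+ *	u32 *rec = bus->cmn.dumped_content;
+ *	for (i = 0; i < bus->cmn.entries_size; i++, rec += 4)
+ *		pr_info("waddr=0x%x blk=%u tst=%u val=0x%x\n",
+ *			rec[0], rec[1], rec[2], rec[3]);
+ */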
+
+static void _dpu_dbg_dump_vbif_debug_bus_entry(
+		struct vbif_debug_bus_entry *head, void __iomem *mem_base,
+		u32 *dump_addr, bool in_log)
+{
+	int i, j;
+	u32 val;
+
+	if (!dump_addr && !in_log)
+		return;
+
+	for (i = 0; i < head->block_cnt; i++) {
+		writel_relaxed(1 << (i + head->bit_offset),
+				mem_base + head->block_bus_addr);
+		/* make sure that the current bus block is enabled */
+		wmb();
+		for (j = head->test_pnt_start; j < head->test_pnt_cnt; j++) {
+			writel_relaxed(j, mem_base + head->block_bus_addr + 4);
+			/* make sure that test point is enabled */
+			wmb();
+			val = readl_relaxed(mem_base + MMSS_VBIF_TEST_BUS_OUT);
+			if (dump_addr) {
+				*dump_addr++ = head->block_bus_addr;
+				*dump_addr++ = i;
+				*dump_addr++ = j;
+				*dump_addr++ = val;
+			}
+			if (in_log)
+				dev_info(dpu_dbg_base.dev,
+					"testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
+					head->block_bus_addr, i, j, val);
+		}
+	}
+}
+
+static void _dpu_dbg_dump_vbif_dbg_bus(struct dpu_dbg_vbif_debug_bus *bus)
+{
+	bool in_log, in_mem;
+	u32 **dump_mem = NULL;
+	u32 *dump_addr = NULL;
+	u32 value, d0, d1;
+	unsigned long reg, reg1, reg2;
+	struct vbif_debug_bus_entry *head;
+	dma_addr_t dma = 0;
+	int i, list_size = 0;
+	void __iomem *mem_base = NULL;
+	struct vbif_debug_bus_entry *dbg_bus;
+	u32 bus_size;
+	struct dpu_dbg_reg_base *reg_base;
+
+	if (!bus || !bus->cmn.entries_size)
+		return;
+
+	list_for_each_entry(reg_base, &dpu_dbg_base.reg_base_list,
+			reg_base_head)
+		if (strlen(reg_base->name) &&
+			!strcmp(reg_base->name, bus->cmn.name))
+			mem_base = reg_base->base;
+
+	if (!mem_base) {
+		pr_err("unable to find mem_base for %s\n", bus->cmn.name);
+		return;
+	}
+
+	dbg_bus = bus->entries;
+	bus_size = bus->cmn.entries_size;
+	list_size = bus->cmn.entries_size;
+	dump_mem = &bus->cmn.dumped_content;
+
+	dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
+			bus->cmn.name);
+
+	if (!dump_mem || !dbg_bus || !bus_size || !list_size)
+		return;
+
+	/* allocate memory for each test point */
+	for (i = 0; i < bus_size; i++) {
+		head = dbg_bus + i;
+		list_size += (head->block_cnt * head->test_pnt_cnt);
+	}
+
+	/* 4 bytes * 4 entries for each test point */
+	list_size *= 16;
+
+	in_log = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_LOG);
+	in_mem = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_MEM);
+
+	if (!in_log && !in_mem)
+		return;
+
+	if (in_mem) {
+		if (!(*dump_mem))
+			*dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
+				list_size, &dma, GFP_KERNEL);
+
+		if (*dump_mem) {
+			dump_addr = *dump_mem;
+			dev_info(dpu_dbg_base.dev,
+				"%s: start_addr:0x%pK len:0x%x\n",
+				__func__, dump_addr, list_size);
+		} else {
+			in_mem = false;
+			pr_err("dump_mem: allocation fails\n");
+		}
+	}
+
+	_dpu_dbg_enable_power(true);
+
+	value = readl_relaxed(mem_base + MMSS_VBIF_CLKON);
+	writel_relaxed(value | BIT(1), mem_base + MMSS_VBIF_CLKON);
+
+	/* make sure that vbif core is on */
+	wmb();
+
+	/*
+	 * Extract VBIF error info based on XIN halt and error status.
+	 * If the XIN client is not in HALT state, or an error is detected,
+	 * then retrieve the VBIF error info for it.
+	 */
+	reg = readl_relaxed(mem_base + MMSS_VBIF_XIN_HALT_CTRL1);
+	reg1 = readl_relaxed(mem_base + MMSS_VBIF_PND_ERR);
+	reg2 = readl_relaxed(mem_base + MMSS_VBIF_SRC_ERR);
+	dev_err(dpu_dbg_base.dev,
+			"XIN HALT:0x%lX, PND ERR:0x%lX, SRC ERR:0x%lX\n",
+			reg, reg1, reg2);
+	reg >>= 16;
+	reg &= ~(reg1 | reg2);
+	for (i = 0; i < MMSS_VBIF_CLIENT_NUM; i++) {
+		if (!test_bit(0, &reg)) {
+			writel_relaxed(i, mem_base + MMSS_VBIF_ERR_INFO);
+			/* make sure reg write goes through */
+			wmb();
+
+			d0 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO);
+			d1 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO_1);
+
+			dev_err(dpu_dbg_base.dev,
+					"Client:%d, errinfo=0x%X, errinfo1=0x%X\n",
+					i, d0, d1);
+		}
+		reg >>= 1;
+	}
+
+	for (i = 0; i < bus_size; i++) {
+		head = dbg_bus + i;
+
+		writel_relaxed(0, mem_base + head->disable_bus_addr);
+		writel_relaxed(BIT(0), mem_base + MMSS_VBIF_TEST_BUS_OUT_CTRL);
+		/* make sure that the other bus is off */
+		wmb();
+
+		_dpu_dbg_dump_vbif_debug_bus_entry(head, mem_base, dump_addr,
+				in_log);
+		if (dump_addr)
+			dump_addr += (head->block_cnt * head->test_pnt_cnt * 4);
+	}
+
+	_dpu_dbg_enable_power(false);
+
+	dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
+			bus->cmn.name);
+}
+
+/**
+ * _dpu_dump_array - dump array of register bases
+ * @name: string indicating origin of dump
+ * @dump_dbgbus_dpu: whether to dump the dpu debug bus
+ * @dump_dbgbus_vbif_rt: whether to dump the vbif rt debug bus
+ */
+static void _dpu_dump_array(const char *name, bool dump_dbgbus_dpu,
+			    bool dump_dbgbus_vbif_rt)
+{
+	if (dump_dbgbus_dpu)
+		_dpu_dbg_dump_dpu_dbg_bus(&dpu_dbg_base.dbgbus_dpu);
+
+	if (dump_dbgbus_vbif_rt)
+		_dpu_dbg_dump_vbif_dbg_bus(&dpu_dbg_base.dbgbus_vbif_rt);
+}
+
+/**
+ * _dpu_dump_work - deferred dump work function
+ * @work: work structure
+ */
+static void _dpu_dump_work(struct work_struct *work)
+{
+	_dpu_dump_array("dpudump_workitem",
+		dpu_dbg_base.dbgbus_dpu.cmn.include_in_deferred_work,
+		dpu_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work);
+}
+
+void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
+		  bool dump_dbgbus_vbif_rt)
+{
+	if (queue_work && work_pending(&dpu_dbg_base.dump_work))
+		return;
+
+	if (!queue_work) {
+		_dpu_dump_array(name, dump_dbgbus_dpu, dump_dbgbus_vbif_rt);
+		return;
+	}
+
+	/* schedule work to dump later */
+	dpu_dbg_base.dbgbus_dpu.cmn.include_in_deferred_work = dump_dbgbus_dpu;
+	dpu_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work =
+			dump_dbgbus_vbif_rt;
+	schedule_work(&dpu_dbg_base.dump_work);
+}
+
+/**
+ * dpu_dbg_debugfs_open - debugfs open handler for debug dump
+ * @inode: debugfs inode
+ * @file: file handle
+ */
+static int dpu_dbg_debugfs_open(struct inode *inode, struct file *file)
+{
+	/* non-seekable */
+	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+/**
+ * dpu_dbg_dump_write - debugfs write handler for debug dump
+ * @file: file handle
+ * @user_buf: user buffer content from debugfs
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ */
+static ssize_t dpu_dbg_dump_write(struct file *file,
+	const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	_dpu_dump_array("dump_debugfs", true, true);
+	return count;
+}
+
+static const struct file_operations dpu_dbg_dump_fops = {
+	.open = dpu_dbg_debugfs_open,
+	.write = dpu_dbg_dump_write,
+};
+
+int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
+{
+	static struct dpu_dbg_base *dbg = &dpu_dbg_base;
+	char debug_name[80] = "";
+
+	if (!debugfs_root)
+		return -EINVAL;
+
+	debugfs_create_file("dump", 0600, debugfs_root, NULL,
+			&dpu_dbg_dump_fops);
+
+	if (dbg->dbgbus_dpu.entries) {
+		dbg->dbgbus_dpu.cmn.name = DBGBUS_NAME_DPU;
+		snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
+				dbg->dbgbus_dpu.cmn.name);
+		dbg->dbgbus_dpu.cmn.enable_mask = DEFAULT_DBGBUS_DPU;
+		debugfs_create_u32(debug_name, 0600, debugfs_root,
+				&dbg->dbgbus_dpu.cmn.enable_mask);
+	}
+
+	if (dbg->dbgbus_vbif_rt.entries) {
+		dbg->dbgbus_vbif_rt.cmn.name = DBGBUS_NAME_VBIF_RT;
+		snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
+				dbg->dbgbus_vbif_rt.cmn.name);
+		dbg->dbgbus_vbif_rt.cmn.enable_mask = DEFAULT_DBGBUS_VBIFRT;
+		debugfs_create_u32(debug_name, 0600, debugfs_root,
+				&dbg->dbgbus_vbif_rt.cmn.enable_mask);
+	}
+
+	return 0;
+}
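+
+/*
+ * Example debugfs usage (the path below is an assumption; the actual root
+ * and node names depend on debugfs_root and DBGBUS_NAME_*): writing any
+ * value to "dump" dumps both buses synchronously, and the <name>_dbgbus
+ * nodes hold the enable_mask bits (DPU_DBG_DUMP_IN_LOG,
+ * DPU_DBG_DUMP_IN_MEM).
+ *
+ *	echo 1 > /sys/kernel/debug/dri/0/dump
+ */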
+
+static void _dpu_dbg_debugfs_destroy(void)
+{
+}
+
+void dpu_dbg_init_dbg_buses(u32 hwversion)
+{
+	static struct dpu_dbg_base *dbg = &dpu_dbg_base;
+
+	memset(&dbg->dbgbus_dpu, 0, sizeof(dbg->dbgbus_dpu));
+	memset(&dbg->dbgbus_vbif_rt, 0, sizeof(dbg->dbgbus_vbif_rt));
+
+	if (IS_MSM8998_TARGET(hwversion)) {
+		dbg->dbgbus_dpu.entries = dbg_bus_dpu_8998;
+		dbg->dbgbus_dpu.cmn.entries_size = ARRAY_SIZE(dbg_bus_dpu_8998);
+		dbg->dbgbus_dpu.cmn.flags = DBGBUS_FLAGS_DSPP;
+
+		dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
+		dbg->dbgbus_vbif_rt.cmn.entries_size =
+				ARRAY_SIZE(vbif_dbg_bus_msm8998);
+	} else if (IS_SDM845_TARGET(hwversion) || IS_SDM670_TARGET(hwversion)) {
+		dbg->dbgbus_dpu.entries = dbg_bus_dpu_sdm845;
+		dbg->dbgbus_dpu.cmn.entries_size =
+				ARRAY_SIZE(dbg_bus_dpu_sdm845);
+		dbg->dbgbus_dpu.cmn.flags = DBGBUS_FLAGS_DSPP;
+
+		/* vbif is unchanged vs 8998 */
+		dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
+		dbg->dbgbus_vbif_rt.cmn.entries_size =
+				ARRAY_SIZE(vbif_dbg_bus_msm8998);
+	} else {
+		pr_err("unsupported chipset id %X\n", hwversion);
+	}
+}
+
+int dpu_dbg_init(struct device *dev)
+{
+	if (!dev) {
+		pr_err("invalid params\n");
+		return -EINVAL;
+	}
+
+	INIT_LIST_HEAD(&dpu_dbg_base.reg_base_list);
+	dpu_dbg_base.dev = dev;
+
+	INIT_WORK(&dpu_dbg_base.dump_work, _dpu_dump_work);
+
+	return 0;
+}
+
+/**
+ * dpu_dbg_destroy - destroy dpu debug facilities
+ */
+void dpu_dbg_destroy(void)
+{
+	_dpu_dbg_debugfs_destroy();
+}
+
+void dpu_dbg_set_dpu_top_offset(u32 blk_off)
+{
+	dpu_dbg_base.dbgbus_dpu.top_blk_off = blk_off;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h
new file mode 100644
index 0000000..1e6fa94
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h
@@ -0,0 +1,103 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DPU_DBG_H_
+#define DPU_DBG_H_
+
+#include <stdarg.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+
+enum dpu_dbg_dump_flag {
+	DPU_DBG_DUMP_IN_LOG = BIT(0),
+	DPU_DBG_DUMP_IN_MEM = BIT(1),
+};
+
+#if defined(CONFIG_DEBUG_FS)
+
+/**
+ * dpu_dbg_init_dbg_buses - initialize debug bus dumping support for the chipset
+ * @hwversion:		Chipset revision
+ */
+void dpu_dbg_init_dbg_buses(u32 hwversion);
+
+/**
+ * dpu_dbg_init - initialize global dpu debug facilities: regdump
+ * @dev:		device handle
+ * Returns:		0 or -ERROR
+ */
+int dpu_dbg_init(struct device *dev);
+
+/**
+ * dpu_dbg_debugfs_register - register entries at the given debugfs dir
+ * @debugfs_root:	debugfs root in which to create dpu debug entries
+ * Returns:	0 or -ERROR
+ */
+int dpu_dbg_debugfs_register(struct dentry *debugfs_root);
+
+/**
+ * dpu_dbg_destroy - destroy the global dpu debug facilities
+ * Returns:	none
+ */
+void dpu_dbg_destroy(void);
+
+/**
+ * dpu_dbg_dump - trigger dumping of all dpu_dbg facilities
+ * @queue_work:	  whether to queue the dumping work to the work_struct
+ * @name:	  string indicating origin of dump
+ * @dump_dbgbus_dpu:     dump the dpu debug bus
+ * @dump_dbgbus_vbif_rt: dump the vbif rt debug bus
+ * Returns:	none
+ */
+void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
+		  bool dump_dbgbus_vbif_rt);
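+
+/*
+ * Illustrative calls (not from this patch): dump both buses synchronously
+ * from an error path, or defer to the dump work item when the calling
+ * context cannot tolerate long register reads:
+ *
+ *	dpu_dbg_dump(false, __func__, true, true);
+ *	dpu_dbg_dump(true, __func__, true, true);
+ */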
+
+/**
+ * dpu_dbg_set_dpu_top_offset - set the target specific offset from mdss base
+ *	address of the top registers. Used for accessing debug bus controls.
+ * @blk_off: offset from mdss base of the top block
+ */
+void dpu_dbg_set_dpu_top_offset(u32 blk_off);
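+
+/*
+ * Typical bring-up order, sketched from the declarations above (the exact
+ * call sites are an assumption):
+ *
+ *	dpu_dbg_init(dev);
+ *	dpu_dbg_init_dbg_buses(hwversion);
+ *	dpu_dbg_set_dpu_top_offset(top_blk_off);
+ *	dpu_dbg_debugfs_register(debugfs_root);
+ */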
+
+#else
+
+static inline void dpu_dbg_init_dbg_buses(u32 hwversion)
+{
+}
+
+static inline int dpu_dbg_init(struct device *dev)
+{
+	return 0;
+}
+
+static inline int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
+{
+	return 0;
+}
+
+static inline void dpu_dbg_destroy(void)
+{
+}
+
+static inline void dpu_dbg_dump(bool queue_work, const char *name,
+				bool dump_dbgbus_dpu, bool dump_dbgbus_vbif_rt)
+{
+}
+
+static inline void dpu_dbg_set_dpu_top_offset(u32 blk_off)
+{
+}
+
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* DPU_DBG_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 9a401ed..ec3fd67 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -264,6 +264,9 @@
 				DPU_ENCODER_FRAME_EVENT_ERROR);
 }
 
+static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
+		int32_t hw_id, struct dpu_encoder_wait_info *info);
+
 int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
 		enum dpu_intr_idx intr_idx,
 		struct dpu_encoder_wait_info *wait_info)
@@ -467,7 +470,7 @@
 	}
 }
 
-void dpu_encoder_destroy(struct drm_encoder *drm_enc)
+static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
 {
 	struct dpu_encoder_virt *dpu_enc = NULL;
 	int i = 0;
@@ -1514,7 +1517,7 @@
 	}
 }
 
-int dpu_encoder_helper_wait_event_timeout(
+static int dpu_encoder_helper_wait_event_timeout(
 		int32_t drm_id,
 		int32_t hw_id,
 		struct dpu_encoder_wait_info *info)
@@ -1625,22 +1628,6 @@
 	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
 }
 
-bool dpu_encoder_check_mode(struct drm_encoder *drm_enc, u32 mode)
-{
-	struct dpu_encoder_virt *dpu_enc;
-	struct msm_display_info *disp_info;
-
-	if (!drm_enc) {
-		DPU_ERROR("invalid encoder\n");
-		return false;
-	}
-
-	dpu_enc = to_dpu_encoder_virt(drm_enc);
-	disp_info = &dpu_enc->disp_info;
-
-	return (disp_info->capabilities & mode);
-}
-
 void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
 {
 	struct dpu_encoder_virt *dpu_enc;
@@ -1901,70 +1888,6 @@
 	DPU_ATRACE_END("encoder_kickoff");
 }
 
-int dpu_encoder_helper_hw_release(struct dpu_encoder_phys *phys_enc,
-		struct drm_framebuffer *fb)
-{
-	struct drm_encoder *drm_enc;
-	struct dpu_hw_mixer_cfg mixer;
-	struct dpu_rm_hw_iter lm_iter;
-	bool lm_valid = false;
-
-	if (!phys_enc || !phys_enc->parent) {
-		DPU_ERROR("invalid encoder\n");
-		return -EINVAL;
-	}
-
-	drm_enc = phys_enc->parent;
-	memset(&mixer, 0, sizeof(mixer));
-
-	/* reset associated CTL/LMs */
-	if (phys_enc->hw_ctl->ops.clear_pending_flush)
-		phys_enc->hw_ctl->ops.clear_pending_flush(phys_enc->hw_ctl);
-	if (phys_enc->hw_ctl->ops.clear_all_blendstages)
-		phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl);
-
-	dpu_rm_init_hw_iter(&lm_iter, drm_enc->base.id, DPU_HW_BLK_LM);
-	while (dpu_rm_get_hw(&phys_enc->dpu_kms->rm, &lm_iter)) {
-		struct dpu_hw_mixer *hw_lm = (struct dpu_hw_mixer *)lm_iter.hw;
-
-		if (!hw_lm)
-			continue;
-
-		/* need to flush LM to remove it */
-		if (phys_enc->hw_ctl->ops.get_bitmask_mixer &&
-				phys_enc->hw_ctl->ops.update_pending_flush)
-			phys_enc->hw_ctl->ops.update_pending_flush(
-					phys_enc->hw_ctl,
-					phys_enc->hw_ctl->ops.get_bitmask_mixer(
-					phys_enc->hw_ctl, hw_lm->idx));
-
-		if (fb) {
-			/* assume a single LM if targeting a frame buffer */
-			if (lm_valid)
-				continue;
-
-			mixer.out_height = fb->height;
-			mixer.out_width = fb->width;
-
-			if (hw_lm->ops.setup_mixer_out)
-				hw_lm->ops.setup_mixer_out(hw_lm, &mixer);
-		}
-
-		lm_valid = true;
-
-		/* only enable border color on LM */
-		if (phys_enc->hw_ctl->ops.setup_blendstage)
-			phys_enc->hw_ctl->ops.setup_blendstage(
-					phys_enc->hw_ctl, hw_lm->idx, NULL);
-	}
-
-	if (!lm_valid) {
-		DPU_DEBUG_ENC(to_dpu_encoder_virt(drm_enc), "lm not found\n");
-		return -EFAULT;
-	}
-	return 0;
-}
-
 void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
 {
 	struct dpu_encoder_virt *dpu_enc;
@@ -2519,6 +2442,8 @@
 
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+		if (!phys)
+			continue;
 
 		switch (event) {
 		case MSM_ENC_COMMIT_DONE:
@@ -2536,7 +2461,7 @@
 			return -EINVAL;
 		};
 
-		if (phys && fn_wait) {
+		if (fn_wait) {
 			DPU_ATRACE_BEGIN("wait_for_completion_event");
 			ret = fn_wait(phys);
 			DPU_ATRACE_END("wait_for_completion_event");
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
new file mode 100644
index 0000000..60f809f
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DPU_ENCODER_H__
+#define __DPU_ENCODER_H__
+
+#include <drm/drm_crtc.h>
+#include "dpu_hw_mdss.h"
+
+#define DPU_ENCODER_FRAME_EVENT_DONE			BIT(0)
+#define DPU_ENCODER_FRAME_EVENT_ERROR			BIT(1)
+#define DPU_ENCODER_FRAME_EVENT_PANEL_DEAD		BIT(2)
+#define DPU_ENCODER_FRAME_EVENT_IDLE			BIT(3)
+
+#define IDLE_TIMEOUT	(66 - 16/2)
+
+/**
+ * struct dpu_encoder_hw_resources - hardware resources used by the encoder
+ * @intfs:	Interfaces this encoder is using, INTF_MODE_NONE if unused
+ * @needs_cdm:	Encoder requests a CDM based on pixel format conversion needs
+ * @display_num_of_h_tiles: Number of horizontal tiles in case of split
+ *                          interface
+ */
+struct dpu_encoder_hw_resources {
+	enum dpu_intf_mode intfs[INTF_MAX];
+	bool needs_cdm;
+	u32 display_num_of_h_tiles;
+};
+
+/**
+ * dpu_encoder_kickoff_params - info encoder requires at kickoff
+ * @affected_displays:  bitmask, bit set means the ROI of the commit lies within
+ *                      the bounds of the physical display at the bit index
+ */
+struct dpu_encoder_kickoff_params {
+	unsigned long affected_displays;
+};
+
+/**
+ * dpu_encoder_get_hw_resources - Populate table of required hardware resources
+ * @encoder:	encoder pointer
+ * @hw_res:	resource table to populate with encoder required resources
+ * @conn_state:	report hw reqs based on this proposed connector state
+ */
+void dpu_encoder_get_hw_resources(struct drm_encoder *encoder,
+		struct dpu_encoder_hw_resources *hw_res,
+		struct drm_connector_state *conn_state);
+
+/**
+ * dpu_encoder_register_vblank_callback - provide callback to encoder that
+ *	will be called on the next vblank.
+ * @encoder:	encoder pointer
+ * @cb:		callback pointer, provide NULL to deregister and disable IRQs
+ * @data:	user data provided to callback
+ */
+void dpu_encoder_register_vblank_callback(struct drm_encoder *encoder,
+		void (*cb)(void *), void *data);
+
+/**
+ * dpu_encoder_register_frame_event_callback - provide callback to encoder that
+ *	will be called after the request is complete, or other events.
+ * @encoder:	encoder pointer
+ * @cb:		callback pointer, provide NULL to deregister
+ * @data:	user data provided to callback
+ */
+void dpu_encoder_register_frame_event_callback(struct drm_encoder *encoder,
+		void (*cb)(void *, u32), void *data);
+
+/**
+ * dpu_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl
+ *	path (i.e. ctl flush and start) at next appropriate time.
+ *	Immediately: if no previous commit is outstanding.
+ *	Delayed: Block until next trigger can be issued.
+ * @encoder:	encoder pointer
+ * @params:	kickoff time parameters
+ */
+void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder,
+		struct dpu_encoder_kickoff_params *params);
+
+/**
+ * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous
+ *        kickoff and trigger the ctl prepare progress for command mode display.
+ * @encoder:	encoder pointer
+ */
+void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path
+ *	(i.e. ctl flush and start) immediately.
+ * @encoder:	encoder pointer
+ */
+void dpu_encoder_kickoff(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_wait_for_event - Waits for encoder events
+ * @drm_encoder:	encoder pointer
+ * @event:      event to wait for
+ * MSM_ENC_COMMIT_DONE -  Wait for hardware to have flushed the current pending
+ *                        frames to hardware at a vblank or ctl_start.
+ *                        Encoders will map this differently depending on the
+ *                        panel type.
+ *	                  vid mode -> vsync_irq
+ *                        cmd mode -> ctl_start
+ * MSM_ENC_TX_COMPLETE -  Wait for the hardware to transfer all the pixels to
+ *                        the panel. Encoders will map this differently
+ *                        depending on the panel type.
+ *                        vid mode -> vsync_irq
+ *                        cmd mode -> pp_done
+ * Returns: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
+ */
+int dpu_encoder_wait_for_event(struct drm_encoder *drm_encoder,
+						enum msm_event_wait event);
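+
+/*
+ * Illustrative call site (assumed, not part of this patch): block until the
+ * pending frame has been flushed to hardware before disabling the encoder.
+ *
+ *	ret = dpu_encoder_wait_for_event(drm_enc, MSM_ENC_COMMIT_DONE);
+ *	if (ret && ret != -EWOULDBLOCK)
+ *		DPU_ERROR("wait for commit done failed, ret %d\n", ret);
+ */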
+
+/**
+ * dpu_encoder_get_intf_mode - get interface mode of the given encoder
+ * @encoder: Pointer to drm encoder object
+ */
+enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_virt_restore - restore the encoder configs
+ * @encoder:	encoder pointer
+ */
+void dpu_encoder_virt_restore(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_init - initialize virtual encoder object
+ * @dev:        Pointer to drm device structure
+ * @drm_enc_mode: DRM_MODE_ENCODER_* type of the encoder to create
+ * Returns:     Pointer to newly created drm encoder
+ */
+struct drm_encoder *dpu_encoder_init(
+		struct drm_device *dev,
+		int drm_enc_mode);
+
+/**
+ * dpu_encoder_setup - setup dpu_encoder for the display probed
+ * @dev:		Pointer to drm device structure
+ * @enc:		Pointer to the drm_encoder
+ * @disp_info:	Pointer to the display info
+ */
+int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
+		struct msm_display_info *disp_info);
+
+/**
+ * dpu_encoder_prepare_commit - prepare encoder at the very beginning of an
+ *	atomic commit, before any registers are written
+ * @drm_enc:    Pointer to previously created drm encoder structure
+ */
+void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc);
+
+/**
+ * dpu_encoder_set_idle_timeout - set the idle timeout for video
+ *                    and command mode encoders.
+ * @drm_enc:    Pointer to previously created drm encoder structure
+ * @idle_timeout:    idle timeout duration in milliseconds
+ */
+void dpu_encoder_set_idle_timeout(struct drm_encoder *drm_enc,
+							u32 idle_timeout);
+
+#endif /* __DPU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
new file mode 100644
index 0000000..c7df8aa
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DPU_ENCODER_PHYS_H__
+#define __DPU_ENCODER_PHYS_H__
+
+#include <linux/jiffies.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_intf.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_top.h"
+#include "dpu_hw_cdm.h"
+#include "dpu_encoder.h"
+
+#define DPU_ENCODER_NAME_MAX	16
+
+/* wait for at most 2 vsyncs at the lowest refresh rate (24 Hz): 2 * 1000 / 24 ~= 84 ms */
+#define KICKOFF_TIMEOUT_MS		84
+#define KICKOFF_TIMEOUT_JIFFIES		msecs_to_jiffies(KICKOFF_TIMEOUT_MS)
+
+/**
+ * enum dpu_enc_split_role - Role this physical encoder will play in a
+ *	split-panel configuration, where one panel is master, and others slaves.
+ *	Masters have extra responsibilities, like managing the VBLANK IRQ.
+ * @ENC_ROLE_SOLO:	This is the one and only panel. This encoder is master.
+ * @ENC_ROLE_MASTER:	This encoder is the master of a split panel config.
+ * @ENC_ROLE_SLAVE:	This encoder is not the master of a split panel config.
+ */
+enum dpu_enc_split_role {
+	ENC_ROLE_SOLO,
+	ENC_ROLE_MASTER,
+	ENC_ROLE_SLAVE,
+};
+
+/**
+ * enum dpu_enc_enable_state - current enabled state of the physical encoder
+ * @DPU_ENC_DISABLING:	Encoder transitioning to disable state
+ *			Events bounding transition are encoder type specific
+ * @DPU_ENC_DISABLED:	Encoder is disabled
+ * @DPU_ENC_ENABLING:	Encoder transitioning to enabled
+ *			Events bounding transition are encoder type specific
+ * @DPU_ENC_ENABLED:	Encoder is enabled
+ * @DPU_ENC_ERR_NEEDS_HW_RESET:	Encoder is enabled, but requires a hw_reset
+ *				to recover from a previous error
+ */
+enum dpu_enc_enable_state {
+	DPU_ENC_DISABLING,
+	DPU_ENC_DISABLED,
+	DPU_ENC_ENABLING,
+	DPU_ENC_ENABLED,
+	DPU_ENC_ERR_NEEDS_HW_RESET
+};
+
+struct dpu_encoder_phys;
+
+/**
+ * struct dpu_encoder_virt_ops - Interface the containing virtual encoder
+ *	provides for the physical encoders to use to callback.
+ * @handle_vblank_virt:	Notify virtual encoder of vblank IRQ reception
+ *			Note: This is called from IRQ handler context.
+ * @handle_underrun_virt: Notify virtual encoder of underrun IRQ reception
+ *			Note: This is called from IRQ handler context.
+ * @handle_frame_done:	Notify virtual encoder that this phys encoder
+ *			has completed the last requested frame.
+ */
+struct dpu_encoder_virt_ops {
+	void (*handle_vblank_virt)(struct drm_encoder *,
+			struct dpu_encoder_phys *phys);
+	void (*handle_underrun_virt)(struct drm_encoder *,
+			struct dpu_encoder_phys *phys);
+	void (*handle_frame_done)(struct drm_encoder *,
+			struct dpu_encoder_phys *phys, u32 event);
+};
+
+/**
+ * struct dpu_encoder_phys_ops - Interface the physical encoders provide to
+ *	the containing virtual encoder.
+ * @late_register:		DRM Call. Add Userspace interfaces, debugfs.
+ * @prepare_commit:		MSM Atomic Call, start of atomic commit sequence
+ * @is_master:			Whether this phys_enc is the current master
+ *				encoder. Can be switched at enable time. Based
+ *				on split_role and current mode (CMD/VID).
+ * @mode_fixup:			DRM Call. Fixup a DRM mode.
+ * @mode_set:			DRM Call. Set a DRM mode.
+ *				This likely caches the mode, for use at enable.
+ * @enable:			DRM Call. Enable a DRM mode.
+ * @disable:			DRM Call. Disable mode.
+ * @atomic_check:		DRM Call. Atomic check new DRM state.
+ * @destroy:			DRM Call. Destroy and release resources.
+ * @get_hw_resources:		Populate the structure with the hardware
+ *				resources that this phys_enc is using.
+ *				Expect no overlap between phys_encs.
+ * @control_vblank_irq:		Register/deregister for VBLANK IRQ
+ * @wait_for_commit_done:	Wait for hardware to have flushed the
+ *				current pending frames to hardware
+ * @wait_for_tx_complete:	Wait for hardware to transfer the pixels
+ *				to the panel
+ * @wait_for_vblank:		Wait for VBLANK, for sub-driver internal use
+ * @prepare_for_kickoff:	Do any work necessary prior to a kickoff.
+ *				For CMD encoder, may wait for previous tx done
+ * @handle_post_kickoff:	Do any work necessary post-kickoff
+ * @trigger_start:		Process start event on physical encoder
+ * @needs_single_flush:		Whether encoder slaves need to be flushed
+ * @setup_misr:		Sets up MISR, enables and disables it based on sysfs
+ * @collect_misr:		Collects MISR data on frame update
+ * @hw_reset:			Issue HW recovery such as CTL reset and clear
+ *				DPU_ENC_ERR_NEEDS_HW_RESET state
+ * @irq_control:		Handler to enable/disable all the encoder IRQs
+ * @prepare_idle_pc:		phys encoder can update the vsync_enable status
+ *                              on idle power collapse prepare
+ * @restore:			Restore all the encoder configs.
+ * @get_line_count:		Obtain current vertical line count
+ */
+
+struct dpu_encoder_phys_ops {
+	int (*late_register)(struct dpu_encoder_phys *encoder,
+			struct dentry *debugfs_root);
+	void (*prepare_commit)(struct dpu_encoder_phys *encoder);
+	bool (*is_master)(struct dpu_encoder_phys *encoder);
+	bool (*mode_fixup)(struct dpu_encoder_phys *encoder,
+			const struct drm_display_mode *mode,
+			struct drm_display_mode *adjusted_mode);
+	void (*mode_set)(struct dpu_encoder_phys *encoder,
+			struct drm_display_mode *mode,
+			struct drm_display_mode *adjusted_mode);
+	void (*enable)(struct dpu_encoder_phys *encoder);
+	void (*disable)(struct dpu_encoder_phys *encoder);
+	int (*atomic_check)(struct dpu_encoder_phys *encoder,
+			    struct drm_crtc_state *crtc_state,
+			    struct drm_connector_state *conn_state);
+	void (*destroy)(struct dpu_encoder_phys *encoder);
+	void (*get_hw_resources)(struct dpu_encoder_phys *encoder,
+			struct dpu_encoder_hw_resources *hw_res,
+			struct drm_connector_state *conn_state);
+	int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable);
+	int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc);
+	int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc);
+	int (*wait_for_vblank)(struct dpu_encoder_phys *phys_enc);
+	void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc,
+			struct dpu_encoder_kickoff_params *params);
+	void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
+	void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
+	bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc);
+
+	void (*setup_misr)(struct dpu_encoder_phys *phys_encs,
+				bool enable, u32 frame_count);
+	u32 (*collect_misr)(struct dpu_encoder_phys *phys_enc);
+	void (*hw_reset)(struct dpu_encoder_phys *phys_enc);
+	void (*irq_control)(struct dpu_encoder_phys *phys, bool enable);
+	void (*prepare_idle_pc)(struct dpu_encoder_phys *phys_enc);
+	void (*restore)(struct dpu_encoder_phys *phys);
+	int (*get_line_count)(struct dpu_encoder_phys *phys);
+};
+
+/**
+ * enum dpu_intr_idx - dpu encoder interrupt index
+ * @INTR_IDX_VSYNC:    Vsync interrupt for video mode panel
+ * @INTR_IDX_PINGPONG: Pingpong done interrupt for cmd mode panel
+ * @INTR_IDX_UNDERRUN: Underrun interrupt for video and cmd mode panel
+ * @INTR_IDX_CTL_START: Ctl start interrupt for cmd mode panel
+ * @INTR_IDX_RDPTR:    Readpointer done interrupt for cmd mode panel
+ */
+enum dpu_intr_idx {
+	INTR_IDX_VSYNC,
+	INTR_IDX_PINGPONG,
+	INTR_IDX_UNDERRUN,
+	INTR_IDX_CTL_START,
+	INTR_IDX_RDPTR,
+	INTR_IDX_MAX,
+};
+
+/**
+ * dpu_encoder_irq - tracking structure for interrupts
+ * @name:		string name of interrupt
+ * @intr_type:		Encoder interrupt type
+ * @intr_idx:		Encoder interrupt enumeration
+ * @hw_idx:		HW Block ID
+ * @irq_idx:		IRQ interface lookup index from DPU IRQ framework
+ *			will be -EINVAL if IRQ is not registered
+ * @irq_cb:		interrupt callback
+ */
+struct dpu_encoder_irq {
+	const char *name;
+	enum dpu_intr_type intr_type;
+	enum dpu_intr_idx intr_idx;
+	int hw_idx;
+	int irq_idx;
+	struct dpu_irq_callback cb;
+};
+
+/**
+ * struct dpu_encoder_phys - physical encoder that drives a single INTF block
+ *	tied to a specific panel / sub-panel. Abstract type, sub-classed by
+ *	phys_vid or phys_cmd for video mode or command mode encs respectively.
+ * @parent:		Pointer to the containing virtual encoder
+ * @connector:		If a mode is set, cached pointer to the active connector
+ * @ops:		Operations exposed to the virtual encoder
+ * @parent_ops:		Callbacks exposed by the parent to the phys_enc
+ * @hw_mdptop:		Hardware interface to the top registers
+ * @hw_ctl:		Hardware interface to the ctl registers
+ * @hw_cdm:		Hardware interface to the cdm registers
+ * @cdm_cfg:		Chroma-down hardware configuration
+ * @hw_pp:		Hardware interface to the ping pong registers
+ * @dpu_kms:		Pointer to the dpu_kms top level
+ * @cached_mode:	DRM mode cached at mode_set time, acted on in enable
+ * @split_role:		Role to play in a split-panel configuration
+ * @intf_mode:		Interface mode
+ * @intf_idx:		Interface index on dpu hardware
+ * @topology_name:	topology selected for the display
+ * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @enable_state:	Enable state tracking
+ * @vblank_refcount:	Reference count of vblank request
+ * @vsync_cnt:		Vsync count for the physical encoder
+ * @underrun_cnt:	Underrun count for the physical encoder
+ * @pending_kickoff_cnt:	Atomic counter tracking the number of kickoffs
+ *				vs. the number of done/vblank irqs. Should hover
+ *				between 0-2. Incremented when a new kickoff is
+ *				scheduled; decremented in the irq handler
+ * @pending_ctlstart_cnt:	Atomic counter tracking the number of ctl start
+ *                              pending.
+ * @pending_kickoff_wq:		Wait queue for blocking until kickoff completes
+ * @irq:			IRQ tracking structures
+ */
+struct dpu_encoder_phys {
+	struct drm_encoder *parent;
+	struct drm_connector *connector;
+	struct dpu_encoder_phys_ops ops;
+	const struct dpu_encoder_virt_ops *parent_ops;
+	struct dpu_hw_mdp *hw_mdptop;
+	struct dpu_hw_ctl *hw_ctl;
+	struct dpu_hw_cdm *hw_cdm;
+	struct dpu_hw_cdm_cfg cdm_cfg;
+	struct dpu_hw_pingpong *hw_pp;
+	struct dpu_kms *dpu_kms;
+	struct drm_display_mode cached_mode;
+	enum dpu_enc_split_role split_role;
+	enum dpu_intf_mode intf_mode;
+	enum dpu_intf intf_idx;
+	enum dpu_rm_topology_name topology_name;
+	spinlock_t *enc_spinlock;
+	enum dpu_enc_enable_state enable_state;
+	atomic_t vblank_refcount;
+	atomic_t vsync_cnt;
+	atomic_t underrun_cnt;
+	atomic_t pending_ctlstart_cnt;
+	atomic_t pending_kickoff_cnt;
+	wait_queue_head_t pending_kickoff_wq;
+	struct dpu_encoder_irq irq[INTR_IDX_MAX];
+};
+
+static inline int dpu_encoder_phys_inc_pending(struct dpu_encoder_phys *phys)
+{
+	atomic_inc_return(&phys->pending_ctlstart_cnt);
+	return atomic_inc_return(&phys->pending_kickoff_cnt);
+}
+
+/**
+ * struct dpu_encoder_phys_vid - sub-class of dpu_encoder_phys to handle video
+ *	mode specific operations
+ * @base:	Baseclass physical encoder structure
+ * @hw_intf:	Hardware interface to the intf registers
+ * @timing_params: Current timing parameter
+ */
+struct dpu_encoder_phys_vid {
+	struct dpu_encoder_phys base;
+	struct dpu_hw_intf *hw_intf;
+	struct intf_timing_params timing_params;
+};
+
+/**
+ * struct dpu_encoder_phys_cmd - sub-class of dpu_encoder_phys to handle command
+ *	mode specific operations
+ * @base:	Baseclass physical encoder structure
+ * @stream_sel:	Stream selection for multi-stream interfaces
+ * @serialize_wait4pp:	serialize wait4pp feature waits for pp_done interrupt
+ *			after ctl_start instead of before next frame kickoff
+ * @pp_timeout_report_cnt: number of pingpong done irq timeout errors
+ * @pending_vblank_cnt: Atomic counter tracking pending wait for VBLANK
+ * @pending_vblank_wq: Wait queue for blocking until VBLANK received
+ */
+struct dpu_encoder_phys_cmd {
+	struct dpu_encoder_phys base;
+	int stream_sel;
+	bool serialize_wait4pp;
+	int pp_timeout_report_cnt;
+	atomic_t pending_vblank_cnt;
+	wait_queue_head_t pending_vblank_wq;
+};
+
+/**
+ * struct dpu_enc_phys_init_params - initialization parameters for phys encs
+ * @dpu_kms:		Pointer to the dpu_kms top level
+ * @parent:		Pointer to the containing virtual encoder
+ * @parent_ops:		Callbacks exposed by the parent to the phys_enc
+ * @split_role:		Role to play in a split-panel configuration
+ * @intf_idx:		Interface index this phys_enc will control
+ * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ */
+struct dpu_enc_phys_init_params {
+	struct dpu_kms *dpu_kms;
+	struct drm_encoder *parent;
+	const struct dpu_encoder_virt_ops *parent_ops;
+	enum dpu_enc_split_role split_role;
+	enum dpu_intf intf_idx;
+	spinlock_t *enc_spinlock;
+};
+
+/**
+ * struct dpu_encoder_wait_info - container for passing arguments to irq wait functions
+ * @wq: wait queue structure
+ * @atomic_cnt: wait until atomic_cnt equals zero
+ * @timeout_ms: timeout value in milliseconds
+ */
+struct dpu_encoder_wait_info {
+	wait_queue_head_t *wq;
+	atomic_t *atomic_cnt;
+	s64 timeout_ms;
+};
+
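A minimal sketch of a wait helper built on this struct, assuming the standard
wait_event_timeout() pattern (the real dpu_encoder_helper_wait_for_irq also
rechecks the hw interrupt status on timeout):

	static long example_wait(struct dpu_encoder_wait_info *info)
	{
		/* sleep until the irq handler drops atomic_cnt to zero and
		 * wakes the queue, or until timeout_ms elapses
		 */
		long rc = wait_event_timeout(*info->wq,
				atomic_read(info->atomic_cnt) == 0,
				msecs_to_jiffies(info->timeout_ms));

		return rc ? 0 : -ETIMEDOUT;
	}
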
+/**
+ * dpu_encoder_phys_vid_init - Construct a new video mode physical encoder
+ * @p:	Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+		struct dpu_enc_phys_init_params *p);
+
+/**
+ * dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder
+ * @p:	Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+		struct dpu_enc_phys_init_params *p);
+
+/**
+ * dpu_encoder_helper_trigger_start - control start helper function
+ *	This helper function may be optionally specified by physical
+ *	encoders if they require ctl_start triggering.
+ * @phys_enc: Pointer to physical encoder structure
+ */
+void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc);
+
+/**
+ * dpu_encoder_helper_hw_reset - issue ctl hw reset
+ *	This helper function may be optionally specified by physical
+ *	encoders if they require ctl hw reset. If state is currently
+ *	DPU_ENC_ERR_NEEDS_HW_RESET, it is set back to DPU_ENC_ENABLED.
+ * @phys_enc: Pointer to physical encoder structure
+ */
+void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc);
+
+static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
+		struct dpu_encoder_phys *phys_enc)
+{
+	if (!phys_enc || phys_enc->enable_state == DPU_ENC_DISABLING)
+		return BLEND_3D_NONE;
+
+	if (phys_enc->split_role == ENC_ROLE_SOLO &&
+	    phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE)
+		return BLEND_3D_H_ROW_INT;
+
+	return BLEND_3D_NONE;
+}
+
+/**
+ * dpu_encoder_helper_split_config - split display configuration helper function
+ *	This helper function may be used by physical encoders to configure
+ *	the split display related registers.
+ * @phys_enc: Pointer to physical encoder structure
+ * @interface: enum dpu_intf setting
+ */
+void dpu_encoder_helper_split_config(
+		struct dpu_encoder_phys *phys_enc,
+		enum dpu_intf interface);
+
+/**
+ * dpu_encoder_helper_report_irq_timeout - utility to report error that irq has
+ *	timed out, including reporting frame error event to crtc and debug dump
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: Failing interrupt index
+ */
+void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
+		enum dpu_intr_idx intr_idx);
+
+/**
+ * dpu_encoder_helper_wait_for_irq - utility to wait on an irq.
+ *	note: on timeout, rechecks the hw interrupt status and calls the
+ *	registered irq callback if the interrupt actually fired
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: encoder interrupt index
+ * @wait_info: wait info struct
+ * Return: 0 or -ERROR
+ */
+int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
+		enum dpu_intr_idx intr_idx,
+		struct dpu_encoder_wait_info *wait_info);
+
+/**
+ * dpu_encoder_helper_register_irq - register and enable an irq
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: encoder interrupt index
+ * Return: 0 or -ERROR
+ */
+int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
+		enum dpu_intr_idx intr_idx);
+
+/**
+ * dpu_encoder_helper_unregister_irq - unregister and disable an irq
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: encoder interrupt index
+ * Return: 0 or -ERROR
+ */
+int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
+		enum dpu_intr_idx intr_idx);
+
+#endif /* __dpu_encoder_phys_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
new file mode 100644
index 0000000..3084675
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -0,0 +1,905 @@
+/*
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_trace.h"
+
+#define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
+		(e) && (e)->base.parent ? \
+		(e)->base.parent->base.id : -1, \
+		(e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_CMDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
+		(e) && (e)->base.parent ? \
+		(e)->base.parent->base.id : -1, \
+		(e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define to_dpu_encoder_phys_cmd(x) \
+	container_of(x, struct dpu_encoder_phys_cmd, base)
+
+#define PP_TIMEOUT_MAX_TRIALS	10
+
+/*
+ * Tearcheck sync start and continue thresholds are empirically found
+ * based on common panels. In the future, we may want to allow panels to
+ * override these default values.
+ */
+#define DEFAULT_TEARCHECK_SYNC_THRESH_START	4
+#define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE	4
+
+#define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000
+
+static inline int _dpu_encoder_phys_cmd_get_idle_timeout(
+		struct dpu_encoder_phys_cmd *cmd_enc)
+{
+	return KICKOFF_TIMEOUT_MS;
+}
+
+static inline bool dpu_encoder_phys_cmd_is_master(
+		struct dpu_encoder_phys *phys_enc)
+{
+	return phys_enc->split_role != ENC_ROLE_SLAVE;
+}
+
+static bool dpu_encoder_phys_cmd_mode_fixup(
+		struct dpu_encoder_phys *phys_enc,
+		const struct drm_display_mode *mode,
+		struct drm_display_mode *adj_mode)
+{
+	if (phys_enc)
+		DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc), "\n");
+	return true;
+}
+
+static void _dpu_encoder_phys_cmd_update_intf_cfg(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+			to_dpu_encoder_phys_cmd(phys_enc);
+	struct dpu_hw_ctl *ctl;
+	struct dpu_hw_intf_cfg intf_cfg = { 0 };
+
+	if (!phys_enc)
+		return;
+
+	ctl = phys_enc->hw_ctl;
+	if (!ctl || !ctl->ops.setup_intf_cfg)
+		return;
+
+	intf_cfg.intf = phys_enc->intf_idx;
+	intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD;
+	intf_cfg.stream_sel = cmd_enc->stream_sel;
+	intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+	ctl->ops.setup_intf_cfg(ctl, &intf_cfg);
+}
+
+static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
+{
+	struct dpu_encoder_phys *phys_enc = arg;
+	unsigned long lock_flags;
+	int new_cnt;
+	u32 event = DPU_ENCODER_FRAME_EVENT_DONE;
+
+	if (!phys_enc || !phys_enc->hw_pp)
+		return;
+
+	DPU_ATRACE_BEGIN("pp_done_irq");
+	/* notify all synchronous clients first, then asynchronous clients */
+	if (phys_enc->parent_ops->handle_frame_done)
+		phys_enc->parent_ops->handle_frame_done(phys_enc->parent,
+				phys_enc, event);
+
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	trace_dpu_enc_phys_cmd_pp_tx_done(DRMID(phys_enc->parent),
+					  phys_enc->hw_pp->idx - PINGPONG_0,
+					  new_cnt, event);
+
+	/* Signal any waiting atomic commit thread */
+	wake_up_all(&phys_enc->pending_kickoff_wq);
+	DPU_ATRACE_END("pp_done_irq");
+}
+
+static void dpu_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
+{
+	struct dpu_encoder_phys *phys_enc = arg;
+	struct dpu_encoder_phys_cmd *cmd_enc;
+
+	if (!phys_enc || !phys_enc->hw_pp)
+		return;
+
+	DPU_ATRACE_BEGIN("rd_ptr_irq");
+	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+	if (phys_enc->parent_ops->handle_vblank_virt)
+		phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
+			phys_enc);
+
+	atomic_add_unless(&cmd_enc->pending_vblank_cnt, -1, 0);
+	wake_up_all(&cmd_enc->pending_vblank_wq);
+	DPU_ATRACE_END("rd_ptr_irq");
+}
+
+static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
+{
+	struct dpu_encoder_phys *phys_enc = arg;
+	struct dpu_encoder_phys_cmd *cmd_enc;
+
+	if (!phys_enc || !phys_enc->hw_ctl)
+		return;
+
+	DPU_ATRACE_BEGIN("ctl_start_irq");
+	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+	atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
+
+	/* Signal any waiting ctl start interrupt */
+	wake_up_all(&phys_enc->pending_kickoff_wq);
+	DPU_ATRACE_END("ctl_start_irq");
+}
+
+static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
+{
+	struct dpu_encoder_phys *phys_enc = arg;
+
+	if (!phys_enc)
+		return;
+
+	if (phys_enc->parent_ops->handle_underrun_virt)
+		phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
+			phys_enc);
+}
+
+static void _dpu_encoder_phys_cmd_setup_irq_hw_idx(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_irq *irq;
+
+	irq = &phys_enc->irq[INTR_IDX_CTL_START];
+	irq->hw_idx = phys_enc->hw_ctl->idx;
+	irq->irq_idx = -EINVAL;
+
+	irq = &phys_enc->irq[INTR_IDX_PINGPONG];
+	irq->hw_idx = phys_enc->hw_pp->idx;
+	irq->irq_idx = -EINVAL;
+
+	irq = &phys_enc->irq[INTR_IDX_RDPTR];
+	irq->hw_idx = phys_enc->hw_pp->idx;
+	irq->irq_idx = -EINVAL;
+
+	irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+	irq->hw_idx = phys_enc->intf_idx;
+	irq->irq_idx = -EINVAL;
+}
+
+static void dpu_encoder_phys_cmd_mode_set(
+		struct dpu_encoder_phys *phys_enc,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adj_mode)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+		to_dpu_encoder_phys_cmd(phys_enc);
+	struct dpu_rm *rm = &phys_enc->dpu_kms->rm;
+	struct dpu_rm_hw_iter iter;
+	int i, instance;
+
+	if (!phys_enc || !mode || !adj_mode) {
+		DPU_ERROR("invalid args\n");
+		return;
+	}
+	phys_enc->cached_mode = *adj_mode;
+	DPU_DEBUG_CMDENC(cmd_enc, "caching mode:\n");
+	drm_mode_debug_printmodeline(adj_mode);
+
+	instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
+
+	/* Retrieve previously allocated HW Resources. Shouldn't fail */
+	dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_CTL);
+	for (i = 0; i <= instance; i++) {
+		if (dpu_rm_get_hw(rm, &iter))
+			phys_enc->hw_ctl = (struct dpu_hw_ctl *)iter.hw;
+	}
+
+	if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
+		DPU_ERROR_CMDENC(cmd_enc, "failed to init ctl: %ld\n",
+				PTR_ERR(phys_enc->hw_ctl));
+		phys_enc->hw_ctl = NULL;
+		return;
+	}
+
+	_dpu_encoder_phys_cmd_setup_irq_hw_idx(phys_enc);
+}
+
+static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+			to_dpu_encoder_phys_cmd(phys_enc);
+	u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
+	bool do_log = false;
+
+	if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_ctl)
+		return -EINVAL;
+
+	cmd_enc->pp_timeout_report_cnt++;
+	if (cmd_enc->pp_timeout_report_cnt == PP_TIMEOUT_MAX_TRIALS) {
+		frame_event |= DPU_ENCODER_FRAME_EVENT_PANEL_DEAD;
+		do_log = true;
+	} else if (cmd_enc->pp_timeout_report_cnt == 1) {
+		do_log = true;
+	}
+
+	trace_dpu_enc_phys_cmd_pdone_timeout(DRMID(phys_enc->parent),
+		     phys_enc->hw_pp->idx - PINGPONG_0,
+		     cmd_enc->pp_timeout_report_cnt,
+		     atomic_read(&phys_enc->pending_kickoff_cnt),
+		     frame_event);
+
+	/* to avoid flooding, only log first time, and "dead" time */
+	if (do_log) {
+		DRM_ERROR("id:%d pp:%d kickoff timeout %d cnt %d koff_cnt %d\n",
+			  DRMID(phys_enc->parent),
+			  phys_enc->hw_pp->idx - PINGPONG_0,
+			  phys_enc->hw_ctl->idx - CTL_0,
+			  cmd_enc->pp_timeout_report_cnt,
+			  atomic_read(&phys_enc->pending_kickoff_cnt));
+
+		dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR);
+		dpu_dbg_dump(false, __func__, true, true);
+	}
+
+	atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+
+	/* request a ctl reset before the next kickoff */
+	phys_enc->enable_state = DPU_ENC_ERR_NEEDS_HW_RESET;
+
+	if (phys_enc->parent_ops->handle_frame_done)
+		phys_enc->parent_ops->handle_frame_done(
+				phys_enc->parent, phys_enc, frame_event);
+
+	return -ETIMEDOUT;
+}
+
+static int _dpu_encoder_phys_cmd_wait_for_idle(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+			to_dpu_encoder_phys_cmd(phys_enc);
+	struct dpu_encoder_wait_info wait_info;
+	int ret;
+
+	if (!phys_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	wait_info.wq = &phys_enc->pending_kickoff_wq;
+	wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+	ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_PINGPONG,
+			&wait_info);
+	if (ret == -ETIMEDOUT)
+		_dpu_encoder_phys_cmd_handle_ppdone_timeout(phys_enc);
+	else if (!ret)
+		cmd_enc->pp_timeout_report_cnt = 0;
+
+	return ret;
+}
+
+static int dpu_encoder_phys_cmd_control_vblank_irq(
+		struct dpu_encoder_phys *phys_enc,
+		bool enable)
+{
+	int ret = 0;
+	int refcount;
+
+	if (!phys_enc || !phys_enc->hw_pp) {
+		DPU_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	refcount = atomic_read(&phys_enc->vblank_refcount);
+
+	/* Slave encoders don't report vblank */
+	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+		goto end;
+
+	/* protect against negative */
+	if (!enable && refcount == 0) {
+		ret = -EINVAL;
+		goto end;
+	}
+
+	DRM_DEBUG_KMS("id:%u pp:%d enable=%s/%d\n", DRMID(phys_enc->parent),
+		      phys_enc->hw_pp->idx - PINGPONG_0,
+		      enable ? "true" : "false", refcount);
+
+	if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+		ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_RDPTR);
+	else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+		ret = dpu_encoder_helper_unregister_irq(phys_enc,
+				INTR_IDX_RDPTR);
+
+end:
+	if (ret) {
+		DRM_ERROR("vblank irq err id:%u pp:%d ret:%d, enable %s/%d\n",
+			  DRMID(phys_enc->parent),
+			  phys_enc->hw_pp->idx - PINGPONG_0, ret,
+			  enable ? "true" : "false", refcount);
+	}
+
+	return ret;
+}
+
+static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
+		bool enable)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc;
+
+	if (!phys_enc)
+		return;
+
+	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+	trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent),
+			phys_enc->hw_pp->idx - PINGPONG_0,
+			enable, atomic_read(&phys_enc->vblank_refcount));
+
+	if (enable) {
+		dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_PINGPONG);
+		dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
+		dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
+
+		if (dpu_encoder_phys_cmd_is_master(phys_enc))
+			dpu_encoder_helper_register_irq(phys_enc,
+					INTR_IDX_CTL_START);
+	} else {
+		if (dpu_encoder_phys_cmd_is_master(phys_enc))
+			dpu_encoder_helper_unregister_irq(phys_enc,
+					INTR_IDX_CTL_START);
+
+		dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
+		dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
+		dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_PINGPONG);
+	}
+}
+
+static void dpu_encoder_phys_cmd_tearcheck_config(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+		to_dpu_encoder_phys_cmd(phys_enc);
+	struct dpu_hw_tear_check tc_cfg = { 0 };
+	struct drm_display_mode *mode;
+	bool tc_enable = true;
+	u32 vsync_hz;
+	struct msm_drm_private *priv;
+	struct dpu_kms *dpu_kms;
+
+	if (!phys_enc || !phys_enc->hw_pp) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+	mode = &phys_enc->cached_mode;
+
+	DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+
+	if (!phys_enc->hw_pp->ops.setup_tearcheck ||
+		!phys_enc->hw_pp->ops.enable_tearcheck) {
+		DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
+		return;
+	}
+
+	dpu_kms = phys_enc->dpu_kms;
+	if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev_private) {
+		DPU_ERROR("invalid device\n");
+		return;
+	}
+	priv = dpu_kms->dev->dev_private;
+
+	/*
+	 * TE default: dsi byte clock calculated based on 70 fps;
+	 * around 14 ms to complete a kickoff cycle if te disabled;
+	 * vclk_line based on 60 fps; write is faster than read;
+	 * init == start == rdptr;
+	 *
+	 * vsync_count is the ratio of the MDP VSYNC clock frequency to the
+	 * LCD panel frequency, divided by the number of rows (lines) in the
+	 * LCD panel.
+	 */
+	vsync_hz = dpu_kms_get_clk_rate(dpu_kms, "vsync");
+	if (!vsync_hz) {
+		DPU_DEBUG_CMDENC(cmd_enc, "invalid - vsync_hz %u\n",
+				 vsync_hz);
+		return;
+	}
+
+	tc_cfg.vsync_count = vsync_hz / (mode->vtotal * mode->vrefresh);
+
+	/* enable external TE after kickoff to avoid premature autorefresh */
+	tc_cfg.hw_vsync_mode = 0;
+
+	/*
+	 * By setting sync_cfg_height to a near-max register value, we
+	 * essentially disable the dpu hw generated TE signal, since the
+	 * external hw TE will arrive first. The only caveat is if, due to
+	 * an error, we hit the wrap-around.
+	 */
+	tc_cfg.sync_cfg_height = 0xFFF0;
+	tc_cfg.vsync_init_val = mode->vdisplay;
+	tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
+	tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
+	tc_cfg.start_pos = mode->vdisplay;
+	tc_cfg.rd_ptr_irq = mode->vdisplay + 1;
+
+	DPU_DEBUG_CMDENC(cmd_enc,
+		"tc %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
+		phys_enc->hw_pp->idx - PINGPONG_0, vsync_hz,
+		mode->vtotal, mode->vrefresh);
+	DPU_DEBUG_CMDENC(cmd_enc,
+		"tc %d enable %u start_pos %u rd_ptr_irq %u\n",
+		phys_enc->hw_pp->idx - PINGPONG_0, tc_enable, tc_cfg.start_pos,
+		tc_cfg.rd_ptr_irq);
+	DPU_DEBUG_CMDENC(cmd_enc,
+		"tc %d hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
+		phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.hw_vsync_mode,
+		tc_cfg.vsync_count, tc_cfg.vsync_init_val);
+	DPU_DEBUG_CMDENC(cmd_enc,
+		"tc %d cfgheight %u thresh_start %u thresh_cont %u\n",
+		phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.sync_cfg_height,
+		tc_cfg.sync_threshold_start, tc_cfg.sync_threshold_continue);
+
+	phys_enc->hw_pp->ops.setup_tearcheck(phys_enc->hw_pp, &tc_cfg);
+	phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, tc_enable);
+}
+
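To make the vsync_count formula above concrete, a worked example with
hypothetical numbers (not values taken from this patch): a 19.2 MHz "vsync"
clock driving a mode with vtotal = 2250 lines at vrefresh = 60 fps gives

	vsync_count = vsync_hz / (vtotal * vrefresh)
	            = 19200000 / (2250 * 60)
	            = 19200000 / 135000
	            = 142   (integer division)
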
+static void _dpu_encoder_phys_cmd_pingpong_config(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+		to_dpu_encoder_phys_cmd(phys_enc);
+
+	if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp
+			|| !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+		DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != 0);
+		return;
+	}
+
+	DPU_DEBUG_CMDENC(cmd_enc, "pp %d, enabling mode:\n",
+			phys_enc->hw_pp->idx - PINGPONG_0);
+	drm_mode_debug_printmodeline(&phys_enc->cached_mode);
+
+	_dpu_encoder_phys_cmd_update_intf_cfg(phys_enc);
+	dpu_encoder_phys_cmd_tearcheck_config(phys_enc);
+}
+
+static bool dpu_encoder_phys_cmd_needs_single_flush(
+		struct dpu_encoder_phys *phys_enc)
+{
+	/*
+	 * we do a separate flush for each CTL and let
+	 * CTL_START synchronize them
+	 */
+	return false;
+}
+
+static void dpu_encoder_phys_cmd_enable_helper(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_hw_ctl *ctl;
+	u32 flush_mask = 0;
+
+	if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp) {
+		DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+		return;
+	}
+
+	dpu_encoder_helper_split_config(phys_enc, phys_enc->intf_idx);
+
+	_dpu_encoder_phys_cmd_pingpong_config(phys_enc);
+
+	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+		goto skip_flush;
+
+	ctl = phys_enc->hw_ctl;
+	ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->intf_idx);
+	ctl->ops.update_pending_flush(ctl, flush_mask);
+
+skip_flush:
+	return;
+}
+
+static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+		to_dpu_encoder_phys_cmd(phys_enc);
+
+	if (!phys_enc || !phys_enc->hw_pp) {
+		DPU_ERROR("invalid phys encoder\n");
+		return;
+	}
+
+	DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+
+	if (phys_enc->enable_state == DPU_ENC_ENABLED) {
+		DPU_ERROR("already enabled\n");
+		return;
+	}
+
+	dpu_encoder_phys_cmd_enable_helper(phys_enc);
+	phys_enc->enable_state = DPU_ENC_ENABLED;
+}
+
+static void _dpu_encoder_phys_cmd_connect_te(
+		struct dpu_encoder_phys *phys_enc, bool enable)
+{
+	if (!phys_enc || !phys_enc->hw_pp ||
+			!phys_enc->hw_pp->ops.connect_external_te)
+		return;
+
+	trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
+	phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable);
+}
+
+static void dpu_encoder_phys_cmd_prepare_idle_pc(
+		struct dpu_encoder_phys *phys_enc)
+{
+	_dpu_encoder_phys_cmd_connect_te(phys_enc, false);
+}
+
+static int dpu_encoder_phys_cmd_get_line_count(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_hw_pingpong *hw_pp;
+
+	if (!phys_enc || !phys_enc->hw_pp)
+		return -EINVAL;
+
+	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+		return -EINVAL;
+
+	hw_pp = phys_enc->hw_pp;
+	if (!hw_pp->ops.get_line_count)
+		return -EINVAL;
+
+	return hw_pp->ops.get_line_count(hw_pp);
+}
+
+static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+		to_dpu_encoder_phys_cmd(phys_enc);
+
+	if (!phys_enc || !phys_enc->hw_pp) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+	DRM_DEBUG_KMS("id:%u pp:%d state:%d\n", DRMID(phys_enc->parent),
+		      phys_enc->hw_pp->idx - PINGPONG_0,
+		      phys_enc->enable_state);
+
+	if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+		DPU_ERROR_CMDENC(cmd_enc, "already disabled\n");
+		return;
+	}
+
+	if (phys_enc->hw_pp->ops.enable_tearcheck)
+		phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, false);
+	phys_enc->enable_state = DPU_ENC_DISABLED;
+}
+
+static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+		to_dpu_encoder_phys_cmd(phys_enc);
+
+	if (!phys_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+	kfree(cmd_enc);
+}
+
+static void dpu_encoder_phys_cmd_get_hw_resources(
+		struct dpu_encoder_phys *phys_enc,
+		struct dpu_encoder_hw_resources *hw_res,
+		struct drm_connector_state *conn_state)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+		to_dpu_encoder_phys_cmd(phys_enc);
+
+	if (!phys_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+
+	if ((phys_enc->intf_idx - INTF_0) >= INTF_MAX) {
+		DPU_ERROR("invalid intf idx:%d\n", phys_enc->intf_idx);
+		return;
+	}
+
+	DPU_DEBUG_CMDENC(cmd_enc, "\n");
+	hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
+}
+
+static void dpu_encoder_phys_cmd_prepare_for_kickoff(
+		struct dpu_encoder_phys *phys_enc,
+		struct dpu_encoder_kickoff_params *params)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+			to_dpu_encoder_phys_cmd(phys_enc);
+	int ret;
+
+	if (!phys_enc || !phys_enc->hw_pp) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+	DRM_DEBUG_KMS("id:%u pp:%d pending_cnt:%d\n", DRMID(phys_enc->parent),
+		      phys_enc->hw_pp->idx - PINGPONG_0,
+		      atomic_read(&phys_enc->pending_kickoff_cnt));
+
+	/*
+	 * Mark kickoff request as outstanding. If there is more than one
+	 * outstanding, we have to wait for the previous kickoff to complete
+	 */
+	ret = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
+	if (ret) {
+		/* force pending_kickoff_cnt 0 to discard failed kickoff */
+		atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+		DRM_ERROR("failed wait_for_idle: id:%u ret:%d pp:%d\n",
+			  DRMID(phys_enc->parent), ret,
+			  phys_enc->hw_pp->idx - PINGPONG_0);
+	}
+
+	DPU_DEBUG_CMDENC(cmd_enc, "pp:%d pending_cnt %d\n",
+			phys_enc->hw_pp->idx - PINGPONG_0,
+			atomic_read(&phys_enc->pending_kickoff_cnt));
+}
+
+static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+			to_dpu_encoder_phys_cmd(phys_enc);
+	struct dpu_encoder_wait_info wait_info;
+	int ret;
+
+	if (!phys_enc || !phys_enc->hw_ctl) {
+		DPU_ERROR("invalid argument(s)\n");
+		return -EINVAL;
+	}
+
+	wait_info.wq = &phys_enc->pending_kickoff_wq;
+	wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
+	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+	ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_CTL_START,
+			&wait_info);
+	if (ret == -ETIMEDOUT) {
+		DPU_ERROR_CMDENC(cmd_enc, "ctl start interrupt wait failed\n");
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_tx_complete(
+		struct dpu_encoder_phys *phys_enc)
+{
+	int rc;
+	struct dpu_encoder_phys_cmd *cmd_enc;
+
+	if (!phys_enc)
+		return -EINVAL;
+
+	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+	rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
+	if (rc) {
+		DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n",
+			  DRMID(phys_enc->parent), rc,
+			  phys_enc->intf_idx - INTF_0);
+	}
+
+	return rc;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_commit_done(
+		struct dpu_encoder_phys *phys_enc)
+{
+	int rc = 0;
+	struct dpu_encoder_phys_cmd *cmd_enc;
+
+	if (!phys_enc)
+		return -EINVAL;
+
+	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+	/* only required for master controller */
+	if (dpu_encoder_phys_cmd_is_master(phys_enc))
+		rc = _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
+
+	/* required for both controllers */
+	if (!rc && cmd_enc->serialize_wait4pp)
+		dpu_encoder_phys_cmd_prepare_for_kickoff(phys_enc, NULL);
+
+	return rc;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_vblank(
+		struct dpu_encoder_phys *phys_enc)
+{
+	int rc = 0;
+	struct dpu_encoder_phys_cmd *cmd_enc;
+	struct dpu_encoder_wait_info wait_info;
+
+	if (!phys_enc)
+		return -EINVAL;
+
+	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+	/* only required for master controller */
+	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+		return rc;
+
+	wait_info.wq = &cmd_enc->pending_vblank_wq;
+	wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
+	wait_info.timeout_ms = _dpu_encoder_phys_cmd_get_idle_timeout(cmd_enc);
+
+	atomic_inc(&cmd_enc->pending_vblank_cnt);
+
+	rc = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_RDPTR,
+			&wait_info);
+
+	return rc;
+}
+
+static void dpu_encoder_phys_cmd_handle_post_kickoff(
+		struct dpu_encoder_phys *phys_enc)
+{
+	if (!phys_enc)
+		return;
+
+	/*
+	 * re-enable external TE, either for the first time after enabling
+	 * or if it was disabled for autorefresh
+	 */
+	_dpu_encoder_phys_cmd_connect_te(phys_enc, true);
+}
+
+static void dpu_encoder_phys_cmd_trigger_start(
+		struct dpu_encoder_phys *phys_enc)
+{
+	if (!phys_enc)
+		return;
+
+	dpu_encoder_helper_trigger_start(phys_enc);
+}
+
+static void dpu_encoder_phys_cmd_init_ops(
+		struct dpu_encoder_phys_ops *ops)
+{
+	ops->is_master = dpu_encoder_phys_cmd_is_master;
+	ops->mode_set = dpu_encoder_phys_cmd_mode_set;
+	ops->mode_fixup = dpu_encoder_phys_cmd_mode_fixup;
+	ops->enable = dpu_encoder_phys_cmd_enable;
+	ops->disable = dpu_encoder_phys_cmd_disable;
+	ops->destroy = dpu_encoder_phys_cmd_destroy;
+	ops->get_hw_resources = dpu_encoder_phys_cmd_get_hw_resources;
+	ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
+	ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done;
+	ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff;
+	ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete;
+	ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank;
+	ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
+	ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
+	ops->hw_reset = dpu_encoder_helper_hw_reset;
+	ops->irq_control = dpu_encoder_phys_cmd_irq_control;
+	ops->restore = dpu_encoder_phys_cmd_enable_helper;
+	ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc;
+	ops->handle_post_kickoff = dpu_encoder_phys_cmd_handle_post_kickoff;
+	ops->get_line_count = dpu_encoder_phys_cmd_get_line_count;
+}
+
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+		struct dpu_enc_phys_init_params *p)
+{
+	struct dpu_encoder_phys *phys_enc = NULL;
+	struct dpu_encoder_phys_cmd *cmd_enc = NULL;
+	struct dpu_hw_mdp *hw_mdp;
+	struct dpu_encoder_irq *irq;
+	int i, ret = 0;
+
+	DPU_DEBUG("intf %d\n", p->intf_idx - INTF_0);
+
+	cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
+	if (!cmd_enc) {
+		ret = -ENOMEM;
+		DPU_ERROR("failed to allocate\n");
+		goto fail;
+	}
+	phys_enc = &cmd_enc->base;
+
+	hw_mdp = dpu_rm_get_mdp(&p->dpu_kms->rm);
+	if (IS_ERR_OR_NULL(hw_mdp)) {
+		ret = PTR_ERR(hw_mdp);
+		DPU_ERROR("failed to get mdptop\n");
+		goto fail_mdp_init;
+	}
+	phys_enc->hw_mdptop = hw_mdp;
+	phys_enc->intf_idx = p->intf_idx;
+
+	dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
+	phys_enc->parent = p->parent;
+	phys_enc->parent_ops = p->parent_ops;
+	phys_enc->dpu_kms = p->dpu_kms;
+	phys_enc->split_role = p->split_role;
+	phys_enc->intf_mode = INTF_MODE_CMD;
+	phys_enc->enc_spinlock = p->enc_spinlock;
+	cmd_enc->stream_sel = 0;
+	phys_enc->enable_state = DPU_ENC_DISABLED;
+	for (i = 0; i < INTR_IDX_MAX; i++) {
+		irq = &phys_enc->irq[i];
+		INIT_LIST_HEAD(&irq->cb.list);
+		irq->irq_idx = -EINVAL;
+		irq->hw_idx = -EINVAL;
+		irq->cb.arg = phys_enc;
+	}
+
+	irq = &phys_enc->irq[INTR_IDX_CTL_START];
+	irq->name = "ctl_start";
+	irq->intr_type = DPU_IRQ_TYPE_CTL_START;
+	irq->intr_idx = INTR_IDX_CTL_START;
+	irq->cb.func = dpu_encoder_phys_cmd_ctl_start_irq;
+
+	irq = &phys_enc->irq[INTR_IDX_PINGPONG];
+	irq->name = "pp_done";
+	irq->intr_type = DPU_IRQ_TYPE_PING_PONG_COMP;
+	irq->intr_idx = INTR_IDX_PINGPONG;
+	irq->cb.func = dpu_encoder_phys_cmd_pp_tx_done_irq;
+
+	irq = &phys_enc->irq[INTR_IDX_RDPTR];
+	irq->name = "pp_rd_ptr";
+	irq->intr_type = DPU_IRQ_TYPE_PING_PONG_RD_PTR;
+	irq->intr_idx = INTR_IDX_RDPTR;
+	irq->cb.func = dpu_encoder_phys_cmd_pp_rd_ptr_irq;
+
+	irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+	irq->name = "underrun";
+	irq->intr_type = DPU_IRQ_TYPE_INTF_UNDER_RUN;
+	irq->intr_idx = INTR_IDX_UNDERRUN;
+	irq->cb.func = dpu_encoder_phys_cmd_underrun_irq;
+
+	atomic_set(&phys_enc->vblank_refcount, 0);
+	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+	atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
+	atomic_set(&cmd_enc->pending_vblank_cnt, 0);
+	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+	init_waitqueue_head(&cmd_enc->pending_vblank_wq);
+
+	DPU_DEBUG_CMDENC(cmd_enc, "created\n");
+
+	return phys_enc;
+
+fail_mdp_init:
+	kfree(cmd_enc);
+fail:
+	return ERR_PTR(ret);
+}
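A hypothetical call-site sketch for this constructor; the parent-ops symbol
and the locals here are assumptions for illustration, not definitions from
this patch:

	struct dpu_enc_phys_init_params params = {
		.dpu_kms = dpu_kms,			/* assumed in scope */
		.parent = drm_enc,			/* assumed in scope */
		.parent_ops = &dpu_encoder_parent_ops,	/* hypothetical symbol */
		.split_role = ENC_ROLE_SOLO,
		.intf_idx = INTF_1,
		.enc_spinlock = &enc_lock,		/* assumed in scope */
	};
	struct dpu_encoder_phys *phys = dpu_encoder_phys_cmd_init(&params);

	if (IS_ERR(phys))
		return PTR_ERR(phys);
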
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
new file mode 100644
index 0000000..14fc7c2a
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -0,0 +1,922 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_trace.h"
+
+#define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
+		(e) && (e)->base.parent ? \
+		(e)->base.parent->base.id : -1, \
+		(e) && (e)->hw_intf ? \
+		(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_VIDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
+		(e) && (e)->base.parent ? \
+		(e)->base.parent->base.id : -1, \
+		(e) && (e)->hw_intf ? \
+		(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define to_dpu_encoder_phys_vid(x) \
+	container_of(x, struct dpu_encoder_phys_vid, base)
+
+static bool dpu_encoder_phys_vid_is_master(
+		struct dpu_encoder_phys *phys_enc)
+{
+	return phys_enc->split_role != ENC_ROLE_SLAVE;
+}
+
+static void drm_mode_to_intf_timing_params(
+		const struct dpu_encoder_phys_vid *vid_enc,
+		const struct drm_display_mode *mode,
+		struct intf_timing_params *timing)
+{
+	memset(timing, 0, sizeof(*timing));
+
+	if ((mode->htotal < mode->hsync_end)
+			|| (mode->hsync_start < mode->hdisplay)
+			|| (mode->vtotal < mode->vsync_end)
+			|| (mode->vsync_start < mode->vdisplay)
+			|| (mode->hsync_end < mode->hsync_start)
+			|| (mode->vsync_end < mode->vsync_start)) {
+		DPU_ERROR(
+		    "invalid params - hstart:%d,hend:%d,htot:%d,hdisplay:%d\n",
+				mode->hsync_start, mode->hsync_end,
+				mode->htotal, mode->hdisplay);
+		DPU_ERROR("vstart:%d,vend:%d,vtot:%d,vdisplay:%d\n",
+				mode->vsync_start, mode->vsync_end,
+				mode->vtotal, mode->vdisplay);
+		return;
+	}
+
+	/*
+	 * https://www.kernel.org/doc/htmldocs/drm/ch02s05.html
+	 *  Active Region      Front Porch   Sync   Back Porch
+	 * <-----------------><------------><-----><----------->
+	 * <- [hv]display --->
+	 * <--------- [hv]sync_start ------>
+	 * <----------------- [hv]sync_end ------->
+	 * <---------------------------- [hv]total ------------->
+	 */
+	timing->width = mode->hdisplay;	/* active width */
+	timing->height = mode->vdisplay;	/* active height */
+	timing->xres = timing->width;
+	timing->yres = timing->height;
+	timing->h_back_porch = mode->htotal - mode->hsync_end;
+	timing->h_front_porch = mode->hsync_start - mode->hdisplay;
+	timing->v_back_porch = mode->vtotal - mode->vsync_end;
+	timing->v_front_porch = mode->vsync_start - mode->vdisplay;
+	timing->hsync_pulse_width = mode->hsync_end - mode->hsync_start;
+	timing->vsync_pulse_width = mode->vsync_end - mode->vsync_start;
+	timing->hsync_polarity = (mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0;
+	timing->vsync_polarity = (mode->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
+	timing->border_clr = 0;
+	timing->underflow_clr = 0xff;
+	timing->hsync_skew = mode->hskew;
+
+	/* DSI controller cannot handle active-low sync signals. */
+	if (vid_enc->hw_intf->cap->type == INTF_DSI) {
+		timing->hsync_polarity = 0;
+		timing->vsync_polarity = 0;
+	}
+
+	/*
+	 * For edp only:
+	 * DISPLAY_V_START = (VBP * HCYCLE) + HBP
+	 * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
+	 */
+	/*
+	 * if (vid_enc->hw->cap->type == INTF_EDP) {
+	 * display_v_start += mode->htotal - mode->hsync_start;
+	 * display_v_end -= mode->hsync_start - mode->hdisplay;
+	 * }
+	 */
+}
+
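As a concrete check of the porch arithmetic above, the standard CEA 1080p60
mode (hdisplay 1920, hsync_start 2008, hsync_end 2052, htotal 2200;
vdisplay 1080, vsync_start 1084, vsync_end 1089, vtotal 1125) maps to:

	h_front_porch     = 2008 - 1920 = 88
	hsync_pulse_width = 2052 - 2008 = 44
	h_back_porch      = 2200 - 2052 = 148
	v_front_porch     = 1084 - 1080 = 4
	vsync_pulse_width = 1089 - 1084 = 5
	v_back_porch      = 1125 - 1089 = 36
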
+static inline u32 get_horizontal_total(const struct intf_timing_params *timing)
+{
+	u32 active = timing->xres;
+	u32 inactive =
+	    timing->h_back_porch + timing->h_front_porch +
+	    timing->hsync_pulse_width;
+	return active + inactive;
+}
+
+static inline u32 get_vertical_total(const struct intf_timing_params *timing)
+{
+	u32 active = timing->yres;
+	u32 inactive =
+	    timing->v_back_porch + timing->v_front_porch +
+	    timing->vsync_pulse_width;
+	return active + inactive;
+}
+
+/*
+ * programmable_fetch_get_num_lines:
+ *	Number of fetch lines in vertical front porch
+ * @timing: Pointer to the intf timing information for the requested mode
+ *
+ * Returns the number of fetch lines in vertical front porch at which mdp
+ * can start fetching the next frame.
+ *
+ * Number of needed prefetch lines is anything that cannot be absorbed in the
+ * start of frame time (back porch + vsync pulse width).
+ *
+ * Some panels have a very large VFP; however, we only need the number of
+ * lines dictated by the chip's worst-case latencies.
+ */
+static u32 programmable_fetch_get_num_lines(
+		struct dpu_encoder_phys_vid *vid_enc,
+		const struct intf_timing_params *timing)
+{
+	u32 worst_case_needed_lines =
+	    vid_enc->hw_intf->cap->prog_fetch_lines_worst_case;
+	u32 start_of_frame_lines =
+	    timing->v_back_porch + timing->vsync_pulse_width;
+	u32 needed_vfp_lines = worst_case_needed_lines - start_of_frame_lines;
+	u32 actual_vfp_lines = 0;
+
+	/* Fetch must be outside active lines, otherwise undefined. */
+	if (start_of_frame_lines >= worst_case_needed_lines) {
+		DPU_DEBUG_VIDENC(vid_enc,
+				"prog fetch is not needed, large vbp+vsw\n");
+		actual_vfp_lines = 0;
+	} else if (timing->v_front_porch < needed_vfp_lines) {
+		/* Warn fetch needed, but not enough porch in panel config */
+		pr_warn_once
+			("low vbp+vfp may lead to perf issues in some cases\n");
+		DPU_DEBUG_VIDENC(vid_enc,
+				"less vfp than fetch req, using entire vfp\n");
+		actual_vfp_lines = timing->v_front_porch;
+	} else {
+		DPU_DEBUG_VIDENC(vid_enc, "room in vfp for needed prefetch\n");
+		actual_vfp_lines = needed_vfp_lines;
+	}
+
+	DPU_DEBUG_VIDENC(vid_enc,
+		"v_front_porch %u v_back_porch %u vsync_pulse_width %u\n",
+		timing->v_front_porch, timing->v_back_porch,
+		timing->vsync_pulse_width);
+	DPU_DEBUG_VIDENC(vid_enc,
+		"wc_lines %u needed_vfp_lines %u actual_vfp_lines %u\n",
+		worst_case_needed_lines, needed_vfp_lines, actual_vfp_lines);
+
+	return actual_vfp_lines;
+}
+
+/*
+ * programmable_fetch_config: Programs HW to prefetch lines by offsetting
+ *	the start of fetch into the vertical front porch for cases where the
+ *	vsync pulse width and vertical back porch time are insufficient
+ *
+ *	Gets the number of lines to pre-fetch, then calculates the VSYNC
+ *	counter value. The HW requires the VSYNC counter of the first pixel
+ *	of the target VFP line.
+ *
+ * @phys_enc: Pointer to physical encoder structure
+ * @timing: Pointer to the intf timing information for the requested mode
+ */
+static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc,
+				      const struct intf_timing_params *timing)
+{
+	struct dpu_encoder_phys_vid *vid_enc =
+		to_dpu_encoder_phys_vid(phys_enc);
+	struct intf_prog_fetch f = { 0 };
+	u32 vfp_fetch_lines = 0;
+	u32 horiz_total = 0;
+	u32 vert_total = 0;
+	u32 vfp_fetch_start_vsync_counter = 0;
+	unsigned long lock_flags;
+
+	if (WARN_ON_ONCE(!vid_enc->hw_intf->ops.setup_prg_fetch))
+		return;
+
+	vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc, timing);
+	if (vfp_fetch_lines) {
+		vert_total = get_vertical_total(timing);
+		horiz_total = get_horizontal_total(timing);
+		vfp_fetch_start_vsync_counter =
+		    (vert_total - vfp_fetch_lines) * horiz_total + 1;
+		f.enable = 1;
+		f.fetch_start = vfp_fetch_start_vsync_counter;
+	}
+
+	DPU_DEBUG_VIDENC(vid_enc,
+		"vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u\n",
+		vfp_fetch_lines, vfp_fetch_start_vsync_counter);
+
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	vid_enc->hw_intf->ops.setup_prg_fetch(vid_enc->hw_intf, &f);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+}
+
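A worked example of the fetch-start arithmetic, reusing the 1080p60 timings
above and assuming, hypothetically, that the intf needs 21 prefetch lines:
the fetch must begin 21 lines before the end of frame, so

	vfp_fetch_start_vsync_counter
		= (vert_total - vfp_fetch_lines) * horiz_total + 1
		= (1125 - 21) * 2200 + 1
		= 2428801
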
+static bool dpu_encoder_phys_vid_mode_fixup(
+		struct dpu_encoder_phys *phys_enc,
+		const struct drm_display_mode *mode,
+		struct drm_display_mode *adj_mode)
+{
+	if (phys_enc)
+		DPU_DEBUG_VIDENC(to_dpu_encoder_phys_vid(phys_enc), "\n");
+
+	/*
+	 * Modifying mode has consequences when the mode comes back to us
+	 */
+	return true;
+}
+
+static void dpu_encoder_phys_vid_setup_timing_engine(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_vid *vid_enc;
+	struct drm_display_mode mode;
+	struct intf_timing_params timing_params = { 0 };
+	const struct dpu_format *fmt = NULL;
+	u32 fmt_fourcc = DRM_FORMAT_RGB888;
+	unsigned long lock_flags;
+	struct dpu_hw_intf_cfg intf_cfg = { 0 };
+
+	if (!phys_enc || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+		DPU_ERROR("invalid encoder %d\n", phys_enc != 0);
+		return;
+	}
+
+	mode = phys_enc->cached_mode;
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+	if (!vid_enc->hw_intf->ops.setup_timing_gen) {
+		DPU_ERROR("timing engine setup is not supported\n");
+		return;
+	}
+
+	DPU_DEBUG_VIDENC(vid_enc, "enabling mode:\n");
+	drm_mode_debug_printmodeline(&mode);
+
+	if (phys_enc->split_role != ENC_ROLE_SOLO) {
+		mode.hdisplay >>= 1;
+		mode.htotal >>= 1;
+		mode.hsync_start >>= 1;
+		mode.hsync_end >>= 1;
+
+		DPU_DEBUG_VIDENC(vid_enc,
+			"split_role %d, halve horizontal %d %d %d %d\n",
+			phys_enc->split_role,
+			mode.hdisplay, mode.htotal,
+			mode.hsync_start, mode.hsync_end);
+	}
+
+	drm_mode_to_intf_timing_params(vid_enc, &mode, &timing_params);
+
+	fmt = dpu_get_dpu_format(fmt_fourcc);
+	DPU_DEBUG_VIDENC(vid_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
+
+	intf_cfg.intf = vid_enc->hw_intf->idx;
+	intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID;
+	intf_cfg.stream_sel = 0; /* Don't care value for video mode */
+	intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	vid_enc->hw_intf->ops.setup_timing_gen(vid_enc->hw_intf,
+			&timing_params, fmt);
+	phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	programmable_fetch_config(phys_enc, &timing_params);
+
+	vid_enc->timing_params = timing_params;
+}
+
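For the split-role halving below in dpu_encoder_phys_vid_setup_timing_engine:
a hypothetical 2560x1440 mode with htotal 2720, hsync_start 2608 and
hsync_end 2640, driven across two interfaces, would program each timing
engine with the halved values hdisplay 1280, htotal 1360, hsync_start 1304
and hsync_end 1320, so the two engines together scan out the full width.
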
+static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
+{
+	struct dpu_encoder_phys *phys_enc = arg;
+	struct dpu_hw_ctl *hw_ctl;
+	unsigned long lock_flags;
+	u32 flush_register = 0;
+	int new_cnt = -1, old_cnt = -1;
+
+	if (!phys_enc)
+		return;
+
+	hw_ctl = phys_enc->hw_ctl;
+	if (!hw_ctl)
+		return;
+
+	DPU_ATRACE_BEGIN("vblank_irq");
+
+	if (phys_enc->parent_ops->handle_vblank_virt)
+		phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
+				phys_enc);
+
+	old_cnt  = atomic_read(&phys_enc->pending_kickoff_cnt);
+
+	/*
+	 * only decrement the pending flush count if we've actually flushed
+	 * hardware. due to sw irq latency, vblank may have already happened
+	 * so we need to double-check with hw that it accepted the flush bits
+	 */
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	if (hw_ctl && hw_ctl->ops.get_flush_register)
+		flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
+
+	if (flush_register == 0)
+		new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
+				-1, 0);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	/* Signal any waiting atomic commit thread */
+	wake_up_all(&phys_enc->pending_kickoff_wq);
+	DPU_ATRACE_END("vblank_irq");
+}
+
+static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
+{
+	struct dpu_encoder_phys *phys_enc = arg;
+
+	if (!phys_enc)
+		return;
+
+	if (phys_enc->parent_ops->handle_underrun_virt)
+		phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
+			phys_enc);
+}
+
+static bool _dpu_encoder_phys_is_dual_ctl(struct dpu_encoder_phys *phys_enc)
+{
+	if (!phys_enc)
+		return false;
+
+	if (phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE)
+		return true;
+
+	return false;
+}
+
+static bool dpu_encoder_phys_vid_needs_single_flush(
+		struct dpu_encoder_phys *phys_enc)
+{
+	return (phys_enc && _dpu_encoder_phys_is_dual_ctl(phys_enc));
+}
+
+static void _dpu_encoder_phys_vid_setup_irq_hw_idx(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_irq *irq;
+
+	/*
+	 * Initialize irq->hw_idx only when irq is not registered.
+	 * Prevent invalidating irq->irq_idx as modeset may be
+	 * called many times during dfps.
+	 */
+
+	irq = &phys_enc->irq[INTR_IDX_VSYNC];
+	if (irq->irq_idx < 0)
+		irq->hw_idx = phys_enc->intf_idx;
+
+	irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+	if (irq->irq_idx < 0)
+		irq->hw_idx = phys_enc->intf_idx;
+}
+
+static void dpu_encoder_phys_vid_mode_set(
+		struct dpu_encoder_phys *phys_enc,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adj_mode)
+{
+	struct dpu_rm *rm;
+	struct dpu_rm_hw_iter iter;
+	int i, instance;
+	struct dpu_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc || !phys_enc->dpu_kms) {
+		DPU_ERROR("invalid encoder/kms\n");
+		return;
+	}
+
+	rm = &phys_enc->dpu_kms->rm;
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+	if (adj_mode) {
+		phys_enc->cached_mode = *adj_mode;
+		drm_mode_debug_printmodeline(adj_mode);
+		DPU_DEBUG_VIDENC(vid_enc, "caching mode:\n");
+	}
+
+	instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
+
+	/* Retrieve previously allocated HW Resources. Shouldn't fail */
+	dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_CTL);
+	for (i = 0; i <= instance; i++) {
+		if (dpu_rm_get_hw(rm, &iter))
+			phys_enc->hw_ctl = (struct dpu_hw_ctl *)iter.hw;
+	}
+	if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
+		DPU_ERROR_VIDENC(vid_enc, "failed to init ctl, %ld\n",
+				PTR_ERR(phys_enc->hw_ctl));
+		phys_enc->hw_ctl = NULL;
+		return;
+	}
+
+	_dpu_encoder_phys_vid_setup_irq_hw_idx(phys_enc);
+}
+
+static int dpu_encoder_phys_vid_control_vblank_irq(
+		struct dpu_encoder_phys *phys_enc,
+		bool enable)
+{
+	int ret = 0;
+	struct dpu_encoder_phys_vid *vid_enc;
+	int refcount;
+
+	if (!phys_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	refcount = atomic_read(&phys_enc->vblank_refcount);
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+	/* Slave encoders don't report vblank */
+	if (!dpu_encoder_phys_vid_is_master(phys_enc))
+		goto end;
+
+	/* protect against negative */
+	if (!enable && refcount == 0) {
+		ret = -EINVAL;
+		goto end;
+	}
+
+	DRM_DEBUG_KMS("id:%u enable=%d/%d\n", DRMID(phys_enc->parent), enable,
+		      atomic_read(&phys_enc->vblank_refcount));
+
+	if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+		ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_VSYNC);
+	else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+		ret = dpu_encoder_helper_unregister_irq(phys_enc,
+				INTR_IDX_VSYNC);
+
+end:
+	if (ret) {
+		DRM_ERROR("failed: id:%u intf:%d ret:%d enable:%d refcnt:%d\n",
+			  DRMID(phys_enc->parent),
+			  vid_enc->hw_intf->idx - INTF_0, ret, enable,
+			  refcount);
+	}
+	return ret;
+}
+
+static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
+{
+	struct msm_drm_private *priv;
+	struct dpu_encoder_phys_vid *vid_enc;
+	struct dpu_hw_intf *intf;
+	struct dpu_hw_ctl *ctl;
+	u32 flush_mask = 0;
+
+	if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
+			!phys_enc->parent->dev->dev_private) {
+		DPU_ERROR("invalid encoder/device\n");
+		return;
+	}
+	priv = phys_enc->parent->dev->dev_private;
+
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+	intf = vid_enc->hw_intf;
+	ctl = phys_enc->hw_ctl;
+	if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
+		DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
+				vid_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
+		return;
+	}
+
+	DPU_DEBUG_VIDENC(vid_enc, "\n");
+
+	if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+		return;
+
+	dpu_encoder_helper_split_config(phys_enc, vid_enc->hw_intf->idx);
+
+	dpu_encoder_phys_vid_setup_timing_engine(phys_enc);
+
+	/*
+	 * For single flush cases (dual-ctl or pp-split), skip setting the
+	 * flush bit for the slave intf, since both intfs use same ctl
+	 * and HW will only flush the master.
+	 */
+	if (dpu_encoder_phys_vid_needs_single_flush(phys_enc) &&
+		!dpu_encoder_phys_vid_is_master(phys_enc))
+		goto skip_flush;
+
+	ctl->ops.get_bitmask_intf(ctl, &flush_mask, intf->idx);
+	ctl->ops.update_pending_flush(ctl, flush_mask);
+
+skip_flush:
+	DPU_DEBUG_VIDENC(vid_enc, "update pending flush ctl %d flush_mask %x\n",
+		ctl->idx - CTL_0, flush_mask);
+
+	/* ctl_flush & timing engine enable will be triggered by framework */
+	if (phys_enc->enable_state == DPU_ENC_DISABLED)
+		phys_enc->enable_state = DPU_ENC_ENABLING;
+}
+
+static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+	DPU_DEBUG_VIDENC(vid_enc, "\n");
+	kfree(vid_enc);
+}
+
+static void dpu_encoder_phys_vid_get_hw_resources(
+		struct dpu_encoder_phys *phys_enc,
+		struct dpu_encoder_hw_resources *hw_res,
+		struct drm_connector_state *conn_state)
+{
+	struct dpu_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc || !hw_res) {
+		DPU_ERROR("invalid arg(s), enc %d hw_res %d conn_state %d\n",
+				phys_enc != 0, hw_res != 0, conn_state != 0);
+		return;
+	}
+
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+	if (!vid_enc->hw_intf) {
+		DPU_ERROR("invalid arg(s), hw_intf\n");
+		return;
+	}
+
+	DPU_DEBUG_VIDENC(vid_enc, "\n");
+	hw_res->intfs[vid_enc->hw_intf->idx - INTF_0] = INTF_MODE_VIDEO;
+}
+
+static int _dpu_encoder_phys_vid_wait_for_vblank(
+		struct dpu_encoder_phys *phys_enc, bool notify)
+{
+	struct dpu_encoder_wait_info wait_info;
+	int ret;
+
+	if (!phys_enc) {
+		pr_err("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	wait_info.wq = &phys_enc->pending_kickoff_wq;
+	wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+	if (!dpu_encoder_phys_vid_is_master(phys_enc)) {
+		if (notify && phys_enc->parent_ops->handle_frame_done)
+			phys_enc->parent_ops->handle_frame_done(
+					phys_enc->parent, phys_enc,
+					DPU_ENCODER_FRAME_EVENT_DONE);
+		return 0;
+	}
+
+	/* Wait for kickoff to complete */
+	ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_VSYNC,
+			&wait_info);
+
+	if (ret == -ETIMEDOUT) {
+		dpu_encoder_helper_report_irq_timeout(phys_enc, INTR_IDX_VSYNC);
+	} else if (!ret && notify && phys_enc->parent_ops->handle_frame_done) {
+		phys_enc->parent_ops->handle_frame_done(
+				phys_enc->parent, phys_enc,
+				DPU_ENCODER_FRAME_EVENT_DONE);
+	}
+
+	return ret;
+}
+
+static int dpu_encoder_phys_vid_wait_for_vblank(
+		struct dpu_encoder_phys *phys_enc)
+{
+	return _dpu_encoder_phys_vid_wait_for_vblank(phys_enc, true);
+}
+
+static void dpu_encoder_phys_vid_prepare_for_kickoff(
+		struct dpu_encoder_phys *phys_enc,
+		struct dpu_encoder_kickoff_params *params)
+{
+	struct dpu_encoder_phys_vid *vid_enc;
+	struct dpu_hw_ctl *ctl;
+	int rc;
+
+	if (!phys_enc || !params) {
+		DPU_ERROR("invalid encoder/parameters\n");
+		return;
+	}
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+	ctl = phys_enc->hw_ctl;
+	if (!ctl || !ctl->ops.wait_reset_status)
+		return;
+
+	/*
+	 * hw supports hardware initiated ctl reset, so before we kickoff a new
+	 * frame, need to check and wait for hw initiated ctl reset completion
+	 */
+	rc = ctl->ops.wait_reset_status(ctl);
+	if (rc) {
+		DPU_ERROR_VIDENC(vid_enc, "ctl %d reset failure: %d\n",
+				ctl->idx, rc);
+		dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC);
+		dpu_dbg_dump(false, __func__, true, true);
+	}
+}
+
+static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
+{
+	struct msm_drm_private *priv;
+	struct dpu_encoder_phys_vid *vid_enc;
+	unsigned long lock_flags;
+	int ret;
+
+	if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
+			!phys_enc->parent->dev->dev_private) {
+		DPU_ERROR("invalid encoder/device\n");
+		return;
+	}
+	priv = phys_enc->parent->dev->dev_private;
+
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+	if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
+		DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
+				vid_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
+		return;
+	}
+
+	DPU_DEBUG_VIDENC(vid_enc, "\n");
+
+	if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+		return;
+
+	if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+		DPU_ERROR("already disabled\n");
+		return;
+	}
+
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 0);
+	if (dpu_encoder_phys_vid_is_master(phys_enc))
+		dpu_encoder_phys_inc_pending(phys_enc);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	/*
+	 * Wait for a vsync so we know the ENABLE=0 latched before
+	 * the (connector) source of the vsyncs gets disabled.
+	 * Otherwise we end up in a funny state if we re-enable
+	 * before the disable latches, with the result that some of
+	 * the settings changes for the new modeset (like a new
+	 * scanout buffer) don't latch properly.
+	 */
+	if (dpu_encoder_phys_vid_is_master(phys_enc)) {
+		ret = _dpu_encoder_phys_vid_wait_for_vblank(phys_enc, false);
+		if (ret) {
+			atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+			DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
+				  DRMID(phys_enc->parent),
+				  vid_enc->hw_intf->idx - INTF_0, ret);
+		}
+	}
+
+	phys_enc->enable_state = DPU_ENC_DISABLED;
+}
+
+static void dpu_encoder_phys_vid_handle_post_kickoff(
+		struct dpu_encoder_phys *phys_enc)
+{
+	unsigned long lock_flags;
+	struct dpu_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+	DPU_DEBUG_VIDENC(vid_enc, "enable_state %d\n", phys_enc->enable_state);
+
+	/*
+	 * Video mode must flush CTL before enabling timing engine
+	 * Video encoders need to turn on their interfaces now
+	 */
+	if (phys_enc->enable_state == DPU_ENC_ENABLING) {
+		trace_dpu_enc_phys_vid_post_kickoff(DRMID(phys_enc->parent),
+				    vid_enc->hw_intf->idx - INTF_0);
+		spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+		vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 1);
+		spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+		phys_enc->enable_state = DPU_ENC_ENABLED;
+	}
+}
+
+static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
+		bool enable)
+{
+	struct dpu_encoder_phys_vid *vid_enc;
+	int ret;
+
+	if (!phys_enc)
+		return;
+
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+	trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent),
+			    vid_enc->hw_intf->idx - INTF_0,
+			    enable,
+			    atomic_read(&phys_enc->vblank_refcount));
+
+	if (enable) {
+		ret = dpu_encoder_phys_vid_control_vblank_irq(phys_enc, true);
+		if (ret)
+			return;
+
+		dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
+	} else {
+		dpu_encoder_phys_vid_control_vblank_irq(phys_enc, false);
+		dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
+	}
+}
+
+static void dpu_encoder_phys_vid_setup_misr(struct dpu_encoder_phys *phys_enc,
+						bool enable, u32 frame_count)
+{
+	struct dpu_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc)
+		return;
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+	if (vid_enc->hw_intf && vid_enc->hw_intf->ops.setup_misr)
+		vid_enc->hw_intf->ops.setup_misr(vid_enc->hw_intf,
+							enable, frame_count);
+}
+
+static u32 dpu_encoder_phys_vid_collect_misr(struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc)
+		return 0;
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+	return vid_enc->hw_intf && vid_enc->hw_intf->ops.collect_misr ?
+		vid_enc->hw_intf->ops.collect_misr(vid_enc->hw_intf) : 0;
+}
+
+static int dpu_encoder_phys_vid_get_line_count(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc)
+		return -EINVAL;
+
+	if (!dpu_encoder_phys_vid_is_master(phys_enc))
+		return -EINVAL;
+
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+	if (!vid_enc->hw_intf || !vid_enc->hw_intf->ops.get_line_count)
+		return -EINVAL;
+
+	return vid_enc->hw_intf->ops.get_line_count(vid_enc->hw_intf);
+}
+
+static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
+{
+	ops->is_master = dpu_encoder_phys_vid_is_master;
+	ops->mode_set = dpu_encoder_phys_vid_mode_set;
+	ops->mode_fixup = dpu_encoder_phys_vid_mode_fixup;
+	ops->enable = dpu_encoder_phys_vid_enable;
+	ops->disable = dpu_encoder_phys_vid_disable;
+	ops->destroy = dpu_encoder_phys_vid_destroy;
+	ops->get_hw_resources = dpu_encoder_phys_vid_get_hw_resources;
+	ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq;
+	ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_vblank;
+	ops->wait_for_vblank = dpu_encoder_phys_vid_wait_for_vblank;
+	ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_vblank;
+	ops->irq_control = dpu_encoder_phys_vid_irq_control;
+	ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
+	ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
+	ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush;
+	ops->setup_misr = dpu_encoder_phys_vid_setup_misr;
+	ops->collect_misr = dpu_encoder_phys_vid_collect_misr;
+	ops->hw_reset = dpu_encoder_helper_hw_reset;
+	ops->get_line_count = dpu_encoder_phys_vid_get_line_count;
+}
+
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+		struct dpu_enc_phys_init_params *p)
+{
+	struct dpu_encoder_phys *phys_enc = NULL;
+	struct dpu_encoder_phys_vid *vid_enc = NULL;
+	struct dpu_rm_hw_iter iter;
+	struct dpu_hw_mdp *hw_mdp;
+	struct dpu_encoder_irq *irq;
+	int i, ret = 0;
+
+	if (!p) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	vid_enc = kzalloc(sizeof(*vid_enc), GFP_KERNEL);
+	if (!vid_enc) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	phys_enc = &vid_enc->base;
+
+	hw_mdp = dpu_rm_get_mdp(&p->dpu_kms->rm);
+	if (IS_ERR_OR_NULL(hw_mdp)) {
+		/* PTR_ERR(NULL) would be 0; map a NULL mdptop to an error */
+		ret = hw_mdp ? PTR_ERR(hw_mdp) : -EINVAL;
+		DPU_ERROR("failed to get mdptop\n");
+		goto fail;
+	}
+	phys_enc->hw_mdptop = hw_mdp;
+	phys_enc->intf_idx = p->intf_idx;
+
+	/*
+	 * hw_intf resource permanently assigned to this encoder
+	 * Other resources allocated at atomic commit time by use case
+	 */
+	dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_INTF);
+	while (dpu_rm_get_hw(&p->dpu_kms->rm, &iter)) {
+		struct dpu_hw_intf *hw_intf = (struct dpu_hw_intf *)iter.hw;
+
+		if (hw_intf->idx == p->intf_idx) {
+			vid_enc->hw_intf = hw_intf;
+			break;
+		}
+	}
+
+	if (!vid_enc->hw_intf) {
+		ret = -EINVAL;
+		DPU_ERROR("failed to get hw_intf\n");
+		goto fail;
+	}
+
+	DPU_DEBUG_VIDENC(vid_enc, "\n");
+
+	dpu_encoder_phys_vid_init_ops(&phys_enc->ops);
+	phys_enc->parent = p->parent;
+	phys_enc->parent_ops = p->parent_ops;
+	phys_enc->dpu_kms = p->dpu_kms;
+	phys_enc->split_role = p->split_role;
+	phys_enc->intf_mode = INTF_MODE_VIDEO;
+	phys_enc->enc_spinlock = p->enc_spinlock;
+	for (i = 0; i < INTR_IDX_MAX; i++) {
+		irq = &phys_enc->irq[i];
+		INIT_LIST_HEAD(&irq->cb.list);
+		irq->irq_idx = -EINVAL;
+		irq->hw_idx = -EINVAL;
+		irq->cb.arg = phys_enc;
+	}
+
+	irq = &phys_enc->irq[INTR_IDX_VSYNC];
+	irq->name = "vsync_irq";
+	irq->intr_type = DPU_IRQ_TYPE_INTF_VSYNC;
+	irq->intr_idx = INTR_IDX_VSYNC;
+	irq->cb.func = dpu_encoder_phys_vid_vblank_irq;
+
+	irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+	irq->name = "underrun";
+	irq->intr_type = DPU_IRQ_TYPE_INTF_UNDER_RUN;
+	irq->intr_idx = INTR_IDX_UNDERRUN;
+	irq->cb.func = dpu_encoder_phys_vid_underrun_irq;
+
+	atomic_set(&phys_enc->vblank_refcount, 0);
+	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+	phys_enc->enable_state = DPU_ENC_DISABLED;
+
+	DPU_DEBUG_VIDENC(vid_enc, "created intf idx:%d\n", p->intf_idx);
+
+	return phys_enc;
+
+fail:
+	DPU_ERROR("failed to create encoder\n");
+	if (vid_enc)
+		dpu_encoder_phys_vid_destroy(phys_enc);
+
+	return ERR_PTR(ret);
+}
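
For context, a minimal sketch of how a caller might construct this physical
video encoder. The init-params field names follow the assignments made in
dpu_encoder_phys_vid_init() above; drm_enc, dpu_kms, parent_ops, the shared
spinlock, and ENC_ROLE_SOLO are assumed to come from the surrounding encoder
setup code and are not defined by this patch:

	struct dpu_enc_phys_init_params params = {
		.dpu_kms      = dpu_kms,	/* assumed: driver KMS handle */
		.parent       = drm_enc,	/* assumed: parent drm_encoder */
		.parent_ops   = parent_ops,	/* assumed: frame-event callbacks */
		.split_role   = ENC_ROLE_SOLO,	/* assumed enum value */
		.intf_idx     = INTF_1,		/* a DSI interface on sdm845 */
		.enc_spinlock = &enc_spinlock,	/* assumed shared spinlock */
	};
	struct dpu_encoder_phys *phys = dpu_encoder_phys_vid_init(&params);

	if (IS_ERR(phys))	/* init returns ERR_PTR() on failure */
		return PTR_ERR(phys);
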
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
index 8189539..bfcd165 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -846,7 +846,7 @@
 	return 0;
 }
 
-int dpu_format_get_plane_sizes(
+static int dpu_format_get_plane_sizes(
 		const struct dpu_format *fmt,
 		const uint32_t w,
 		const uint32_t h,
@@ -869,47 +869,6 @@
 	return _dpu_format_get_plane_sizes_linear(fmt, w, h, layout, pitches);
 }
 
-int dpu_format_get_block_size(const struct dpu_format *fmt,
-		uint32_t *w, uint32_t *h)
-{
-	if (!fmt || !w || !h) {
-		DRM_ERROR("invalid pointer\n");
-		return -EINVAL;
-	}
-
-	/* TP10 is 96x96 and all others are 128x128 */
-	if (DPU_FORMAT_IS_YUV(fmt) && DPU_FORMAT_IS_DX(fmt) &&
-			(fmt->num_planes == 2) && fmt->unpack_tight)
-		*w = *h = 96;
-	else
-		*w = *h = 128;
-
-	return 0;
-}
-
-uint32_t dpu_format_get_framebuffer_size(
-		const uint32_t format,
-		const uint32_t width,
-		const uint32_t height,
-		const uint32_t *pitches,
-		const uint64_t modifiers)
-{
-	const struct dpu_format *fmt;
-	struct dpu_hw_fmt_layout layout;
-
-	fmt = dpu_get_dpu_format_ext(format, modifiers);
-	if (!fmt)
-		return 0;
-
-	if (!pitches)
-		return -EINVAL;
-
-	if (dpu_format_get_plane_sizes(fmt, width, height, &layout, pitches))
-		layout.total_size = 0;
-
-	return layout.total_size;
-}
-
 static int _dpu_format_populate_addrs_ubwc(
 		struct msm_gem_address_space *aspace,
 		struct drm_framebuffer *fb,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
index b55bfd1..a54451d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
@@ -55,36 +55,6 @@
 		uint32_t pixel_formats_max);
 
 /**
- * dpu_format_get_plane_sizes - calculate size and layout of given buffer format
- * @fmt:             pointer to dpu_format
- * @w:               width of the buffer
- * @h:               height of the buffer
- * @layout:          layout of the buffer
- * @pitches:         array of size [DPU_MAX_PLANES] to populate
- *		     pitch for each plane
- *
- * Return: size of the buffer
- */
-int dpu_format_get_plane_sizes(
-		const struct dpu_format *fmt,
-		const uint32_t w,
-		const uint32_t h,
-		struct dpu_hw_fmt_layout *layout,
-		const uint32_t *pitches);
-
-/**
- * dpu_format_get_block_size - get block size of given format when
- *	operating in block mode
- * @fmt:             pointer to dpu_format
- * @w:               pointer to width of the block
- * @h:               pointer to height of the block
- *
- * Return: 0 if success; error oode otherwise
- */
-int dpu_format_get_block_size(const struct dpu_format *fmt,
-		uint32_t *w, uint32_t *h);
-
-/**
  * dpu_format_check_modified_format - validate format and buffers for
  *                   dpu non-standard, i.e. modified format
  * @kms:             kms driver
@@ -115,22 +85,4 @@
 		struct drm_framebuffer *fb,
 		struct dpu_hw_fmt_layout *fmtl);
 
-/**
- * dpu_format_get_framebuffer_size - get framebuffer memory size
- * @format:            DRM pixel format
- * @width:             pixel width
- * @height:            pixel height
- * @pitches:           array of size [DPU_MAX_PLANES] to populate
- *		       pitch for each plane
- * @modifiers:         drm modifier
- *
- * Return: memory size required for frame buffer
- */
-uint32_t dpu_format_get_framebuffer_size(
-		const uint32_t format,
-		const uint32_t width,
-		const uint32_t height,
-		const uint32_t *pitches,
-		const uint64_t modifiers);
-
 #endif /*_DPU_FORMATS_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
new file mode 100644
index 0000000..58d29e4
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
@@ -0,0 +1,155 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_blk.h"
+
+/* Serialization lock for dpu_hw_blk_list */
+static DEFINE_MUTEX(dpu_hw_blk_lock);
+
+/* List of all hw block objects */
+static LIST_HEAD(dpu_hw_blk_list);
+
+/**
+ * dpu_hw_blk_init - initialize hw block object
+ * @hw_blk: pointer to hw block object to initialize
+ * @type: hw block type - enum dpu_hw_blk_type
+ * @id: instance id of the hw block
+ * @ops: Pointer to block operations
+ * return: 0 if success; error code otherwise
+ */
+int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
+		struct dpu_hw_blk_ops *ops)
+{
+	if (!hw_blk) {
+		pr_err("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	INIT_LIST_HEAD(&hw_blk->list);
+	hw_blk->type = type;
+	hw_blk->id = id;
+	atomic_set(&hw_blk->refcount, 0);
+
+	if (ops)
+		hw_blk->ops = *ops;
+
+	mutex_lock(&dpu_hw_blk_lock);
+	list_add(&hw_blk->list, &dpu_hw_blk_list);
+	mutex_unlock(&dpu_hw_blk_lock);
+
+	return 0;
+}
+
+/**
+ * dpu_hw_blk_destroy - destroy hw block object.
+ * @hw_blk:  pointer to hw block object
+ * return: none
+ */
+void dpu_hw_blk_destroy(struct dpu_hw_blk *hw_blk)
+{
+	if (!hw_blk) {
+		pr_err("invalid parameters\n");
+		return;
+	}
+
+	if (atomic_read(&hw_blk->refcount))
+		pr_err("hw_blk:%d.%d invalid refcount\n", hw_blk->type,
+				hw_blk->id);
+
+	mutex_lock(&dpu_hw_blk_lock);
+	list_del(&hw_blk->list);
+	mutex_unlock(&dpu_hw_blk_lock);
+}
+
+/**
+ * dpu_hw_blk_get - get hw_blk from free pool
+ * @hw_blk: if specified, increment reference count only
+ * @type: if hw_blk is not specified, allocate the next available of this type
+ * @id: if specified (>= 0), allocate the given instance of the above type
+ * return: pointer to hw block object
+ */
+struct dpu_hw_blk *dpu_hw_blk_get(struct dpu_hw_blk *hw_blk, u32 type, int id)
+{
+	struct dpu_hw_blk *curr;
+	int rc, refcount;
+
+	if (!hw_blk) {
+		mutex_lock(&dpu_hw_blk_lock);
+		list_for_each_entry(curr, &dpu_hw_blk_list, list) {
+			if ((curr->type != type) ||
+					(id >= 0 && curr->id != id) ||
+					(id < 0 &&
+						atomic_read(&curr->refcount)))
+				continue;
+
+			hw_blk = curr;
+			break;
+		}
+		mutex_unlock(&dpu_hw_blk_lock);
+	}
+
+	if (!hw_blk) {
+		pr_debug("no hw_blk:%d\n", type);
+		return NULL;
+	}
+
+	refcount = atomic_inc_return(&hw_blk->refcount);
+
+	if (refcount == 1 && hw_blk->ops.start) {
+		rc = hw_blk->ops.start(hw_blk);
+		if (rc) {
+			pr_err("failed to start  hw_blk:%d rc:%d\n", type, rc);
+			goto error_start;
+		}
+	}
+
+	pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type,
+			hw_blk->id, refcount);
+	return hw_blk;
+
+error_start:
+	dpu_hw_blk_put(hw_blk);
+	return ERR_PTR(rc);
+}
+
+/**
+ * dpu_hw_blk_put - put hw_blk to free pool if decremented refcount is zero
+ * @hw_blk: hw block to be freed
+ */
+void dpu_hw_blk_put(struct dpu_hw_blk *hw_blk)
+{
+	if (!hw_blk) {
+		pr_err("invalid parameters\n");
+		return;
+	}
+
+	pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type, hw_blk->id,
+			atomic_read(&hw_blk->refcount));
+
+	if (!atomic_read(&hw_blk->refcount)) {
+		pr_err("hw_blk:%d.%d invalid put\n", hw_blk->type, hw_blk->id);
+		return;
+	}
+
+	if (atomic_dec_return(&hw_blk->refcount))
+		return;
+
+	if (hw_blk->ops.stop)
+		hw_blk->ops.stop(hw_blk);
+}
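
Taken together, these helpers implement a small refcounted block pool: the
first dpu_hw_blk_get() on an idle block calls ops.start, and the last
dpu_hw_blk_put() calls ops.stop. A minimal lifecycle sketch, assuming a
hypothetical caller-defined MY_BLK_TYPE and trivial start/stop callbacks:

	static int my_start(struct dpu_hw_blk *blk)	{ return 0; }	/* e.g. power on */
	static void my_stop(struct dpu_hw_blk *blk)	{ }		/* e.g. power off */

	static struct dpu_hw_blk_ops my_ops = { .start = my_start, .stop = my_stop };
	static struct dpu_hw_blk my_blk;	/* usually embedded in a larger driver struct */

	int my_blk_example(void)	/* hypothetical */
	{
		struct dpu_hw_blk *blk;
		int ret = dpu_hw_blk_init(&my_blk, MY_BLK_TYPE, 0, &my_ops);

		if (ret)
			return ret;

		/* id < 0: grab any idle (refcount == 0) instance of this type */
		blk = dpu_hw_blk_get(NULL, MY_BLK_TYPE, -1);
		if (IS_ERR_OR_NULL(blk))
			return blk ? PTR_ERR(blk) : -ENODEV;

		/* ... program the hardware behind the block ... */

		dpu_hw_blk_put(blk);		/* refcount 1 -> 0 invokes ops.stop */
		dpu_hw_blk_destroy(&my_blk);
		return 0;
	}
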
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
new file mode 100644
index 0000000..0f4ca8af
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_BLK_H
+#define _DPU_HW_BLK_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
+
+struct dpu_hw_blk;
+
+/**
+ * struct dpu_hw_blk_ops - common hardware block operations
+ * @start: start operation on first get
+ * @stop: stop operation on last put
+ */
+struct dpu_hw_blk_ops {
+	int (*start)(struct dpu_hw_blk *);
+	void (*stop)(struct dpu_hw_blk *);
+};
+
+/**
+ * struct dpu_hw_blk - definition of hardware block object
+ * @list: list of hardware blocks
+ * @type: hardware block type
+ * @id: instance id
+ * @refcount: reference/usage count
+ */
+struct dpu_hw_blk {
+	struct list_head list;
+	u32 type;
+	int id;
+	atomic_t refcount;
+	struct dpu_hw_blk_ops ops;
+};
+
+int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
+		struct dpu_hw_blk_ops *ops);
+void dpu_hw_blk_destroy(struct dpu_hw_blk *hw_blk);
+
+struct dpu_hw_blk *dpu_hw_blk_get(struct dpu_hw_blk *hw_blk, u32 type, int id);
+void dpu_hw_blk_put(struct dpu_hw_blk *hw_blk);
+#endif /*_DPU_HW_BLK_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
new file mode 100644
index 0000000..44ee063
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -0,0 +1,511 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_catalog_format.h"
+#include "dpu_kms.h"
+
+#define VIG_SDM845_MASK \
+	(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_SCALER_QSEED3) | BIT(DPU_SSPP_QOS) |\
+	BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_QOS_8LVL) |\
+	BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT))
+
+#define DMA_SDM845_MASK \
+	(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
+	BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
+	BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
+
+#define MIXER_SDM845_MASK \
+	(BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER))
+
+#define PINGPONG_SDM845_MASK BIT(DPU_PINGPONG_DITHER)
+
+#define PINGPONG_SDM845_SPLIT_MASK \
+	(PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2))
+
+#define DEFAULT_PIXEL_RAM_SIZE		(50 * 1024)
+#define DEFAULT_DPU_LINE_WIDTH		2048
+#define DEFAULT_DPU_OUTPUT_LINE_WIDTH	2560
+
+#define MAX_HORZ_DECIMATION	4
+#define MAX_VERT_DECIMATION	4
+
+#define MAX_UPSCALE_RATIO	20
+#define MAX_DOWNSCALE_RATIO	4
+#define SSPP_UNITY_SCALE	1
+
+#define STRCAT(X, Y) (X Y)
+
+/*************************************************************
+ * DPU sub blocks config
+ *************************************************************/
+/* DPU top level caps */
+static const struct dpu_caps sdm845_dpu_caps = {
+	.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+	.max_mixer_blendstages = 0xb,
+	.qseed_type = DPU_SSPP_SCALER_QSEED3,
+	.smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
+	.ubwc_version = DPU_HW_UBWC_VER_20,
+	.has_src_split = true,
+	.has_dim_layer = true,
+	.has_idle_pc = true,
+};
+
+static struct dpu_mdp_cfg sdm845_mdp[] = {
+	{
+	.name = "top_0", .id = MDP_TOP,
+	.base = 0x0, .len = 0x45C,
+	.features = 0,
+	.highest_bank_bit = 0x2,
+	.has_dest_scaler = true,
+	.clk_ctrls[DPU_CLK_CTRL_VIG0] = {
+			.reg_off = 0x2AC, .bit_off = 0},
+	.clk_ctrls[DPU_CLK_CTRL_VIG1] = {
+			.reg_off = 0x2B4, .bit_off = 0},
+	.clk_ctrls[DPU_CLK_CTRL_VIG2] = {
+			.reg_off = 0x2BC, .bit_off = 0},
+	.clk_ctrls[DPU_CLK_CTRL_VIG3] = {
+			.reg_off = 0x2C4, .bit_off = 0},
+	.clk_ctrls[DPU_CLK_CTRL_DMA0] = {
+			.reg_off = 0x2AC, .bit_off = 8},
+	.clk_ctrls[DPU_CLK_CTRL_DMA1] = {
+			.reg_off = 0x2B4, .bit_off = 8},
+	.clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+			.reg_off = 0x2BC, .bit_off = 8},
+	.clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+			.reg_off = 0x2C4, .bit_off = 8},
+	},
+};
+
+/*************************************************************
+ * CTL sub blocks config
+ *************************************************************/
+static struct dpu_ctl_cfg sdm845_ctl[] = {
+	{
+	.name = "ctl_0", .id = CTL_0,
+	.base = 0x1000, .len = 0xE4,
+	.features = BIT(DPU_CTL_SPLIT_DISPLAY)
+	},
+	{
+	.name = "ctl_1", .id = CTL_1,
+	.base = 0x1200, .len = 0xE4,
+	.features = BIT(DPU_CTL_SPLIT_DISPLAY)
+	},
+	{
+	.name = "ctl_2", .id = CTL_2,
+	.base = 0x1400, .len = 0xE4,
+	.features = 0
+	},
+	{
+	.name = "ctl_3", .id = CTL_3,
+	.base = 0x1600, .len = 0xE4,
+	.features = 0
+	},
+	{
+	.name = "ctl_4", .id = CTL_4,
+	.base = 0x1800, .len = 0xE4,
+	.features = 0
+	},
+};
+
+/*************************************************************
+ * SSPP sub blocks config
+ *************************************************************/
+
+/* SSPP common configuration */
+static const struct dpu_sspp_blks_common sdm845_sspp_common = {
+	.maxlinewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+	.pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+	.maxhdeciexp = MAX_HORZ_DECIMATION,
+	.maxvdeciexp = MAX_VERT_DECIMATION,
+};
+
+#define _VIG_SBLK(num, sdma_pri) \
+	{ \
+	.common = &sdm845_sspp_common, \
+	.maxdwnscale = MAX_DOWNSCALE_RATIO, \
+	.maxupscale = MAX_UPSCALE_RATIO, \
+	.smart_dma_priority = sdma_pri, \
+	.src_blk = {.name = STRCAT("sspp_src_", num), \
+		.id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
+	.scaler_blk = {.name = STRCAT("sspp_scaler", num), \
+		.id = DPU_SSPP_SCALER_QSEED3, \
+		.base = 0xa00, .len = 0xa0,}, \
+	.csc_blk = {.name = STRCAT("sspp_csc", num), \
+		.id = DPU_SSPP_CSC_10BIT, \
+		.base = 0x1a00, .len = 0x100,}, \
+	.format_list = plane_formats_yuv, \
+	.virt_format_list = plane_formats, \
+	}
+
+#define _DMA_SBLK(num, sdma_pri) \
+	{ \
+	.common = &sdm845_sspp_common, \
+	.maxdwnscale = SSPP_UNITY_SCALE, \
+	.maxupscale = SSPP_UNITY_SCALE, \
+	.smart_dma_priority = sdma_pri, \
+	.src_blk = {.name = STRCAT("sspp_src_", num), \
+		.id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
+	.format_list = plane_formats, \
+	.virt_format_list = plane_formats, \
+	}
+
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 = _VIG_SBLK("0", 5);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 = _VIG_SBLK("1", 6);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 = _VIG_SBLK("2", 7);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 = _VIG_SBLK("3", 8);
+
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_0 = _DMA_SBLK("8", 1);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK("9", 2);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_2 = _DMA_SBLK("10", 3);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK("11", 4);
+
+#define SSPP_VIG_BLK(_name, _id, _base, _sblk, _xinid, _clkctrl) \
+	{ \
+	.name = _name, .id = _id, \
+	.base = _base, .len = 0x1c8, \
+	.features = VIG_SDM845_MASK, \
+	.sblk = &_sblk, \
+	.xin_id = _xinid, \
+	.type = SSPP_TYPE_VIG, \
+	.clk_ctrl = _clkctrl \
+	}
+
+#define SSPP_DMA_BLK(_name, _id, _base, _sblk, _xinid, _clkctrl) \
+	{ \
+	.name = _name, .id = _id, \
+	.base = _base, .len = 0x1c8, \
+	.features = DMA_SDM845_MASK, \
+	.sblk = &_sblk, \
+	.xin_id = _xinid, \
+	.type = SSPP_TYPE_DMA, \
+	.clk_ctrl = _clkctrl \
+	}
+
+static struct dpu_sspp_cfg sdm845_sspp[] = {
+	SSPP_VIG_BLK("sspp_0", SSPP_VIG0, 0x4000,
+		sdm845_vig_sblk_0, 0, DPU_CLK_CTRL_VIG0),
+	SSPP_VIG_BLK("sspp_1", SSPP_VIG1, 0x6000,
+		sdm845_vig_sblk_1, 4, DPU_CLK_CTRL_VIG1),
+	SSPP_VIG_BLK("sspp_2", SSPP_VIG2, 0x8000,
+		sdm845_vig_sblk_2, 8, DPU_CLK_CTRL_VIG2),
+	SSPP_VIG_BLK("sspp_3", SSPP_VIG3, 0xa000,
+		sdm845_vig_sblk_3, 12, DPU_CLK_CTRL_VIG3),
+	SSPP_DMA_BLK("sspp_8", SSPP_DMA0, 0x24000,
+		sdm845_dma_sblk_0, 1, DPU_CLK_CTRL_DMA0),
+	SSPP_DMA_BLK("sspp_9", SSPP_DMA1, 0x26000,
+		sdm845_dma_sblk_1, 5, DPU_CLK_CTRL_DMA1),
+	SSPP_DMA_BLK("sspp_10", SSPP_DMA2, 0x28000,
+		sdm845_dma_sblk_2, 9, DPU_CLK_CTRL_CURSOR0),
+	SSPP_DMA_BLK("sspp_11", SSPP_DMA3, 0x2a000,
+		sdm845_dma_sblk_3, 13, DPU_CLK_CTRL_CURSOR1),
+};
+
+/*************************************************************
+ * MIXER sub blocks config
+ *************************************************************/
+static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
+	.maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+	.maxblendstages = 11, /* excluding base layer */
+	.blendstage_base = { /* offsets relative to mixer base */
+		0x20, 0x38, 0x50, 0x68, 0x80, 0x98,
+		0xb0, 0xc8, 0xe0, 0xf8, 0x110
+	},
+};
+
+#define LM_BLK(_name, _id, _base, _ds, _pp, _lmpair) \
+	{ \
+	.name = _name, .id = _id, \
+	.base = _base, .len = 0x320, \
+	.features = MIXER_SDM845_MASK, \
+	.sblk = &sdm845_lm_sblk, \
+	.ds = _ds, \
+	.pingpong = _pp, \
+	.lm_pair_mask = (1 << _lmpair) \
+	}
+
+static struct dpu_lm_cfg sdm845_lm[] = {
+	LM_BLK("lm_0", LM_0, 0x44000, DS_0, PINGPONG_0, LM_1),
+	LM_BLK("lm_1", LM_1, 0x45000, DS_1, PINGPONG_1, LM_0),
+	LM_BLK("lm_2", LM_2, 0x46000, DS_MAX, PINGPONG_2, LM_5),
+	LM_BLK("lm_3", LM_3, 0x0, DS_MAX, PINGPONG_MAX, 0),
+	LM_BLK("lm_4", LM_4, 0x0, DS_MAX, PINGPONG_MAX, 0),
+	LM_BLK("lm_5", LM_5, 0x49000, DS_MAX, PINGPONG_3, LM_2),
+};
+
+/*************************************************************
+ * DS sub blocks config
+ *************************************************************/
+static const struct dpu_ds_top_cfg sdm845_ds_top = {
+	.name = "ds_top_0", .id = DS_TOP,
+	.base = 0x60000, .len = 0xc,
+	.maxinputwidth = DEFAULT_DPU_LINE_WIDTH,
+	.maxoutputwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+	.maxupscale = MAX_UPSCALE_RATIO,
+};
+
+#define DS_BLK(_name, _id, _base) \
+	{\
+	.name = _name, .id = _id, \
+	.base = _base, .len = 0x800, \
+	.features = DPU_SSPP_SCALER_QSEED3, \
+	.top = &sdm845_ds_top \
+	}
+
+static struct dpu_ds_cfg sdm845_ds[] = {
+	DS_BLK("ds_0", DS_0, 0x800),
+	DS_BLK("ds_1", DS_1, 0x1000),
+};
+
+/*************************************************************
+ * PINGPONG sub blocks config
+ *************************************************************/
+static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = {
+	.te2 = {.id = DPU_PINGPONG_TE2, .base = 0x2000, .len = 0x0,
+		.version = 0x1},
+	.dither = {.id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+		.len = 0x20, .version = 0x10000},
+};
+
+static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = {
+	.dither = {.id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+		.len = 0x20, .version = 0x10000},
+};
+
+#define PP_BLK_TE(_name, _id, _base) \
+	{\
+	.name = _name, .id = _id, \
+	.base = _base, .len = 0xd4, \
+	.features = PINGPONG_SDM845_SPLIT_MASK, \
+	.sblk = &sdm845_pp_sblk_te \
+	}
+#define PP_BLK(_name, _id, _base) \
+	{\
+	.name = _name, .id = _id, \
+	.base = _base, .len = 0xd4, \
+	.features = PINGPONG_SDM845_MASK, \
+	.sblk = &sdm845_pp_sblk \
+	}
+
+static struct dpu_pingpong_cfg sdm845_pp[] = {
+	PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000),
+	PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800),
+	PP_BLK("pingpong_2", PINGPONG_2, 0x71000),
+	PP_BLK("pingpong_3", PINGPONG_3, 0x71800),
+};
+
+/*************************************************************
+ * INTF sub blocks config
+ *************************************************************/
+#define INTF_BLK(_name, _id, _base, _type, _ctrl_id) \
+	{\
+	.name = _name, .id = _id, \
+	.base = _base, .len = 0x280, \
+	.type = _type, \
+	.controller_id = _ctrl_id, \
+	.prog_fetch_lines_worst_case = 24 \
+	}
+
+static struct dpu_intf_cfg sdm845_intf[] = {
+	INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0),
+	INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0),
+	INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1),
+	INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1),
+};
+
+/*************************************************************
+ * CDM sub blocks config
+ *************************************************************/
+static struct dpu_cdm_cfg sdm845_cdm[] = {
+	{
+	.name = "cdm_0", .id = CDM_0,
+	.base = 0x79200, .len = 0x224,
+	.features = 0,
+	.intf_connect = BIT(INTF_3),
+	},
+};
+
+/*************************************************************
+ * VBIF sub blocks config
+ *************************************************************/
+/* VBIF QOS remap */
+static u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6};
+static u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3};
+
+static struct dpu_vbif_cfg sdm845_vbif[] = {
+	{
+	.name = "vbif_0", .id = VBIF_0,
+	.base = 0, .len = 0x1040,
+	.features = BIT(DPU_VBIF_QOS_REMAP),
+	.xin_halt_timeout = 0x4000,
+	.qos_rt_tbl = {
+		.npriority_lvl = ARRAY_SIZE(sdm845_rt_pri_lvl),
+		.priority_lvl = sdm845_rt_pri_lvl,
+		},
+	.qos_nrt_tbl = {
+		.npriority_lvl = ARRAY_SIZE(sdm845_nrt_pri_lvl),
+		.priority_lvl = sdm845_nrt_pri_lvl,
+		},
+	.memtype_count = 14,
+	.memtype = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3},
+	},
+};
+
+static struct dpu_reg_dma_cfg sdm845_regdma = {
+	.base = 0x0, .version = 0x1, .trigger_sel_off = 0x119c
+};
+
+/*************************************************************
+ * PERF data config
+ *************************************************************/
+
+/* SSPP QOS LUTs */
+static struct dpu_qos_lut_entry sdm845_qos_linear[] = {
+	{.fl = 4, .lut = 0x357},
+	{.fl = 5, .lut = 0x3357},
+	{.fl = 6, .lut = 0x23357},
+	{.fl = 7, .lut = 0x223357},
+	{.fl = 8, .lut = 0x2223357},
+	{.fl = 9, .lut = 0x22223357},
+	{.fl = 10, .lut = 0x222223357},
+	{.fl = 11, .lut = 0x2222223357},
+	{.fl = 12, .lut = 0x22222223357},
+	{.fl = 13, .lut = 0x222222223357},
+	{.fl = 14, .lut = 0x1222222223357},
+	{.fl = 0, .lut = 0x11222222223357}
+};
+
+static struct dpu_qos_lut_entry sdm845_qos_macrotile[] = {
+	{.fl = 10, .lut = 0x344556677},
+	{.fl = 11, .lut = 0x3344556677},
+	{.fl = 12, .lut = 0x23344556677},
+	{.fl = 13, .lut = 0x223344556677},
+	{.fl = 14, .lut = 0x1223344556677},
+	{.fl = 0, .lut = 0x112233344556677},
+};
+
+static struct dpu_qos_lut_entry sdm845_qos_nrt[] = {
+	{.fl = 0, .lut = 0x0},
+};
+
+static struct dpu_perf_cfg sdm845_perf_data = {
+	.max_bw_low = 6800000,
+	.max_bw_high = 6800000,
+	.min_core_ib = 2400000,
+	.min_llcc_ib = 800000,
+	.min_dram_ib = 800000,
+	.core_ib_ff = "6.0",
+	.core_clk_ff = "1.0",
+	.comp_ratio_rt =
+	"NV12/5/1/1.23 AB24/5/1/1.23 XB24/5/1/1.23",
+	.comp_ratio_nrt =
+	"NV12/5/1/1.25 AB24/5/1/1.25 XB24/5/1/1.25",
+	.undersized_prefill_lines = 2,
+	.xtra_prefill_lines = 2,
+	.dest_scale_prefill_lines = 3,
+	.macrotile_prefill_lines = 4,
+	.yuv_nv12_prefill_lines = 8,
+	.linear_prefill_lines = 1,
+	.downscaling_prefill_lines = 1,
+	.amortizable_threshold = 25,
+	.min_prefill_lines = 24,
+	.danger_lut_tbl = {0xf, 0xffff, 0x0},
+	.qos_lut_tbl = {
+		{.nentry = ARRAY_SIZE(sdm845_qos_linear),
+		.entries = sdm845_qos_linear
+		},
+		{.nentry = ARRAY_SIZE(sdm845_qos_macrotile),
+		.entries = sdm845_qos_macrotile
+		},
+		{.nentry = ARRAY_SIZE(sdm845_qos_nrt),
+		.entries = sdm845_qos_nrt
+		},
+	},
+	.cdp_cfg = {
+		{.rd_enable = 1, .wr_enable = 1},
+		{.rd_enable = 1, .wr_enable = 0}
+	},
+};
+
+/*************************************************************
+ * Hardware catalog init
+ *************************************************************/
+
+/*
+ * sdm845_cfg_init(): populate sdm845 dpu sub-blocks reg offsets
+ * and instance counts.
+ */
+static void sdm845_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
+{
+	*dpu_cfg = (struct dpu_mdss_cfg){
+		.caps = &sdm845_dpu_caps,
+		.mdp_count = ARRAY_SIZE(sdm845_mdp),
+		.mdp = sdm845_mdp,
+		.ctl_count = ARRAY_SIZE(sdm845_ctl),
+		.ctl = sdm845_ctl,
+		.sspp_count = ARRAY_SIZE(sdm845_sspp),
+		.sspp = sdm845_sspp,
+		.mixer_count = ARRAY_SIZE(sdm845_lm),
+		.mixer = sdm845_lm,
+		.ds_count = ARRAY_SIZE(sdm845_ds),
+		.ds = sdm845_ds,
+		.pingpong_count = ARRAY_SIZE(sdm845_pp),
+		.pingpong = sdm845_pp,
+		.cdm_count = ARRAY_SIZE(sdm845_cdm),
+		.cdm = sdm845_cdm,
+		.intf_count = ARRAY_SIZE(sdm845_intf),
+		.intf = sdm845_intf,
+		.vbif_count = ARRAY_SIZE(sdm845_vbif),
+		.vbif = sdm845_vbif,
+		.reg_dma_count = 1,
+		.dma_cfg = sdm845_regdma,
+		.perf = sdm845_perf_data,
+	};
+}
+
+static struct dpu_mdss_hw_cfg_handler cfg_handler[] = {
+	{ .hw_rev = DPU_HW_VER_400, .cfg_init = sdm845_cfg_init},
+	{ .hw_rev = DPU_HW_VER_401, .cfg_init = sdm845_cfg_init},
+};
+
+void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg)
+{
+	kfree(dpu_cfg);
+}
+
+struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev)
+{
+	int i;
+	struct dpu_mdss_cfg *dpu_cfg;
+
+	dpu_cfg = kzalloc(sizeof(*dpu_cfg), GFP_KERNEL);
+	if (!dpu_cfg)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) {
+		if (cfg_handler[i].hw_rev == hw_rev) {
+			cfg_handler[i].cfg_init(dpu_cfg);
+			dpu_cfg->hwversion = hw_rev;
+			return dpu_cfg;
+		}
+	}
+
+	DPU_ERROR("unsupported chipset id:%X\n", hw_rev);
+	dpu_hw_catalog_deinit(dpu_cfg);
+	return ERR_PTR(-ENODEV);
+}
+
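The intended consumer flow for the catalog is init-by-revision, use, then
deinit. A sketch of that flow (reading the hardware revision out of the MDSS
version register is assumed to happen elsewhere in the driver):

	struct dpu_mdss_cfg *cfg = dpu_hw_catalog_init(DPU_HW_VER_401);	/* sdm845 v2.0 */

	if (IS_ERR(cfg))
		return PTR_ERR(cfg);

	/* ... consume cfg->caps, cfg->sspp, cfg->perf, ... */

	dpu_hw_catalog_deinit(cfg);
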
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
new file mode 100644
index 0000000..f0cb0d4
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -0,0 +1,804 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_CATALOG_H
+#define _DPU_HW_CATALOG_H
+
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/bitmap.h>
+#include <linux/err.h>
+#include <drm/drmP.h>
+
+/**
+ * Max hardware block count: e.g. max 12 SSPP pipes or 5 CTL paths.
+ * In all cases, the current design allows at most 12 hardware blocks.
+ */
+#define MAX_BLOCKS    12
+
+#define DPU_HW_VER(MAJOR, MINOR, STEP) (((MAJOR & 0xF) << 28)    |\
+		((MINOR & 0xFFF) << 16)  |\
+		(STEP & 0xFFFF))
+
+#define DPU_HW_MAJOR(rev)		((rev) >> 28)
+#define DPU_HW_MINOR(rev)		(((rev) >> 16) & 0xFFF)
+#define DPU_HW_STEP(rev)		((rev) & 0xFFFF)
+#define DPU_HW_MAJOR_MINOR(rev)		((rev) >> 16)
+
+#define IS_DPU_MAJOR_MINOR_SAME(rev1, rev2)   \
+	(DPU_HW_MAJOR_MINOR((rev1)) == DPU_HW_MAJOR_MINOR((rev2)))
+
+#define DPU_HW_VER_170	DPU_HW_VER(1, 7, 0) /* 8996 v1.0 */
+#define DPU_HW_VER_171	DPU_HW_VER(1, 7, 1) /* 8996 v2.0 */
+#define DPU_HW_VER_172	DPU_HW_VER(1, 7, 2) /* 8996 v3.0 */
+#define DPU_HW_VER_300	DPU_HW_VER(3, 0, 0) /* 8998 v1.0 */
+#define DPU_HW_VER_301	DPU_HW_VER(3, 0, 1) /* 8998 v1.1 */
+#define DPU_HW_VER_400	DPU_HW_VER(4, 0, 0) /* sdm845 v1.0 */
+#define DPU_HW_VER_401	DPU_HW_VER(4, 0, 1) /* sdm845 v2.0 */
+#define DPU_HW_VER_410	DPU_HW_VER(4, 1, 0) /* sdm670 v1.0 */
+#define DPU_HW_VER_500	DPU_HW_VER(5, 0, 0) /* sdm855 v1.0 */
+
+
+#define IS_MSM8996_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_170)
+#define IS_MSM8998_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_300)
+#define IS_SDM845_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_400)
+#define IS_SDM670_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_410)
+#define IS_SDM855_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_500)
+
+
+#define DPU_HW_BLK_NAME_LEN	16
+
+#define MAX_IMG_WIDTH 0x3fff
+#define MAX_IMG_HEIGHT 0x3fff
+
+#define CRTC_DUAL_MIXERS	2
+
+#define MAX_XIN_COUNT 16
+
+/**
+ * Supported UBWC feature versions
+ */
+enum {
+	DPU_HW_UBWC_VER_10 = 0x100,
+	DPU_HW_UBWC_VER_20 = 0x200,
+	DPU_HW_UBWC_VER_30 = 0x300,
+};
+
+#define IS_UBWC_20_SUPPORTED(rev)       ((rev) >= DPU_HW_UBWC_VER_20)
+
+/**
+ * MDP TOP BLOCK features
+ * @DPU_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe
+ * @DPU_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats
+ * @DPU_MDP_BWC,           MDSS HW supports Bandwidth compression.
+ * @DPU_MDP_UBWC_1_0,      Chipset supports Universal Bandwidth
+ *                         compression initial revision
+ * @DPU_MDP_UBWC_1_5,      Universal Bandwidth compression version 1.5
+ * @DPU_MDP_MAX            Maximum value
+ */
+enum {
+	DPU_MDP_PANIC_PER_PIPE = 0x1,
+	DPU_MDP_10BIT_SUPPORT,
+	DPU_MDP_BWC,
+	DPU_MDP_UBWC_1_0,
+	DPU_MDP_UBWC_1_5,
+	DPU_MDP_MAX
+};
+
+/**
+ * SSPP sub-blocks/features
+ * @DPU_SSPP_SRC             Src and fetch part of the pipes,
+ * @DPU_SSPP_SCALER_QSEED2,  QSEED2 algorithm support
+ * @DPU_SSPP_SCALER_QSEED3,  QSEED3 algorithm support
+ * @DPU_SSPP_SCALER_RGB,     RGB Scaler, supported by RGB pipes
+ * @DPU_SSPP_CSC,            Support of Color space conversion
+ * @DPU_SSPP_CSC_10BIT,      Support of 10-bit Color space conversion
+ * @DPU_SSPP_CURSOR,         SSPP can be used as a cursor layer
+ * @DPU_SSPP_QOS,            SSPP support QoS control, danger/safe/creq
+ * @DPU_SSPP_QOS_8LVL,       SSPP support 8-level QoS control
+ * @DPU_SSPP_EXCL_RECT,      SSPP supports exclusion rect
+ * @DPU_SSPP_SMART_DMA_V1,   SmartDMA 1.0 support
+ * @DPU_SSPP_SMART_DMA_V2,   SmartDMA 2.0 support
+ * @DPU_SSPP_TS_PREFILL      Supports prefill with traffic shaper
+ * @DPU_SSPP_TS_PREFILL_REC1 Supports prefill with traffic shaper multirec
+ * @DPU_SSPP_CDP             Supports client driven prefetch
+ * @DPU_SSPP_MAX             maximum value
+ */
+enum {
+	DPU_SSPP_SRC = 0x1,
+	DPU_SSPP_SCALER_QSEED2,
+	DPU_SSPP_SCALER_QSEED3,
+	DPU_SSPP_SCALER_RGB,
+	DPU_SSPP_CSC,
+	DPU_SSPP_CSC_10BIT,
+	DPU_SSPP_CURSOR,
+	DPU_SSPP_QOS,
+	DPU_SSPP_QOS_8LVL,
+	DPU_SSPP_EXCL_RECT,
+	DPU_SSPP_SMART_DMA_V1,
+	DPU_SSPP_SMART_DMA_V2,
+	DPU_SSPP_TS_PREFILL,
+	DPU_SSPP_TS_PREFILL_REC1,
+	DPU_SSPP_CDP,
+	DPU_SSPP_MAX
+};
+
+/*
+ * MIXER sub-blocks/features
+ * @DPU_MIXER_LAYER           Layer mixer layer blend configuration,
+ * @DPU_MIXER_SOURCESPLIT     Layer mixer supports source-split configuration
+ * @DPU_MIXER_GC              Gamma correction block
+ * @DPU_DIM_LAYER             Layer mixer supports dim layer
+ * @DPU_MIXER_MAX             maximum value
+ */
+enum {
+	DPU_MIXER_LAYER = 0x1,
+	DPU_MIXER_SOURCESPLIT,
+	DPU_MIXER_GC,
+	DPU_DIM_LAYER,
+	DPU_MIXER_MAX
+};
+
+/**
+ * PINGPONG sub-blocks
+ * @DPU_PINGPONG_TE         Tear check block
+ * @DPU_PINGPONG_TE2        Additional tear check block for split pipes
+ * @DPU_PINGPONG_SPLIT      PP block supports split fifo
+ * @DPU_PINGPONG_SLAVE      PP block is a suitable slave for split fifo
+ * @DPU_PINGPONG_DITHER,    Dither blocks
+ * @DPU_PINGPONG_MAX
+ */
+enum {
+	DPU_PINGPONG_TE = 0x1,
+	DPU_PINGPONG_TE2,
+	DPU_PINGPONG_SPLIT,
+	DPU_PINGPONG_SLAVE,
+	DPU_PINGPONG_DITHER,
+	DPU_PINGPONG_MAX
+};
+
+/**
+ * CTL sub-blocks
+ * @DPU_CTL_SPLIT_DISPLAY       CTL supports video mode split display
+ * @DPU_CTL_MAX
+ */
+enum {
+	DPU_CTL_SPLIT_DISPLAY = 0x1,
+	DPU_CTL_MAX
+};
+
+/**
+ * VBIF sub-blocks and features
+ * @DPU_VBIF_QOS_OTLIM        VBIF supports OT Limit
+ * @DPU_VBIF_QOS_REMAP        VBIF supports QoS priority remap
+ * @DPU_VBIF_MAX              maximum value
+ */
+enum {
+	DPU_VBIF_QOS_OTLIM = 0x1,
+	DPU_VBIF_QOS_REMAP,
+	DPU_VBIF_MAX
+};
+
+/**
+ * MACRO DPU_HW_BLK_INFO - information of HW blocks inside DPU
+ * @name:              string name for debug purposes
+ * @id:                enum identifying this block
+ * @base:              register base offset to mdss
+ * @len:               length of hardware block
+ * @features           bit mask identifying sub-blocks/features
+ */
+#define DPU_HW_BLK_INFO \
+	char name[DPU_HW_BLK_NAME_LEN]; \
+	u32 id; \
+	u32 base; \
+	u32 len; \
+	unsigned long features
+
+/**
+ * MACRO DPU_HW_SUBBLK_INFO - information of HW sub-block inside DPU
+ * @name:              string name for debug purposes
+ * @id:                enum identifying this sub-block
+ * @base:              offset of this sub-block relative to the block
+ *                     offset
+ * @len                register block length of this sub-block
+ */
+#define DPU_HW_SUBBLK_INFO \
+	char name[DPU_HW_BLK_NAME_LEN]; \
+	u32 id; \
+	u32 base; \
+	u32 len
+
+/**
+ * struct dpu_src_blk: SSPP part of the source pipes
+ * @info:   HW register and features supported by this sub-blk
+ */
+struct dpu_src_blk {
+	DPU_HW_SUBBLK_INFO;
+};
+
+/**
+ * struct dpu_scaler_blk: Scaler information
+ * @info:   HW register and features supported by this sub-blk
+ * @version: qseed block revision
+ */
+struct dpu_scaler_blk {
+	DPU_HW_SUBBLK_INFO;
+	u32 version;
+};
+
+struct dpu_csc_blk {
+	DPU_HW_SUBBLK_INFO;
+};
+
+/**
+ * struct dpu_pp_blk : Pixel processing sub-blk information
+ * @info:   HW register and features supported by this sub-blk
+ * @version: HW Algorithm version
+ */
+struct dpu_pp_blk {
+	DPU_HW_SUBBLK_INFO;
+	u32 version;
+};
+
+/**
+ * struct dpu_format_extended - define dpu specific pixel format+modifier
+ * @fourcc_format: Base FOURCC pixel format code
+ * @modifier: 64-bit drm format modifier, same modifier must be applied to all
+ *            framebuffer planes
+ */
+struct dpu_format_extended {
+	uint32_t fourcc_format;
+	uint64_t modifier;
+};
+
+/**
+ * enum dpu_qos_lut_usage - define QoS LUT use cases
+ */
+enum dpu_qos_lut_usage {
+	DPU_QOS_LUT_USAGE_LINEAR,
+	DPU_QOS_LUT_USAGE_MACROTILE,
+	DPU_QOS_LUT_USAGE_NRT,
+	DPU_QOS_LUT_USAGE_MAX,
+};
+
+/**
+ * struct dpu_qos_lut_entry - define QoS LUT table entry
+ * @fl: fill level, or zero on last entry to indicate default lut
+ * @lut: lut to use if equal to or less than fill level
+ */
+struct dpu_qos_lut_entry {
+	u32 fl;
+	u64 lut;
+};
+
+/**
+ * struct dpu_qos_lut_tbl - define QoS LUT table
+ * @nentry: number of entry in this table
+ * @entries: Pointer to table entries
+ */
+struct dpu_qos_lut_tbl {
+	u32 nentry;
+	struct dpu_qos_lut_entry *entries;
+};
+
+/**
+ * struct dpu_caps - define DPU capabilities
+ * @max_mixer_width    max layer mixer line width support.
+ * @max_mixer_blendstages max layer mixer blend stages or
+ *                       supported z order
+ * @qseed_type         qseed2 or qseed3 support.
+ * @smart_dma_rev      Supported version of SmartDMA feature.
+ * @ubwc_version       UBWC feature version (0x0 for not supported)
+ * @has_src_split      source split feature status
+ * @has_dim_layer      dim layer feature status
+ * @has_idle_pc        indicate if idle power collapse feature is supported
+ */
+struct dpu_caps {
+	u32 max_mixer_width;
+	u32 max_mixer_blendstages;
+	u32 qseed_type;
+	u32 smart_dma_rev;
+	u32 ubwc_version;
+	bool has_src_split;
+	bool has_dim_layer;
+	bool has_idle_pc;
+};
+
+/**
+ * struct dpu_sspp_blks_common : SSPP sub-blocks common configuration
+ * @maxlinewidth: max line width in pixels supported by this pipe
+ * @pixel_ram_size: size of latency hiding and de-tiling buffer in bytes
+ * @maxhdeciexp: max horizontal decimation supported by this pipe
+ *				(max is 2^value)
+ * @maxvdeciexp: max vertical decimation supported by this pipe
+ *				(max is 2^value)
+ */
+struct dpu_sspp_blks_common {
+	u32 maxlinewidth;
+	u32 pixel_ram_size;
+	u32 maxhdeciexp;
+	u32 maxvdeciexp;
+};
+
+/**
+ * struct dpu_sspp_sub_blks : SSPP sub-blocks
+ * @common: Pointer to common configurations shared by sub blocks
+ * @creq_vblank: creq priority during vertical blanking
+ * @danger_vblank: danger priority during vertical blanking
+ * @maxdwnscale: max downscale ratio supported(without DECIMATION)
+ * @maxupscale:  maxupscale ratio supported
+ * @smart_dma_priority: hw priority of rect1 of multirect pipe
+ * @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps
+ * @src_blk:
+ * @scaler_blk:
+ * @csc_blk:
+ * @hsic_blk:
+ * @memcolor_blk:
+ * @pcc_blk:
+ * @igc_blk:
+ * @format_list: Pointer to list of supported formats
+ * @virt_format_list: Pointer to list of supported formats for virtual planes
+ */
+struct dpu_sspp_sub_blks {
+	const struct dpu_sspp_blks_common *common;
+	u32 creq_vblank;
+	u32 danger_vblank;
+	u32 maxdwnscale;
+	u32 maxupscale;
+	u32 smart_dma_priority;
+	u32 max_per_pipe_bw;
+	struct dpu_src_blk src_blk;
+	struct dpu_scaler_blk scaler_blk;
+	struct dpu_pp_blk csc_blk;
+	struct dpu_pp_blk hsic_blk;
+	struct dpu_pp_blk memcolor_blk;
+	struct dpu_pp_blk pcc_blk;
+	struct dpu_pp_blk igc_blk;
+
+	const struct dpu_format_extended *format_list;
+	const struct dpu_format_extended *virt_format_list;
+};
+
+/**
+ * struct dpu_lm_sub_blks:      information of mixer block
+ * @maxwidth:               Max pixel width supported by this mixer
+ * @maxblendstages:         Max number of blend-stages supported
+ * @blendstage_base:        Blend-stage register base offset
+ * @gc: gamma correction block
+ */
+struct dpu_lm_sub_blks {
+	u32 maxwidth;
+	u32 maxblendstages;
+	u32 blendstage_base[MAX_BLOCKS];
+	struct dpu_pp_blk gc;
+};
+
+struct dpu_pingpong_sub_blks {
+	struct dpu_pp_blk te;
+	struct dpu_pp_blk te2;
+	struct dpu_pp_blk dither;
+};
+
+/**
+ * dpu_clk_ctrl_type - Defines top level clock control signals
+ */
+enum dpu_clk_ctrl_type {
+	DPU_CLK_CTRL_NONE,
+	DPU_CLK_CTRL_VIG0,
+	DPU_CLK_CTRL_VIG1,
+	DPU_CLK_CTRL_VIG2,
+	DPU_CLK_CTRL_VIG3,
+	DPU_CLK_CTRL_VIG4,
+	DPU_CLK_CTRL_RGB0,
+	DPU_CLK_CTRL_RGB1,
+	DPU_CLK_CTRL_RGB2,
+	DPU_CLK_CTRL_RGB3,
+	DPU_CLK_CTRL_DMA0,
+	DPU_CLK_CTRL_DMA1,
+	DPU_CLK_CTRL_CURSOR0,
+	DPU_CLK_CTRL_CURSOR1,
+	DPU_CLK_CTRL_INLINE_ROT0_SSPP,
+	DPU_CLK_CTRL_MAX,
+};
+
+/**
+ * struct dpu_clk_ctrl_reg : Clock control register
+ * @reg_off:           register offset
+ * @bit_off:           bit offset
+ */
+struct dpu_clk_ctrl_reg {
+	u32 reg_off;
+	u32 bit_off;
+};
+
+/**
+ * struct dpu_mdp_cfg : MDP TOP-BLK instance info
+ * @id:                index identifying this block
+ * @base:              register base offset to mdss
+ * @features           bit mask identifying sub-blocks/features
+ * @highest_bank_bit:  UBWC parameter
+ * @ubwc_static:       ubwc static configuration
+ * @ubwc_swizzle:      ubwc default swizzle setting
+ * @has_dest_scaler:   indicates support of destination scaler
+ * @clk_ctrls          clock control register definition
+ */
+struct dpu_mdp_cfg {
+	DPU_HW_BLK_INFO;
+	u32 highest_bank_bit;
+	u32 ubwc_static;
+	u32 ubwc_swizzle;
+	bool has_dest_scaler;
+	struct dpu_clk_ctrl_reg clk_ctrls[DPU_CLK_CTRL_MAX];
+};
+
+/**
+ * struct dpu_ctl_cfg : CTL block instance info
+ * @id:                index identifying this block
+ * @base:              register base offset to mdss
+ * @features           bit mask identifying sub-blocks/features
+ */
+struct dpu_ctl_cfg {
+	DPU_HW_BLK_INFO;
+};
+
+/**
+ * struct dpu_sspp_cfg - information of source pipes
+ * @id:                index identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @sblk:              SSPP sub-blocks information
+ * @xin_id:            bus client identifier
+ * @clk_ctrl           clock control identifier
+ * @type               sspp type identifier
+ */
+struct dpu_sspp_cfg {
+	DPU_HW_BLK_INFO;
+	const struct dpu_sspp_sub_blks *sblk;
+	u32 xin_id;
+	enum dpu_clk_ctrl_type clk_ctrl;
+	u32 type;
+};
+
+/**
+ * struct dpu_lm_cfg - information of layer mixer blocks
+ * @id:                index identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @sblk:              LM Sub-blocks information
+ * @pingpong:          ID of connected PingPong, PINGPONG_MAX if unsupported
+ * @ds:                ID of connected DS, DS_MAX if unsupported
+ * @lm_pair_mask:      Bitmask of LMs that can be controlled by same CTL
+ */
+struct dpu_lm_cfg {
+	DPU_HW_BLK_INFO;
+	const struct dpu_lm_sub_blks *sblk;
+	u32 pingpong;
+	u32 ds;
+	unsigned long lm_pair_mask;
+};
+
+/**
+ * struct dpu_ds_top_cfg - information of dest scaler top
+ * @id               enum identifying this block
+ * @base             register offset of this block
+ * @features         bit mask identifying features
+ * @version          hw version of dest scaler
+ * @maxinputwidth    maximum input line width
+ * @maxoutputwidth   maximum output line width
+ * @maxupscale       maximum upscale ratio
+ */
+struct dpu_ds_top_cfg {
+	DPU_HW_BLK_INFO;
+	u32 version;
+	u32 maxinputwidth;
+	u32 maxoutputwidth;
+	u32 maxupscale;
+};
+
+/**
+ * struct dpu_ds_cfg - information of dest scaler blocks
+ * @id          enum identifying this block
+ * @base        register offset wrt DS top offset
+ * @features    bit mask identifying features
+ * @version     hw version of the qseed block
+ * @top         DS top information
+ */
+struct dpu_ds_cfg {
+	DPU_HW_BLK_INFO;
+	u32 version;
+	const struct dpu_ds_top_cfg *top;
+};
+
+/**
+ * struct dpu_pingpong_cfg - information of PING-PONG blocks
+ * @id                 enum identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @sblk               sub-blocks information
+ */
+struct dpu_pingpong_cfg  {
+	DPU_HW_BLK_INFO;
+	const struct dpu_pingpong_sub_blks *sblk;
+};
+
+/**
+ * struct dpu_cdm_cfg - information of chroma down blocks
+ * @id                 enum identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @intf_connect       Bitmask of INTF IDs this CDM can connect to
+ */
+struct dpu_cdm_cfg   {
+	DPU_HW_BLK_INFO;
+	unsigned long intf_connect;
+};
+
+/**
+ * struct dpu_intf_cfg - information of timing engine blocks
+ * @id                 enum identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @type:              Interface type(DSI, DP, HDMI)
+ * @controller_id:     Controller Instance ID in case of multiple of intf type
+ * @prog_fetch_lines_worst_case	Worst case latency num lines needed to prefetch
+ */
+struct dpu_intf_cfg  {
+	DPU_HW_BLK_INFO;
+	u32 type;   /* interface type */
+	u32 controller_id;
+	u32 prog_fetch_lines_worst_case;
+};
+
+/**
+ * struct dpu_vbif_dynamic_ot_cfg - dynamic OT setting
+ * @pps                pixel per seconds
+ * @ot_limit           OT limit to use up to specified pixel per second
+ */
+struct dpu_vbif_dynamic_ot_cfg {
+	u64 pps;
+	u32 ot_limit;
+};
+
+/**
+ * struct dpu_vbif_dynamic_ot_tbl - dynamic OT setting table
+ * @count              length of cfg
+ * @cfg                pointer to array of configuration settings with
+ *                     ascending requirements
+ */
+struct dpu_vbif_dynamic_ot_tbl {
+	u32 count;
+	struct dpu_vbif_dynamic_ot_cfg *cfg;
+};
+
+/**
+ * struct dpu_vbif_qos_tbl - QoS priority table
+ * @npriority_lvl      num of priority level
+ * @priority_lvl       pointer to array of priority level in ascending order
+ */
+struct dpu_vbif_qos_tbl {
+	u32 npriority_lvl;
+	u32 *priority_lvl;
+};
+
+/**
+ * struct dpu_vbif_cfg - information of VBIF blocks
+ * @id                 enum identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @ot_rd_limit        default OT read limit
+ * @ot_wr_limit        default OT write limit
+ * @xin_halt_timeout   maximum time (in usec) for xin to halt
+ * @dynamic_ot_rd_tbl  dynamic OT read configuration table
+ * @dynamic_ot_wr_tbl  dynamic OT write configuration table
+ * @qos_rt_tbl         real-time QoS priority table
+ * @qos_nrt_tbl        non-real-time QoS priority table
+ * @memtype_count      number of defined memtypes
+ * @memtype            array of xin memtype definitions
+ */
+struct dpu_vbif_cfg {
+	DPU_HW_BLK_INFO;
+	u32 default_ot_rd_limit;
+	u32 default_ot_wr_limit;
+	u32 xin_halt_timeout;
+	struct dpu_vbif_dynamic_ot_tbl dynamic_ot_rd_tbl;
+	struct dpu_vbif_dynamic_ot_tbl dynamic_ot_wr_tbl;
+	struct dpu_vbif_qos_tbl qos_rt_tbl;
+	struct dpu_vbif_qos_tbl qos_nrt_tbl;
+	u32 memtype_count;
+	u32 memtype[MAX_XIN_COUNT];
+};
+/**
+ * struct dpu_reg_dma_cfg - information of lut dma blocks
+ * @id                 enum identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @version            version of lutdma hw block
+ * @trigger_sel_off    offset to trigger select registers of lutdma
+ */
+struct dpu_reg_dma_cfg {
+	DPU_HW_BLK_INFO;
+	u32 version;
+	u32 trigger_sel_off;
+};
+
+/**
+ * Define CDP use cases
+ * @DPU_PERF_CDP_USAGE_RT: real-time use cases
+ * @DPU_PERF_CDP_USAGE_NRT: non real-time use cases such as WFD
+ */
+enum {
+	DPU_PERF_CDP_USAGE_RT,
+	DPU_PERF_CDP_USAGE_NRT,
+	DPU_PERF_CDP_USAGE_MAX
+};
+
+/**
+ * struct dpu_perf_cdp_cfg - define CDP use case configuration
+ * @rd_enable: true if read pipe CDP is enabled
+ * @wr_enable: true if write pipe CDP is enabled
+ */
+struct dpu_perf_cdp_cfg {
+	bool rd_enable;
+	bool wr_enable;
+};
+
+/**
+ * struct dpu_perf_cfg - performance control settings
+ * @max_bw_low         low threshold of maximum bandwidth (kbps)
+ * @max_bw_high        high threshold of maximum bandwidth (kbps)
+ * @min_core_ib        minimum mnoc ib vote in kbps
+ * @min_llcc_ib        minimum llcc ib vote in kbps
+ * @min_dram_ib        minimum dram ib vote in kbps
+ * @core_ib_ff         core instantaneous bandwidth fudge factor
+ * @core_clk_ff        core clock fudge factor
+ * @comp_ratio_rt      string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio>
+ * @comp_ratio_nrt     string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio>
+ * @undersized_prefill_lines   undersized prefill in lines
+ * @xtra_prefill_lines         extra prefill latency in lines
+ * @dest_scale_prefill_lines   destination scaler latency in lines
+ * @macrotile_prefill_lines    macrotile latency in lines
+ * @yuv_nv12_prefill_lines     yuv_nv12 latency in lines
+ * @linear_prefill_lines       linear latency in lines
+ * @downscaling_prefill_lines  downscaling latency in lines
+ * @amortizable_threshold minimum y position for traffic shaping prefill
+ * @min_prefill_lines  minimum pipeline latency in lines
+ * @safe_lut_tbl: LUT tables for safe signals
+ * @danger_lut_tbl: LUT tables for danger signals
+ * @qos_lut_tbl: LUT tables for QoS signals
+ * @cdp_cfg            cdp use case configurations
+ */
+struct dpu_perf_cfg {
+	u32 max_bw_low;
+	u32 max_bw_high;
+	u32 min_core_ib;
+	u32 min_llcc_ib;
+	u32 min_dram_ib;
+	const char *core_ib_ff;
+	const char *core_clk_ff;
+	const char *comp_ratio_rt;
+	const char *comp_ratio_nrt;
+	u32 undersized_prefill_lines;
+	u32 xtra_prefill_lines;
+	u32 dest_scale_prefill_lines;
+	u32 macrotile_prefill_lines;
+	u32 yuv_nv12_prefill_lines;
+	u32 linear_prefill_lines;
+	u32 downscaling_prefill_lines;
+	u32 amortizable_threshold;
+	u32 min_prefill_lines;
+	u32 safe_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+	u32 danger_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+	struct dpu_qos_lut_tbl qos_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+	struct dpu_perf_cdp_cfg cdp_cfg[DPU_PERF_CDP_USAGE_MAX];
+};
+
+/**
+ * struct dpu_mdss_cfg - information of MDSS HW
+ * This is the main catalog data structure representing
+ * this HW version. Contains number of instances,
+ * register offsets, capabilities of the all MDSS HW sub-blocks.
+ *
+ * @dma_formats        Supported formats for dma pipe
+ * @cursor_formats     Supported formats for cursor pipe
+ * @vig_formats        Supported formats for vig pipe
+ */
+struct dpu_mdss_cfg {
+	u32 hwversion;
+
+	const struct dpu_caps *caps;
+
+	u32 mdp_count;
+	struct dpu_mdp_cfg *mdp;
+
+	u32 ctl_count;
+	struct dpu_ctl_cfg *ctl;
+
+	u32 sspp_count;
+	struct dpu_sspp_cfg *sspp;
+
+	u32 mixer_count;
+	struct dpu_lm_cfg *mixer;
+
+	u32 ds_count;
+	struct dpu_ds_cfg *ds;
+
+	u32 pingpong_count;
+	struct dpu_pingpong_cfg *pingpong;
+
+	u32 cdm_count;
+	struct dpu_cdm_cfg *cdm;
+
+	u32 intf_count;
+	struct dpu_intf_cfg *intf;
+
+	u32 vbif_count;
+	struct dpu_vbif_cfg *vbif;
+
+	u32 reg_dma_count;
+	struct dpu_reg_dma_cfg dma_cfg;
+
+	u32 ad_count;
+
+	/* Add additional block data structures here */
+
+	struct dpu_perf_cfg perf;
+	struct dpu_format_extended *dma_formats;
+	struct dpu_format_extended *cursor_formats;
+	struct dpu_format_extended *vig_formats;
+};
+
+struct dpu_mdss_hw_cfg_handler {
+	u32 hw_rev;
+	void (*cfg_init)(struct dpu_mdss_cfg *dpu_cfg);
+};
+
+/*
+ * Access Macros
+ */
+#define BLK_MDP(s) ((s)->mdp)
+#define BLK_CTL(s) ((s)->ctl)
+#define BLK_VIG(s) ((s)->vig)
+#define BLK_RGB(s) ((s)->rgb)
+#define BLK_DMA(s) ((s)->dma)
+#define BLK_CURSOR(s) ((s)->cursor)
+#define BLK_MIXER(s) ((s)->mixer)
+#define BLK_DS(s) ((s)->ds)
+#define BLK_PINGPONG(s) ((s)->pingpong)
+#define BLK_CDM(s) ((s)->cdm)
+#define BLK_INTF(s) ((s)->intf)
+#define BLK_AD(s) ((s)->ad)
+
+/**
+ * dpu_hw_catalog_init - dpu hardware catalog init API; retrieves the
+ * hardcoded target-specific catalog information in a config structure
+ * @hw_rev:       caller needs to provide the hardware revision.
+ *
+ * Return: dpu config structure
+ */
+struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev);
+
+/**
+ * dpu_hw_catalog_deinit - dpu hardware catalog cleanup
+ * @dpu_cfg:      pointer returned from init function
+ */
+void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg);
+
+/**
+ * dpu_hw_sspp_multirect_enabled - check multirect enabled for the sspp
+ * @cfg:          pointer to sspp cfg
+ */
+static inline bool dpu_hw_sspp_multirect_enabled(const struct dpu_sspp_cfg *cfg)
+{
+	return test_bit(DPU_SSPP_SMART_DMA_V1, &cfg->features) ||
+			 test_bit(DPU_SSPP_SMART_DMA_V2, &cfg->features);
+}
+#endif /* _DPU_HW_CATALOG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
new file mode 100644
index 0000000..3c9f028
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
@@ -0,0 +1,168 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hw_mdss.h"
+
+static const struct dpu_format_extended plane_formats[] = {
+	{DRM_FORMAT_ARGB8888, 0},
+	{DRM_FORMAT_ABGR8888, 0},
+	{DRM_FORMAT_RGBA8888, 0},
+	{DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_BGRA8888, 0},
+	{DRM_FORMAT_XRGB8888, 0},
+	{DRM_FORMAT_RGBX8888, 0},
+	{DRM_FORMAT_BGRX8888, 0},
+	{DRM_FORMAT_XBGR8888, 0},
+	{DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_RGB888, 0},
+	{DRM_FORMAT_BGR888, 0},
+	{DRM_FORMAT_RGB565, 0},
+	{DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_BGR565, 0},
+	{DRM_FORMAT_ARGB1555, 0},
+	{DRM_FORMAT_ABGR1555, 0},
+	{DRM_FORMAT_RGBA5551, 0},
+	{DRM_FORMAT_BGRA5551, 0},
+	{DRM_FORMAT_XRGB1555, 0},
+	{DRM_FORMAT_XBGR1555, 0},
+	{DRM_FORMAT_RGBX5551, 0},
+	{DRM_FORMAT_BGRX5551, 0},
+	{DRM_FORMAT_ARGB4444, 0},
+	{DRM_FORMAT_ABGR4444, 0},
+	{DRM_FORMAT_RGBA4444, 0},
+	{DRM_FORMAT_BGRA4444, 0},
+	{DRM_FORMAT_XRGB4444, 0},
+	{DRM_FORMAT_XBGR4444, 0},
+	{DRM_FORMAT_RGBX4444, 0},
+	{DRM_FORMAT_BGRX4444, 0},
+	{0, 0},
+};
+
+static const struct dpu_format_extended plane_formats_yuv[] = {
+	{DRM_FORMAT_ARGB8888, 0},
+	{DRM_FORMAT_ABGR8888, 0},
+	{DRM_FORMAT_RGBA8888, 0},
+	{DRM_FORMAT_BGRX8888, 0},
+	{DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_BGRA8888, 0},
+	{DRM_FORMAT_XRGB8888, 0},
+	{DRM_FORMAT_XBGR8888, 0},
+	{DRM_FORMAT_RGBX8888, 0},
+	{DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_RGB888, 0},
+	{DRM_FORMAT_BGR888, 0},
+	{DRM_FORMAT_RGB565, 0},
+	{DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_BGR565, 0},
+	{DRM_FORMAT_ARGB1555, 0},
+	{DRM_FORMAT_ABGR1555, 0},
+	{DRM_FORMAT_RGBA5551, 0},
+	{DRM_FORMAT_BGRA5551, 0},
+	{DRM_FORMAT_XRGB1555, 0},
+	{DRM_FORMAT_XBGR1555, 0},
+	{DRM_FORMAT_RGBX5551, 0},
+	{DRM_FORMAT_BGRX5551, 0},
+	{DRM_FORMAT_ARGB4444, 0},
+	{DRM_FORMAT_ABGR4444, 0},
+	{DRM_FORMAT_RGBA4444, 0},
+	{DRM_FORMAT_BGRA4444, 0},
+	{DRM_FORMAT_XRGB4444, 0},
+	{DRM_FORMAT_XBGR4444, 0},
+	{DRM_FORMAT_RGBX4444, 0},
+	{DRM_FORMAT_BGRX4444, 0},
+
+	{DRM_FORMAT_NV12, 0},
+	{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_NV21, 0},
+	{DRM_FORMAT_NV16, 0},
+	{DRM_FORMAT_NV61, 0},
+	{DRM_FORMAT_VYUY, 0},
+	{DRM_FORMAT_UYVY, 0},
+	{DRM_FORMAT_YUYV, 0},
+	{DRM_FORMAT_YVYU, 0},
+	{DRM_FORMAT_YUV420, 0},
+	{DRM_FORMAT_YVU420, 0},
+	{0, 0},
+};
+
+static const struct dpu_format_extended cursor_formats[] = {
+	{DRM_FORMAT_ARGB8888, 0},
+	{DRM_FORMAT_ABGR8888, 0},
+	{DRM_FORMAT_RGBA8888, 0},
+	{DRM_FORMAT_BGRA8888, 0},
+	{DRM_FORMAT_XRGB8888, 0},
+	{DRM_FORMAT_ARGB1555, 0},
+	{DRM_FORMAT_ABGR1555, 0},
+	{DRM_FORMAT_RGBA5551, 0},
+	{DRM_FORMAT_BGRA5551, 0},
+	{DRM_FORMAT_ARGB4444, 0},
+	{DRM_FORMAT_ABGR4444, 0},
+	{DRM_FORMAT_RGBA4444, 0},
+	{DRM_FORMAT_BGRA4444, 0},
+	{0, 0},
+};
+
+static const struct dpu_format_extended wb2_formats[] = {
+	{DRM_FORMAT_RGB565, 0},
+	{DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_RGB888, 0},
+	{DRM_FORMAT_ARGB8888, 0},
+	{DRM_FORMAT_RGBA8888, 0},
+	{DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_XRGB8888, 0},
+	{DRM_FORMAT_RGBX8888, 0},
+	{DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_ARGB1555, 0},
+	{DRM_FORMAT_RGBA5551, 0},
+	{DRM_FORMAT_XRGB1555, 0},
+	{DRM_FORMAT_RGBX5551, 0},
+	{DRM_FORMAT_ARGB4444, 0},
+	{DRM_FORMAT_RGBA4444, 0},
+	{DRM_FORMAT_RGBX4444, 0},
+	{DRM_FORMAT_XRGB4444, 0},
+
+	{DRM_FORMAT_BGR565, 0},
+	{DRM_FORMAT_BGR888, 0},
+	{DRM_FORMAT_ABGR8888, 0},
+	{DRM_FORMAT_BGRA8888, 0},
+	{DRM_FORMAT_BGRX8888, 0},
+	{DRM_FORMAT_XBGR8888, 0},
+	{DRM_FORMAT_ABGR1555, 0},
+	{DRM_FORMAT_BGRA5551, 0},
+	{DRM_FORMAT_XBGR1555, 0},
+	{DRM_FORMAT_BGRX5551, 0},
+	{DRM_FORMAT_ABGR4444, 0},
+	{DRM_FORMAT_BGRA4444, 0},
+	{DRM_FORMAT_BGRX4444, 0},
+	{DRM_FORMAT_XBGR4444, 0},
+
+	{DRM_FORMAT_YUV420, 0},
+	{DRM_FORMAT_NV12, 0},
+	{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_NV16, 0},
+	{DRM_FORMAT_YUYV, 0},
+
+	{0, 0},
+};
+
+static const struct dpu_format_extended rgb_10bit_formats[] = {
+	{DRM_FORMAT_BGRA1010102, 0},
+	{DRM_FORMAT_BGRX1010102, 0},
+	{DRM_FORMAT_RGBA1010102, 0},
+	{DRM_FORMAT_RGBX1010102, 0},
+	{DRM_FORMAT_ABGR2101010, 0},
+	{DRM_FORMAT_ABGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_XBGR2101010, 0},
+	{DRM_FORMAT_XBGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_ARGB2101010, 0},
+	{DRM_FORMAT_XRGB2101010, 0},
+};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
new file mode 100644
index 0000000..554874b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
@@ -0,0 +1,323 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_cdm.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define CDM_CSC_10_OPMODE                  0x000
+#define CDM_CSC_10_BASE                    0x004
+
+#define CDM_CDWN2_OP_MODE                  0x100
+#define CDM_CDWN2_CLAMP_OUT                0x104
+#define CDM_CDWN2_PARAMS_3D_0              0x108
+#define CDM_CDWN2_PARAMS_3D_1              0x10C
+#define CDM_CDWN2_COEFF_COSITE_H_0         0x110
+#define CDM_CDWN2_COEFF_COSITE_H_1         0x114
+#define CDM_CDWN2_COEFF_COSITE_H_2         0x118
+#define CDM_CDWN2_COEFF_OFFSITE_H_0        0x11C
+#define CDM_CDWN2_COEFF_OFFSITE_H_1        0x120
+#define CDM_CDWN2_COEFF_OFFSITE_H_2        0x124
+#define CDM_CDWN2_COEFF_COSITE_V           0x128
+#define CDM_CDWN2_COEFF_OFFSITE_V          0x12C
+#define CDM_CDWN2_OUT_SIZE                 0x130
+
+#define CDM_HDMI_PACK_OP_MODE              0x200
+#define CDM_CSC_10_MATRIX_COEFF_0          0x004
+
+/**
+ * Horizontal coefficients for cosite chroma downscale
+ * s13 representation of coefficients
+ */
+static u32 cosite_h_coeff[] = {0x00000016, 0x000001cc, 0x0100009e};
+
+/**
+ * Horizontal coefficients for offsite chroma downscale
+ */
+static u32 offsite_h_coeff[] = {0x000b0005, 0x01db01eb, 0x00e40046};
+
+/**
+ * Vertical coefficients for cosite chroma downscale
+ */
+static u32 cosite_v_coeff[] = {0x00080004};
+/**
+ * Vertical coefficients for offsite chroma downscale
+ */
+static u32 offsite_v_coeff[] = {0x00060002};
+
+/* Limited Range rgb2yuv coeff with clamp and bias values for CSC 10 module */
+static struct dpu_csc_cfg rgb2yuv_cfg = {
+	{
+		0x0083, 0x0102, 0x0032,
+		0x1fb5, 0x1f6c, 0x00e1,
+		0x00e1, 0x1f45, 0x1fdc
+	},
+	{ 0x00, 0x00, 0x00 },
+	{ 0x0040, 0x0200, 0x0200 },
+	{ 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff },
+	{ 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 },
+};
+
+static struct dpu_cdm_cfg *_cdm_offset(enum dpu_cdm cdm,
+		struct dpu_mdss_cfg *m,
+		void __iomem *addr,
+		struct dpu_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->cdm_count; i++) {
+		if (cdm == m->cdm[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->cdm[i].base;
+			b->length = m->cdm[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = DPU_DBG_MASK_CDM;
+			return &m->cdm[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+static int dpu_hw_cdm_setup_csc_10bit(struct dpu_hw_cdm *ctx,
+		struct dpu_csc_cfg *data)
+{
+	dpu_hw_csc_setup(&ctx->hw, CDM_CSC_10_MATRIX_COEFF_0, data, true);
+
+	return 0;
+}
+
+static int dpu_hw_cdm_setup_cdwn(struct dpu_hw_cdm *ctx,
+		struct dpu_hw_cdm_cfg *cfg)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	u32 opmode = 0;
+	u32 out_size = 0;
+
+	if (cfg->output_bit_depth == CDM_CDWN_OUTPUT_10BIT)
+		opmode &= ~BIT(7);
+	else
+		opmode |= BIT(7);
+
+	/* ENABLE DWNS_H bit */
+	opmode |= BIT(1);
+
+	switch (cfg->h_cdwn_type) {
+	case CDM_CDWN_DISABLE:
+		/* CLEAR METHOD_H field */
+		opmode &= ~(0x18);
+		/* CLEAR DWNS_H bit */
+		opmode &= ~BIT(1);
+		break;
+	case CDM_CDWN_PIXEL_DROP:
+		/* Clear METHOD_H field (pixel drop is 0) */
+		opmode &= ~(0x18);
+		break;
+	case CDM_CDWN_AVG:
+		/* Clear METHOD_H field (Average is 0x1) */
+		opmode &= ~(0x18);
+		opmode |= (0x1 << 0x3);
+		break;
+	case CDM_CDWN_COSITE:
+		/* Clear METHOD_H field (Cosite is 0x2) */
+		opmode &= ~(0x18);
+		opmode |= (0x2 << 0x3);
+		/* Co-site horizontal coefficients */
+		DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_0,
+				cosite_h_coeff[0]);
+		DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_1,
+				cosite_h_coeff[1]);
+		DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_2,
+				cosite_h_coeff[2]);
+		break;
+	case CDM_CDWN_OFFSITE:
+		/* Clear METHOD_H field (Offsite is 0x3) */
+		opmode &= ~(0x18);
+		opmode |= (0x3 << 0x3);
+
+		/* Off-site horizontal coefficients */
+		DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_0,
+				offsite_h_coeff[0]);
+		DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_1,
+				offsite_h_coeff[1]);
+		DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_2,
+				offsite_h_coeff[2]);
+		break;
+	default:
+		pr_err("%s invalid horz down sampling type\n", __func__);
+		return -EINVAL;
+	}
+
+	/* ENABLE DWNS_V bit */
+	opmode |= BIT(2);
+
+	switch (cfg->v_cdwn_type) {
+	case CDM_CDWN_DISABLE:
+		/* CLEAR METHOD_V field */
+		opmode &= ~(0x60);
+		/* CLEAR DWNS_V bit */
+		opmode &= ~BIT(2);
+		break;
+	case CDM_CDWN_PIXEL_DROP:
+		/* Clear METHOD_V field (pixel drop is 0) */
+		opmode &= ~(0x60);
+		break;
+	case CDM_CDWN_AVG:
+		/* Clear METHOD_V field (Average is 0x1) */
+		opmode &= ~(0x60);
+		opmode |= (0x1 << 0x5);
+		break;
+	case CDM_CDWN_COSITE:
+		/* Clear METHOD_V field (Cosite is 0x2) */
+		opmode &= ~(0x60);
+		opmode |= (0x2 << 0x5);
+		/* Co-site vertical coefficients */
+		DPU_REG_WRITE(c,
+				CDM_CDWN2_COEFF_COSITE_V,
+				cosite_v_coeff[0]);
+		break;
+	case CDM_CDWN_OFFSITE:
+		/* Clear METHOD_V field (Offsite is 0x3) */
+		opmode &= ~(0x60);
+		opmode |= (0x3 << 0x5);
+
+		/* Off-site vertical coefficients */
+		DPU_REG_WRITE(c,
+				CDM_CDWN2_COEFF_OFFSITE_V,
+				offsite_v_coeff[0]);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (cfg->v_cdwn_type || cfg->h_cdwn_type)
+		opmode |= BIT(0); /* EN CDWN module */
+	else
+		opmode &= ~BIT(0);
+
+	out_size = (cfg->output_width & 0xFFFF) |
+		((cfg->output_height & 0xFFFF) << 16);
+	DPU_REG_WRITE(c, CDM_CDWN2_OUT_SIZE, out_size);
+	DPU_REG_WRITE(c, CDM_CDWN2_OP_MODE, opmode);
+	DPU_REG_WRITE(c, CDM_CDWN2_CLAMP_OUT,
+			((0x3FF << 16) | 0x0));
+
+	return 0;
+}
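+
+/*
+ * Worked example (illustrative): selecting cosite horizontal and offsite
+ * vertical downsampling above composes the final opmode value as
+ *
+ *	BIT(0) |		(EN CDWN module)
+ *	BIT(1) | (0x2 << 3) |	(DWNS_H enable, METHOD_H = cosite)
+ *	BIT(2) | (0x3 << 5)	(DWNS_V enable, METHOD_V = offsite)
+ */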
+
+static int dpu_hw_cdm_enable(struct dpu_hw_cdm *ctx,
+		struct dpu_hw_cdm_cfg *cdm)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	const struct dpu_format *fmt = cdm->output_fmt;
+	struct cdm_output_cfg cdm_cfg = { 0 };
+	u32 opmode = 0;
+	u32 csc = 0;
+
+	if (!DPU_FORMAT_IS_YUV(fmt))
+		return -EINVAL;
+
+	if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) {
+		if (fmt->chroma_sample != DPU_CHROMA_H1V2)
+			return -EINVAL; /* unsupported format */
+		opmode = BIT(0);
+		opmode |= (fmt->chroma_sample << 1);
+		cdm_cfg.intf_en = true;
+	}
+
+	csc |= BIT(2);
+	csc &= ~BIT(1);
+	csc |= BIT(0);
+
+	if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
+		ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
+
+	DPU_REG_WRITE(c, CDM_CSC_10_OPMODE, csc);
+	DPU_REG_WRITE(c, CDM_HDMI_PACK_OP_MODE, opmode);
+	return 0;
+}
+
+static void dpu_hw_cdm_disable(struct dpu_hw_cdm *ctx)
+{
+	struct cdm_output_cfg cdm_cfg = { 0 };
+
+	if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
+		ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
+}
+
+static void _setup_cdm_ops(struct dpu_hw_cdm_ops *ops,
+	unsigned long features)
+{
+	ops->setup_csc_data = dpu_hw_cdm_setup_csc_10bit;
+	ops->setup_cdwn = dpu_hw_cdm_setup_cdwn;
+	ops->enable = dpu_hw_cdm_enable;
+	ops->disable = dpu_hw_cdm_disable;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
+struct dpu_hw_cdm *dpu_hw_cdm_init(enum dpu_cdm idx,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *m,
+		struct dpu_hw_mdp *hw_mdp)
+{
+	struct dpu_hw_cdm *c;
+	struct dpu_cdm_cfg *cfg;
+	int rc;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _cdm_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	c->idx = idx;
+	c->caps = cfg;
+	_setup_cdm_ops(&c->ops, c->caps->features);
+	c->hw_mdp = hw_mdp;
+
+	rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CDM, idx, &dpu_hw_ops);
+	if (rc) {
+		DPU_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
+	/*
+	 * Perform any default initialization for the chroma down module
+	 * @setup default csc coefficients
+	 */
+	dpu_hw_cdm_setup_csc_10bit(c, &rgb2yuv_cfg);
+
+	return c;
+
+blk_init_error:
+	kzfree(c);
+
+	return ERR_PTR(rc);
+}
+
+void dpu_hw_cdm_destroy(struct dpu_hw_cdm *cdm)
+{
+	if (cdm)
+		dpu_hw_blk_destroy(&cdm->base);
+	kfree(cdm);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
new file mode 100644
index 0000000..5cceb1e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
@@ -0,0 +1,139 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_CDM_H
+#define _DPU_HW_CDM_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_top.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_cdm;
+
+struct dpu_hw_cdm_cfg {
+	u32 output_width;
+	u32 output_height;
+	u32 output_bit_depth;
+	u32 h_cdwn_type;
+	u32 v_cdwn_type;
+	const struct dpu_format *output_fmt;
+	u32 output_type;
+	int flags;
+};
+
+enum dpu_hw_cdwn_type {
+	CDM_CDWN_DISABLE,
+	CDM_CDWN_PIXEL_DROP,
+	CDM_CDWN_AVG,
+	CDM_CDWN_COSITE,
+	CDM_CDWN_OFFSITE,
+};
+
+enum dpu_hw_cdwn_output_type {
+	CDM_CDWN_OUTPUT_HDMI,
+	CDM_CDWN_OUTPUT_WB,
+};
+
+enum dpu_hw_cdwn_output_bit_depth {
+	CDM_CDWN_OUTPUT_8BIT,
+	CDM_CDWN_OUTPUT_10BIT,
+};
+
+/**
+ * struct dpu_hw_cdm_ops - Interface to the chroma down HW driver functions
+ *                         Assumption is these functions will be called after
+ *                         clocks are enabled
+ *  @setup_csc:            Programs the csc matrix
+ *  @setup_cdwn:           Sets up the chroma down sub module
+ *  @enable:               Enables the output to interface and programs the
+ *                         output packer
+ *  @disable:              Puts the cdm in bypass mode
+ */
+struct dpu_hw_cdm_ops {
+	/**
+	 * Programs the CSC matrix for conversion from RGB space to YUV space.
+	 * Calling this function is optional, as the matrix is automatically
+	 * set during initialization; call it only to program a matrix other
+	 * than the default one.
+	 * @cdm:          Pointer to the chroma down context structure
+	 * @data:         Pointer to CSC configuration data
+	 * return:        0 if success; error code otherwise
+	 */
+	int (*setup_csc_data)(struct dpu_hw_cdm *cdm,
+			struct dpu_csc_cfg *data);
+
+	/**
+	 * Programs the Chroma downsample part.
+	 * @cdm         Pointer to chroma down context
+	 */
+	int (*setup_cdwn)(struct dpu_hw_cdm *cdm,
+			struct dpu_hw_cdm_cfg *cfg);
+
+	/**
+	 * Enable the CDM module
+	 * @cdm         Pointer to chroma down context
+	 */
+	int (*enable)(struct dpu_hw_cdm *cdm,
+			struct dpu_hw_cdm_cfg *cfg);
+
+	/**
+	 * Disable the CDM module
+	 * @cdm         Pointer to chroma down context
+	 */
+	void (*disable)(struct dpu_hw_cdm *cdm);
+};
+
+struct dpu_hw_cdm {
+	struct dpu_hw_blk base;
+	struct dpu_hw_blk_reg_map hw;
+
+	/* chroma down */
+	const struct dpu_cdm_cfg *caps;
+	enum  dpu_cdm  idx;
+
+	/* mdp top hw driver */
+	struct dpu_hw_mdp *hw_mdp;
+
+	/* ops */
+	struct dpu_hw_cdm_ops ops;
+};
+
+/**
+ * dpu_hw_cdm - convert base object dpu_hw_base to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_cdm *to_dpu_hw_cdm(struct dpu_hw_blk *hw)
+{
+	return container_of(hw, struct dpu_hw_cdm, base);
+}
+
+/**
+ * dpu_hw_cdm_init - initializes the cdm hw driver object.
+ * Should be called once before accessing any cdm registers.
+ * @idx:  cdm index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m :   pointer to mdss catalog data
+ * @hw_mdp:  pointer to mdp top hw driver object
+ */
+struct dpu_hw_cdm *dpu_hw_cdm_init(enum dpu_cdm idx,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *m,
+		struct dpu_hw_mdp *hw_mdp);
+
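+/*
+ * Illustrative call sequence (hypothetical caller; assumes clocks are
+ * already enabled and "cfg" is a populated struct dpu_hw_cdm_cfg):
+ *
+ *	struct dpu_hw_cdm *cdm = dpu_hw_cdm_init(CDM_0, mmio, catalog, top);
+ *
+ *	if (!IS_ERR(cdm)) {
+ *		cdm->ops.setup_cdwn(cdm, &cfg);
+ *		cdm->ops.enable(cdm, &cfg);
+ *	}
+ */
+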
+/**
+ * dpu_hw_cdm_destroy - destroys CDM driver context
+ * @cdm:   pointer to CDM driver context
+ */
+void dpu_hw_cdm_destroy(struct dpu_hw_cdm *cdm);
+
+#endif /*_DPU_HW_CDM_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
new file mode 100644
index 0000000..06be7cf
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -0,0 +1,540 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include "dpu_hwio.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define   CTL_LAYER(lm)                 \
+	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
+#define   CTL_LAYER_EXT(lm)             \
+	(0x40 + (((lm) - LM_0) * 0x004))
+#define   CTL_LAYER_EXT2(lm)             \
+	(0x70 + (((lm) - LM_0) * 0x004))
+#define   CTL_LAYER_EXT3(lm)             \
+	(0xA0 + (((lm) - LM_0) * 0x004))
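+
+/*
+ * Note (illustrative): CTL_LAYER offsets are contiguous for LM_0..LM_4,
+ * e.g. CTL_LAYER(LM_1) = 0x004, while LM_5 is special-cased to the
+ * non-contiguous offset 0x024; the EXT/EXT2/EXT3 banks are contiguous
+ * for all mixers.
+ */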
+#define   CTL_TOP                       0x014
+#define   CTL_FLUSH                     0x018
+#define   CTL_START                     0x01C
+#define   CTL_PREPARE                   0x0d0
+#define   CTL_SW_RESET                  0x030
+#define   CTL_LAYER_EXTN_OFFSET         0x40
+
+#define CTL_MIXER_BORDER_OUT            BIT(24)
+#define CTL_FLUSH_MASK_CTL              BIT(17)
+
+#define DPU_REG_RESET_TIMEOUT_US        2000
+
+static struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
+		struct dpu_mdss_cfg *m,
+		void __iomem *addr,
+		struct dpu_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->ctl_count; i++) {
+		if (ctl == m->ctl[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->ctl[i].base;
+			b->length = m->ctl[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = DPU_DBG_MASK_CTL;
+			return &m->ctl[i];
+		}
+	}
+	return ERR_PTR(-EINVAL);
+}
+
+static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
+		enum dpu_lm lm)
+{
+	int i;
+	int stages = -EINVAL;
+
+	for (i = 0; i < count; i++) {
+		if (lm == mixer[i].id) {
+			stages = mixer[i].sblk->maxblendstages;
+			break;
+		}
+	}
+
+	return stages;
+}
+
+static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
+{
+	DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
+}
+
+static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
+{
+	DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
+}
+
+static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
+{
+	ctx->pending_flush_mask = 0x0;
+}
+
+static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
+		u32 flushbits)
+{
+	ctx->pending_flush_mask |= flushbits;
+}
+
+static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
+{
+	if (!ctx)
+		return 0x0;
+
+	return ctx->pending_flush_mask;
+}
+
+static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
+{
+
+	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
+}
+
+static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+	return DPU_REG_READ(c, CTL_FLUSH);
+}
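+
+/*
+ * Typical flush sequence (illustrative only; real callers live in the
+ * encoder/crtc code):
+ *
+ *	ctx->ops.update_pending_flush(ctx,
+ *			ctx->ops.get_bitmask_mixer(ctx, LM_0));
+ *	ctx->ops.trigger_flush(ctx);	(writes the mask to CTL_FLUSH)
+ *	ctx->ops.trigger_start(ctx);	(kicks off SW controlled interfaces)
+ */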
+
+static inline uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
+	enum dpu_sspp sspp)
+{
+	uint32_t flushbits = 0;
+
+	switch (sspp) {
+	case SSPP_VIG0:
+		flushbits =  BIT(0);
+		break;
+	case SSPP_VIG1:
+		flushbits = BIT(1);
+		break;
+	case SSPP_VIG2:
+		flushbits = BIT(2);
+		break;
+	case SSPP_VIG3:
+		flushbits = BIT(18);
+		break;
+	case SSPP_RGB0:
+		flushbits = BIT(3);
+		break;
+	case SSPP_RGB1:
+		flushbits = BIT(4);
+		break;
+	case SSPP_RGB2:
+		flushbits = BIT(5);
+		break;
+	case SSPP_RGB3:
+		flushbits = BIT(19);
+		break;
+	case SSPP_DMA0:
+		flushbits = BIT(11);
+		break;
+	case SSPP_DMA1:
+		flushbits = BIT(12);
+		break;
+	case SSPP_DMA2:
+		flushbits = BIT(24);
+		break;
+	case SSPP_DMA3:
+		flushbits = BIT(25);
+		break;
+	case SSPP_CURSOR0:
+		flushbits = BIT(22);
+		break;
+	case SSPP_CURSOR1:
+		flushbits = BIT(23);
+		break;
+	default:
+		break;
+	}
+
+	return flushbits;
+}
+
+static inline uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
+	enum dpu_lm lm)
+{
+	uint32_t flushbits = 0;
+
+	switch (lm) {
+	case LM_0:
+		flushbits = BIT(6);
+		break;
+	case LM_1:
+		flushbits = BIT(7);
+		break;
+	case LM_2:
+		flushbits = BIT(8);
+		break;
+	case LM_3:
+		flushbits = BIT(9);
+		break;
+	case LM_4:
+		flushbits = BIT(10);
+		break;
+	case LM_5:
+		flushbits = BIT(20);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	flushbits |= CTL_FLUSH_MASK_CTL;
+
+	return flushbits;
+}
+
+static inline int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
+		u32 *flushbits, enum dpu_intf intf)
+{
+	switch (intf) {
+	case INTF_0:
+		*flushbits |= BIT(31);
+		break;
+	case INTF_1:
+		*flushbits |= BIT(30);
+		break;
+	case INTF_2:
+		*flushbits |= BIT(29);
+		break;
+	case INTF_3:
+		*flushbits |= BIT(28);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static inline int dpu_hw_ctl_get_bitmask_cdm(struct dpu_hw_ctl *ctx,
+		u32 *flushbits, enum dpu_cdm cdm)
+{
+	switch (cdm) {
+	case CDM_0:
+		*flushbits |= BIT(26);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	ktime_t timeout;
+	u32 status;
+
+	timeout = ktime_add_us(ktime_get(), timeout_us);
+
+	/*
+	 * it takes around 30us for the mdp to finish resetting its ctl path;
+	 * poll every 20-50us so the reset should be complete by the first poll
+	 */
+	do {
+		status = DPU_REG_READ(c, CTL_SW_RESET);
+		status &= 0x1;
+		if (status)
+			usleep_range(20, 50);
+	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);
+
+	return status;
+}
+
+static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
+	DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
+	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	u32 status;
+
+	status = DPU_REG_READ(c, CTL_SW_RESET);
+	status &= 0x01;
+	if (!status)
+		return 0;
+
+	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
+	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
+		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	int i;
+
+	for (i = 0; i < ctx->mixer_count; i++) {
+		DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
+		DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
+		DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
+		DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
+	}
+}
+
+static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
+	enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
+	u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
+	int i, j;
+	int stages; /* must be signed: _mixer_stages() may return -EINVAL */
+	int pipes_per_stage;
+
+	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
+	if (stages < 0)
+		return;
+
+	if (test_bit(DPU_MIXER_SOURCESPLIT,
+		&ctx->mixer_hw_caps->features))
+		pipes_per_stage = PIPES_PER_STAGE;
+	else
+		pipes_per_stage = 1;
+
+	mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */
+
+	if (!stage_cfg)
+		goto exit;
+
+	for (i = 0; i <= stages; i++) {
+		/* overflow to ext register if 'i + 1 > 7' */
+		mix = (i + 1) & 0x7;
+		ext = i >= 7;
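+		/*
+		 * e.g. (illustrative) stage i = 0 gives mix = 1, ext = 0;
+		 * at i = 7, mix wraps to 0 and ext = 1 moves the stage
+		 * selection into the EXT register bank.
+		 */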
+
+		for (j = 0 ; j < pipes_per_stage; j++) {
+			enum dpu_sspp_multirect_index rect_index =
+				stage_cfg->multirect_index[i][j];
+
+			switch (stage_cfg->stage[i][j]) {
+			case SSPP_VIG0:
+				if (rect_index == DPU_SSPP_RECT_1) {
+					mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
+				} else {
+					mixercfg |= mix << 0;
+					mixercfg_ext |= ext << 0;
+				}
+				break;
+			case SSPP_VIG1:
+				if (rect_index == DPU_SSPP_RECT_1) {
+					mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
+				} else {
+					mixercfg |= mix << 3;
+					mixercfg_ext |= ext << 2;
+				}
+				break;
+			case SSPP_VIG2:
+				if (rect_index == DPU_SSPP_RECT_1) {
+					mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
+				} else {
+					mixercfg |= mix << 6;
+					mixercfg_ext |= ext << 4;
+				}
+				break;
+			case SSPP_VIG3:
+				if (rect_index == DPU_SSPP_RECT_1) {
+					mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
+				} else {
+					mixercfg |= mix << 26;
+					mixercfg_ext |= ext << 6;
+				}
+				break;
+			case SSPP_RGB0:
+				mixercfg |= mix << 9;
+				mixercfg_ext |= ext << 8;
+				break;
+			case SSPP_RGB1:
+				mixercfg |= mix << 12;
+				mixercfg_ext |= ext << 10;
+				break;
+			case SSPP_RGB2:
+				mixercfg |= mix << 15;
+				mixercfg_ext |= ext << 12;
+				break;
+			case SSPP_RGB3:
+				mixercfg |= mix << 29;
+				mixercfg_ext |= ext << 14;
+				break;
+			case SSPP_DMA0:
+				if (rect_index == DPU_SSPP_RECT_1) {
+					mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
+				} else {
+					mixercfg |= mix << 18;
+					mixercfg_ext |= ext << 16;
+				}
+				break;
+			case SSPP_DMA1:
+				if (rect_index == DPU_SSPP_RECT_1) {
+					mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
+				} else {
+					mixercfg |= mix << 21;
+					mixercfg_ext |= ext << 18;
+				}
+				break;
+			case SSPP_DMA2:
+				if (rect_index == DPU_SSPP_RECT_1) {
+					mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
+				} else {
+					mix |= (i + 1) & 0xF;
+					mixercfg_ext2 |= mix << 0;
+				}
+				break;
+			case SSPP_DMA3:
+				if (rect_index == DPU_SSPP_RECT_1) {
+					mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
+				} else {
+					mix |= (i + 1) & 0xF;
+					mixercfg_ext2 |= mix << 4;
+				}
+				break;
+			case SSPP_CURSOR0:
+				mixercfg_ext |= ((i + 1) & 0xF) << 20;
+				break;
+			case SSPP_CURSOR1:
+				mixercfg_ext |= ((i + 1) & 0xF) << 26;
+				break;
+			default:
+				break;
+			}
+		}
+	}
+
+exit:
+	DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
+	DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
+	DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
+	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
+}
+
+static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
+		struct dpu_hw_intf_cfg *cfg)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	u32 intf_cfg = 0;
+
+	intf_cfg |= (cfg->intf & 0xF) << 4;
+
+	if (cfg->mode_3d) {
+		intf_cfg |= BIT(19);
+		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
+	}
+
+	switch (cfg->intf_mode_sel) {
+	case DPU_CTL_MODE_SEL_VID:
+		intf_cfg &= ~BIT(17);
+		intf_cfg &= ~(0x3 << 15);
+		break;
+	case DPU_CTL_MODE_SEL_CMD:
+		intf_cfg |= BIT(17);
+		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
+		break;
+	default:
+		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
+		return;
+	}
+
+	DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
+}
+
+static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
+		unsigned long cap)
+{
+	ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
+	ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
+	ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
+	ops->trigger_flush = dpu_hw_ctl_trigger_flush;
+	ops->get_flush_register = dpu_hw_ctl_get_flush_register;
+	ops->trigger_start = dpu_hw_ctl_trigger_start;
+	ops->trigger_pending = dpu_hw_ctl_trigger_pending;
+	ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
+	ops->reset = dpu_hw_ctl_reset_control;
+	ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
+	ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
+	ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
+	ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
+	ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
+	ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
+	ops->get_bitmask_cdm = dpu_hw_ctl_get_bitmask_cdm;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
+struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *m)
+{
+	struct dpu_hw_ctl *c;
+	struct dpu_ctl_cfg *cfg;
+	int rc;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _ctl_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		pr_err("failed to create dpu_hw_ctl %d\n", idx);
+		return ERR_PTR(-EINVAL);
+	}
+
+	c->caps = cfg;
+	_setup_ctl_ops(&c->ops, c->caps->features);
+	c->idx = idx;
+	c->mixer_count = m->mixer_count;
+	c->mixer_hw_caps = m->mixer;
+
+	rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);
+	if (rc) {
+		DPU_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
+	return c;
+
+blk_init_error:
+	kzfree(c);
+
+	return ERR_PTR(rc);
+}
+
+void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
+{
+	if (ctx)
+		dpu_hw_blk_destroy(&ctx->base);
+	kfree(ctx);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
new file mode 100644
index 0000000..c66a71f
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -0,0 +1,218 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_CTL_H
+#define _DPU_HW_CTL_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_hw_blk.h"
+
+/**
+ * dpu_ctl_mode_sel: Interface mode selection
+ * DPU_CTL_MODE_SEL_VID:    Video mode interface
+ * DPU_CTL_MODE_SEL_CMD:    Command mode interface
+ */
+enum dpu_ctl_mode_sel {
+	DPU_CTL_MODE_SEL_VID = 0,
+	DPU_CTL_MODE_SEL_CMD
+};
+
+struct dpu_hw_ctl;
+/**
+ * struct dpu_hw_stage_cfg - blending stage cfg
+ * @stage : SSPP_ID at each stage
+ * @multirect_index: index of the rectangle of SSPP.
+ */
+struct dpu_hw_stage_cfg {
+	enum dpu_sspp stage[DPU_STAGE_MAX][PIPES_PER_STAGE];
+	enum dpu_sspp_multirect_index multirect_index
+					[DPU_STAGE_MAX][PIPES_PER_STAGE];
+};
+
+/**
+ * struct dpu_hw_intf_cfg - Describes how the DPU writes data to the output interface
+ * @intf :                 Interface id
+ * @mode_3d:               3d mux configuration
+ * @intf_mode_sel:         Interface mode, cmd / vid
+ * @stream_sel:            Stream selection for multi-stream interfaces
+ */
+struct dpu_hw_intf_cfg {
+	enum dpu_intf intf;
+	enum dpu_3d_blend_mode mode_3d;
+	enum dpu_ctl_mode_sel intf_mode_sel;
+	int stream_sel;
+};
+
+/**
+ * struct dpu_hw_ctl_ops - Interface to the CTL path HW driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_ctl_ops {
+	/**
+	 * kickoff hw operation for SW controlled interfaces;
+	 * DSI cmd mode and WB interface are SW controlled
+	 * @ctx       : ctl path ctx pointer
+	 */
+	void (*trigger_start)(struct dpu_hw_ctl *ctx);
+
+	/**
+	 * signal that kickoff prepare is in progress, for SW
+	 * controlled interfaces: DSI cmd mode and WB interface
+	 * are SW controlled
+	 * @ctx       : ctl path ctx pointer
+	 */
+	void (*trigger_pending)(struct dpu_hw_ctl *ctx);
+
+	/**
+	 * Clear the value of the cached pending_flush_mask
+	 * No effect on hardware
+	 * @ctx       : ctl path ctx pointer
+	 */
+	void (*clear_pending_flush)(struct dpu_hw_ctl *ctx);
+
+	/**
+	 * Query the value of the cached pending_flush_mask
+	 * No effect on hardware
+	 * @ctx       : ctl path ctx pointer
+	 */
+	u32 (*get_pending_flush)(struct dpu_hw_ctl *ctx);
+
+	/**
+	 * OR in the given flushbits to the cached pending_flush_mask
+	 * No effect on hardware
+	 * @ctx       : ctl path ctx pointer
+	 * @flushbits : module flushmask
+	 */
+	void (*update_pending_flush)(struct dpu_hw_ctl *ctx,
+		u32 flushbits);
+
+	/**
+	 * Write the value of the pending_flush_mask to hardware
+	 * @ctx       : ctl path ctx pointer
+	 */
+	void (*trigger_flush)(struct dpu_hw_ctl *ctx);
+
+	/**
+	 * Read the value of the flush register
+	 * @ctx       : ctl path ctx pointer
+	 * Return: value of the ctl flush register.
+	 */
+	u32 (*get_flush_register)(struct dpu_hw_ctl *ctx);
+
+	/**
+	 * Setup ctl_path interface config
+	 * @ctx    : ctl path ctx pointer
+	 * @cfg    : interface config structure pointer
+	 */
+	void (*setup_intf_cfg)(struct dpu_hw_ctl *ctx,
+		struct dpu_hw_intf_cfg *cfg);
+
+	int (*reset)(struct dpu_hw_ctl *c);
+
+	/**
+	 * wait_reset_status - checks ctl reset status
+	 * @ctx       : ctl path ctx pointer
+	 *
+	 * This function checks the ctl reset status bit.
+	 * If the reset bit is set, it keeps polling the status till the hw
+	 * reset is complete.
+	 * Returns: 0 on success or -error if reset incomplete within interval
+	 */
+	int (*wait_reset_status)(struct dpu_hw_ctl *ctx);
+
+	uint32_t (*get_bitmask_sspp)(struct dpu_hw_ctl *ctx,
+		enum dpu_sspp blk);
+
+	uint32_t (*get_bitmask_mixer)(struct dpu_hw_ctl *ctx,
+		enum dpu_lm blk);
+
+	int (*get_bitmask_intf)(struct dpu_hw_ctl *ctx,
+		u32 *flushbits,
+		enum dpu_intf blk);
+
+	int (*get_bitmask_cdm)(struct dpu_hw_ctl *ctx,
+		u32 *flushbits,
+		enum dpu_cdm blk);
+
+	/**
+	 * Set all blend stages to disabled
+	 * @ctx       : ctl path ctx pointer
+	 */
+	void (*clear_all_blendstages)(struct dpu_hw_ctl *ctx);
+
+	/**
+	 * Configure layer mixer to pipe configuration
+	 * @ctx       : ctl path ctx pointer
+	 * @lm        : layer mixer enumeration
+	 * @cfg       : blend stage configuration
+	 */
+	void (*setup_blendstage)(struct dpu_hw_ctl *ctx,
+		enum dpu_lm lm, struct dpu_hw_stage_cfg *cfg);
+};
+
+/**
+ * struct dpu_hw_ctl : CTL PATH driver object
+ * @base: hardware block base structure
+ * @hw: block register map object
+ * @idx: control path index
+ * @caps: control path capabilities
+ * @mixer_count: number of mixers
+ * @mixer_hw_caps: mixer hardware capabilities
+ * @pending_flush_mask: storage for pending ctl_flush managed via ops
+ * @ops: operation list
+ */
+struct dpu_hw_ctl {
+	struct dpu_hw_blk base;
+	struct dpu_hw_blk_reg_map hw;
+
+	/* ctl path */
+	int idx;
+	const struct dpu_ctl_cfg *caps;
+	int mixer_count;
+	const struct dpu_lm_cfg *mixer_hw_caps;
+	u32 pending_flush_mask;
+
+	/* ops */
+	struct dpu_hw_ctl_ops ops;
+};
+
+/**
+ * dpu_hw_ctl - convert base object dpu_hw_base to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_ctl *to_dpu_hw_ctl(struct dpu_hw_blk *hw)
+{
+	return container_of(hw, struct dpu_hw_ctl, base);
+}
+
+/**
+ * dpu_hw_ctl_init(): Initializes the ctl_path hw driver object.
+ * Should be called before accessing any ctl path registers.
+ * @idx:  ctl_path index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m :   pointer to mdss catalog data
+ */
+struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *m);
+
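+/*
+ * Illustrative usage (hypothetical caller; "mmio" is the mapped MDP
+ * register base, "catalog" a dpu_hw_catalog_init() result):
+ *
+ *	struct dpu_hw_ctl *ctl = dpu_hw_ctl_init(CTL_0, mmio, catalog);
+ *
+ *	if (IS_ERR(ctl))
+ *		return PTR_ERR(ctl);
+ */
+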
+/**
+ * dpu_hw_ctl_destroy(): Destroys ctl driver context
+ * Should be called to free the context.
+ */
+void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx);
+
+#endif /*_DPU_HW_CTL_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
new file mode 100644
index 0000000..c0b7f00
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -0,0 +1,1183 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/slab.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_mdss.h"
+
+/**
+ * Register offsets in MDSS register file for the interrupt registers
+ * w.r.t. the MDP base
+ */
+#define MDP_SSPP_TOP0_OFF		0x0
+#define MDP_INTF_0_OFF			0x6A000
+#define MDP_INTF_1_OFF			0x6A800
+#define MDP_INTF_2_OFF			0x6B000
+#define MDP_INTF_3_OFF			0x6B800
+#define MDP_INTF_4_OFF			0x6C000
+#define MDP_AD4_0_OFF			0x7C000
+#define MDP_AD4_1_OFF			0x7D000
+#define MDP_AD4_INTR_EN_OFF		0x41c
+#define MDP_AD4_INTR_CLEAR_OFF		0x424
+#define MDP_AD4_INTR_STATUS_OFF		0x420
+
+/**
+ * WB interrupt status bit definitions
+ */
+#define DPU_INTR_WB_0_DONE BIT(0)
+#define DPU_INTR_WB_1_DONE BIT(1)
+#define DPU_INTR_WB_2_DONE BIT(4)
+
+/**
+ * WDOG timer interrupt status bit definitions
+ */
+#define DPU_INTR_WD_TIMER_0_DONE BIT(2)
+#define DPU_INTR_WD_TIMER_1_DONE BIT(3)
+#define DPU_INTR_WD_TIMER_2_DONE BIT(5)
+#define DPU_INTR_WD_TIMER_3_DONE BIT(6)
+#define DPU_INTR_WD_TIMER_4_DONE BIT(7)
+
+/**
+ * Pingpong interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_0_DONE BIT(8)
+#define DPU_INTR_PING_PONG_1_DONE BIT(9)
+#define DPU_INTR_PING_PONG_2_DONE BIT(10)
+#define DPU_INTR_PING_PONG_3_DONE BIT(11)
+#define DPU_INTR_PING_PONG_0_RD_PTR BIT(12)
+#define DPU_INTR_PING_PONG_1_RD_PTR BIT(13)
+#define DPU_INTR_PING_PONG_2_RD_PTR BIT(14)
+#define DPU_INTR_PING_PONG_3_RD_PTR BIT(15)
+#define DPU_INTR_PING_PONG_0_WR_PTR BIT(16)
+#define DPU_INTR_PING_PONG_1_WR_PTR BIT(17)
+#define DPU_INTR_PING_PONG_2_WR_PTR BIT(18)
+#define DPU_INTR_PING_PONG_3_WR_PTR BIT(19)
+#define DPU_INTR_PING_PONG_0_AUTOREFRESH_DONE BIT(20)
+#define DPU_INTR_PING_PONG_1_AUTOREFRESH_DONE BIT(21)
+#define DPU_INTR_PING_PONG_2_AUTOREFRESH_DONE BIT(22)
+#define DPU_INTR_PING_PONG_3_AUTOREFRESH_DONE BIT(23)
+
+/**
+ * Interface interrupt status bit definitions
+ */
+#define DPU_INTR_INTF_0_UNDERRUN BIT(24)
+#define DPU_INTR_INTF_1_UNDERRUN BIT(26)
+#define DPU_INTR_INTF_2_UNDERRUN BIT(28)
+#define DPU_INTR_INTF_3_UNDERRUN BIT(30)
+#define DPU_INTR_INTF_0_VSYNC BIT(25)
+#define DPU_INTR_INTF_1_VSYNC BIT(27)
+#define DPU_INTR_INTF_2_VSYNC BIT(29)
+#define DPU_INTR_INTF_3_VSYNC BIT(31)
+
+/**
+ * Pingpong Secondary interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_S0_AUTOREFRESH_DONE BIT(0)
+#define DPU_INTR_PING_PONG_S0_WR_PTR BIT(4)
+#define DPU_INTR_PING_PONG_S0_RD_PTR BIT(8)
+#define DPU_INTR_PING_PONG_S0_TEAR_DETECTED BIT(22)
+#define DPU_INTR_PING_PONG_S0_TE_DETECTED BIT(28)
+
+/**
+ * Pingpong TEAR detection interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_0_TEAR_DETECTED BIT(16)
+#define DPU_INTR_PING_PONG_1_TEAR_DETECTED BIT(17)
+#define DPU_INTR_PING_PONG_2_TEAR_DETECTED BIT(18)
+#define DPU_INTR_PING_PONG_3_TEAR_DETECTED BIT(19)
+
+/**
+ * Pingpong TE detection interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_0_TE_DETECTED BIT(24)
+#define DPU_INTR_PING_PONG_1_TE_DETECTED BIT(25)
+#define DPU_INTR_PING_PONG_2_TE_DETECTED BIT(26)
+#define DPU_INTR_PING_PONG_3_TE_DETECTED BIT(27)
+
+/**
+ * Ctl start interrupt status bit definitions
+ */
+#define DPU_INTR_CTL_0_START BIT(9)
+#define DPU_INTR_CTL_1_START BIT(10)
+#define DPU_INTR_CTL_2_START BIT(11)
+#define DPU_INTR_CTL_3_START BIT(12)
+#define DPU_INTR_CTL_4_START BIT(13)
+
+/**
+ * Concurrent WB overflow interrupt status bit definitions
+ */
+#define DPU_INTR_CWB_2_OVERFLOW BIT(14)
+#define DPU_INTR_CWB_3_OVERFLOW BIT(15)
+
+/**
+ * Histogram VIG done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_VIG_0_DONE BIT(0)
+#define DPU_INTR_HIST_VIG_1_DONE BIT(4)
+#define DPU_INTR_HIST_VIG_2_DONE BIT(8)
+#define DPU_INTR_HIST_VIG_3_DONE BIT(10)
+
+/**
+ * Histogram VIG reset Sequence done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_VIG_0_RSTSEQ_DONE BIT(1)
+#define DPU_INTR_HIST_VIG_1_RSTSEQ_DONE BIT(5)
+#define DPU_INTR_HIST_VIG_2_RSTSEQ_DONE BIT(9)
+#define DPU_INTR_HIST_VIG_3_RSTSEQ_DONE BIT(11)
+
+/**
+ * Histogram DSPP done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_DSPP_0_DONE BIT(12)
+#define DPU_INTR_HIST_DSPP_1_DONE BIT(16)
+#define DPU_INTR_HIST_DSPP_2_DONE BIT(20)
+#define DPU_INTR_HIST_DSPP_3_DONE BIT(22)
+
+/**
+ * Histogram DSPP reset Sequence done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_DSPP_0_RSTSEQ_DONE BIT(13)
+#define DPU_INTR_HIST_DSPP_1_RSTSEQ_DONE BIT(17)
+#define DPU_INTR_HIST_DSPP_2_RSTSEQ_DONE BIT(21)
+#define DPU_INTR_HIST_DSPP_3_RSTSEQ_DONE BIT(23)
+
+/**
+ * INTF interrupt status bit definitions
+ */
+#define DPU_INTR_VIDEO_INTO_STATIC BIT(0)
+#define DPU_INTR_VIDEO_OUTOF_STATIC BIT(1)
+#define DPU_INTR_DSICMD_0_INTO_STATIC BIT(2)
+#define DPU_INTR_DSICMD_0_OUTOF_STATIC BIT(3)
+#define DPU_INTR_DSICMD_1_INTO_STATIC BIT(4)
+#define DPU_INTR_DSICMD_1_OUTOF_STATIC BIT(5)
+#define DPU_INTR_DSICMD_2_INTO_STATIC BIT(6)
+#define DPU_INTR_DSICMD_2_OUTOF_STATIC BIT(7)
+#define DPU_INTR_PROG_LINE BIT(8)
+
+/**
+ * AD4 interrupt status bit definitions
+ */
+#define DPU_INTR_BRIGHTPR_UPDATED BIT(4)
+#define DPU_INTR_DARKENH_UPDATED BIT(3)
+#define DPU_INTR_STREN_OUTROI_UPDATED BIT(2)
+#define DPU_INTR_STREN_INROI_UPDATED BIT(1)
+#define DPU_INTR_BACKLIGHT_UPDATED BIT(0)
+/**
+ * struct dpu_intr_reg - DPU interrupt register set
+ * @clr_off:	offset to CLEAR reg
+ * @en_off:	offset to ENABLE reg
+ * @status_off:	offset to STATUS reg
+ */
+struct dpu_intr_reg {
+	u32 clr_off;
+	u32 en_off;
+	u32 status_off;
+};
+
+/**
+ * struct dpu_irq_type - maps each irq to its HW block interface
+ * @intr_type:		type of interrupt listed in dpu_intr_type
+ * @instance_idx:	instance index of the associated HW block in DPU
+ * @irq_mask:		corresponding bit in the interrupt status reg
+ * @reg_idx:		which reg set to use
+ */
+struct dpu_irq_type {
+	u32 intr_type;
+	u32 instance_idx;
+	u32 irq_mask;
+	u32 reg_idx;
+};
+
+/**
+ * List of DPU interrupt registers
+ */
+static const struct dpu_intr_reg dpu_intr_set[] = {
+	{
+		MDP_SSPP_TOP0_OFF+INTR_CLEAR,
+		MDP_SSPP_TOP0_OFF+INTR_EN,
+		MDP_SSPP_TOP0_OFF+INTR_STATUS
+	},
+	{
+		MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
+		MDP_SSPP_TOP0_OFF+INTR2_EN,
+		MDP_SSPP_TOP0_OFF+INTR2_STATUS
+	},
+	{
+		MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
+		MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
+		MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
+	},
+	{
+		MDP_INTF_0_OFF+INTF_INTR_CLEAR,
+		MDP_INTF_0_OFF+INTF_INTR_EN,
+		MDP_INTF_0_OFF+INTF_INTR_STATUS
+	},
+	{
+		MDP_INTF_1_OFF+INTF_INTR_CLEAR,
+		MDP_INTF_1_OFF+INTF_INTR_EN,
+		MDP_INTF_1_OFF+INTF_INTR_STATUS
+	},
+	{
+		MDP_INTF_2_OFF+INTF_INTR_CLEAR,
+		MDP_INTF_2_OFF+INTF_INTR_EN,
+		MDP_INTF_2_OFF+INTF_INTR_STATUS
+	},
+	{
+		MDP_INTF_3_OFF+INTF_INTR_CLEAR,
+		MDP_INTF_3_OFF+INTF_INTR_EN,
+		MDP_INTF_3_OFF+INTF_INTR_STATUS
+	},
+	{
+		MDP_INTF_4_OFF+INTF_INTR_CLEAR,
+		MDP_INTF_4_OFF+INTF_INTR_EN,
+		MDP_INTF_4_OFF+INTF_INTR_STATUS
+	},
+	{
+		MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
+		MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
+		MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
+	},
+	{
+		MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
+		MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
+		MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
+	}
+};
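+
+/*
+ * Note: the reg_idx field in dpu_irq_map below indexes into dpu_intr_set
+ * above (0 = top level INTR, 1 = INTR2, 2 = HIST, 3..7 = INTF_0..INTF_4,
+ * 8..9 = AD4_0 and AD4_1).
+ */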
+
+/**
+ * IRQ mapping table - used to look up an irq_idx in this table that has
+ *                     a matching interface type and instance index.
+ */
+static const struct dpu_irq_type dpu_irq_map[] = {
+	/* BEGIN MAP_RANGE: 0-31, INTR */
+	/* irq_idx: 0-3 */
+	{ DPU_IRQ_TYPE_WB_ROT_COMP, WB_0, DPU_INTR_WB_0_DONE, 0},
+	{ DPU_IRQ_TYPE_WB_ROT_COMP, WB_1, DPU_INTR_WB_1_DONE, 0},
+	{ DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_0, DPU_INTR_WD_TIMER_0_DONE, 0},
+	{ DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_1, DPU_INTR_WD_TIMER_1_DONE, 0},
+	/* irq_idx: 4-7 */
+	{ DPU_IRQ_TYPE_WB_WFD_COMP, WB_2, DPU_INTR_WB_2_DONE, 0},
+	{ DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_2, DPU_INTR_WD_TIMER_2_DONE, 0},
+	{ DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_3, DPU_INTR_WD_TIMER_3_DONE, 0},
+	{ DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_4, DPU_INTR_WD_TIMER_4_DONE, 0},
+	/* irq_idx: 8-11 */
+	{ DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_0,
+		DPU_INTR_PING_PONG_0_DONE, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_1,
+		DPU_INTR_PING_PONG_1_DONE, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_2,
+		DPU_INTR_PING_PONG_2_DONE, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_3,
+		DPU_INTR_PING_PONG_3_DONE, 0},
+	/* irq_idx: 12-15 */
+	{ DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_0,
+		DPU_INTR_PING_PONG_0_RD_PTR, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_1,
+		DPU_INTR_PING_PONG_1_RD_PTR, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_2,
+		DPU_INTR_PING_PONG_2_RD_PTR, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_3,
+		DPU_INTR_PING_PONG_3_RD_PTR, 0},
+	/* irq_idx: 16-19 */
+	{ DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_0,
+		DPU_INTR_PING_PONG_0_WR_PTR, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_1,
+		DPU_INTR_PING_PONG_1_WR_PTR, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_2,
+		DPU_INTR_PING_PONG_2_WR_PTR, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_3,
+		DPU_INTR_PING_PONG_3_WR_PTR, 0},
+	/* irq_idx: 20-23 */
+	{ DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_0,
+		DPU_INTR_PING_PONG_0_AUTOREFRESH_DONE, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_1,
+		DPU_INTR_PING_PONG_1_AUTOREFRESH_DONE, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_2,
+		DPU_INTR_PING_PONG_2_AUTOREFRESH_DONE, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_3,
+		DPU_INTR_PING_PONG_3_AUTOREFRESH_DONE, 0},
+	/* irq_idx: 24-27 */
+	{ DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_0, DPU_INTR_INTF_0_UNDERRUN, 0},
+	{ DPU_IRQ_TYPE_INTF_VSYNC, INTF_0, DPU_INTR_INTF_0_VSYNC, 0},
+	{ DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_1, DPU_INTR_INTF_1_UNDERRUN, 0},
+	{ DPU_IRQ_TYPE_INTF_VSYNC, INTF_1, DPU_INTR_INTF_1_VSYNC, 0},
+	/* irq_idx: 28-31 */
+	{ DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_2, DPU_INTR_INTF_2_UNDERRUN, 0},
+	{ DPU_IRQ_TYPE_INTF_VSYNC, INTF_2, DPU_INTR_INTF_2_VSYNC, 0},
+	{ DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_3, DPU_INTR_INTF_3_UNDERRUN, 0},
+	{ DPU_IRQ_TYPE_INTF_VSYNC, INTF_3, DPU_INTR_INTF_3_VSYNC, 0},
+
+	/* BEGIN MAP_RANGE: 32-64, INTR2 */
+	/* irq_idx: 32-35 */
+	{ DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_S0,
+		DPU_INTR_PING_PONG_S0_AUTOREFRESH_DONE, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	/* irq_idx: 36-39 */
+	{ DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_S0,
+		DPU_INTR_PING_PONG_S0_WR_PTR, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	/* irq_idx: 40 */
+	{ DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_S0,
+		DPU_INTR_PING_PONG_S0_RD_PTR, 1},
+	/* irq_idx: 41-45 */
+	{ DPU_IRQ_TYPE_CTL_START, CTL_0,
+		DPU_INTR_CTL_0_START, 1},
+	{ DPU_IRQ_TYPE_CTL_START, CTL_1,
+		DPU_INTR_CTL_1_START, 1},
+	{ DPU_IRQ_TYPE_CTL_START, CTL_2,
+		DPU_INTR_CTL_2_START, 1},
+	{ DPU_IRQ_TYPE_CTL_START, CTL_3,
+		DPU_INTR_CTL_3_START, 1},
+	{ DPU_IRQ_TYPE_CTL_START, CTL_4,
+		DPU_INTR_CTL_4_START, 1},
+	/* irq_idx: 46-47 */
+	{ DPU_IRQ_TYPE_CWB_OVERFLOW, CWB_2, DPU_INTR_CWB_2_OVERFLOW, 1},
+	{ DPU_IRQ_TYPE_CWB_OVERFLOW, CWB_3, DPU_INTR_CWB_3_OVERFLOW, 1},
+	/* irq_idx: 48-51 */
+	{ DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_0,
+		DPU_INTR_PING_PONG_0_TEAR_DETECTED, 1},
+	{ DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_1,
+		DPU_INTR_PING_PONG_1_TEAR_DETECTED, 1},
+	{ DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_2,
+		DPU_INTR_PING_PONG_2_TEAR_DETECTED, 1},
+	{ DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_3,
+		DPU_INTR_PING_PONG_3_TEAR_DETECTED, 1},
+	/* irq_idx: 52-55 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_S0,
+		DPU_INTR_PING_PONG_S0_TEAR_DETECTED, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	/* irq_idx: 56-59 */
+	{ DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_0,
+		DPU_INTR_PING_PONG_0_TE_DETECTED, 1},
+	{ DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_1,
+		DPU_INTR_PING_PONG_1_TE_DETECTED, 1},
+	{ DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_2,
+		DPU_INTR_PING_PONG_2_TE_DETECTED, 1},
+	{ DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_3,
+		DPU_INTR_PING_PONG_3_TE_DETECTED, 1},
+	/* irq_idx: 60-63 */
+	{ DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_S0,
+		DPU_INTR_PING_PONG_S0_TE_DETECTED, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+
+	/* BEGIN MAP_RANGE: 64-95 HIST */
+	/* irq_idx: 64-67 */
+	{ DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG0, DPU_INTR_HIST_VIG_0_DONE, 2},
+	{ DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG0,
+		DPU_INTR_HIST_VIG_0_RSTSEQ_DONE, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	/* irq_idx: 68-71 */
+	{ DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG1, DPU_INTR_HIST_VIG_1_DONE, 2},
+	{ DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG1,
+		DPU_INTR_HIST_VIG_1_RSTSEQ_DONE, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	/* irq_idx: 72-75 */
+	{ DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, DPU_INTR_HIST_VIG_2_DONE, 2},
+	{ DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
+		DPU_INTR_HIST_VIG_2_RSTSEQ_DONE, 2},
+	{ DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, DPU_INTR_HIST_VIG_3_DONE, 2},
+	{ DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3,
+		DPU_INTR_HIST_VIG_3_RSTSEQ_DONE, 2},
+	/* irq_idx: 76-79 */
+	{ DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, DPU_INTR_HIST_DSPP_0_DONE, 2},
+	{ DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
+		DPU_INTR_HIST_DSPP_0_RSTSEQ_DONE, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	/* irq_idx: 80-83 */
+	{ DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, DPU_INTR_HIST_DSPP_1_DONE, 2},
+	{ DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
+		DPU_INTR_HIST_DSPP_1_RSTSEQ_DONE, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	/* irq_idx: 84-87 */
+	{ DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, DPU_INTR_HIST_DSPP_2_DONE, 2},
+	{ DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
+		DPU_INTR_HIST_DSPP_2_RSTSEQ_DONE, 2},
+	{ DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, DPU_INTR_HIST_DSPP_3_DONE, 2},
+	{ DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3,
+		DPU_INTR_HIST_DSPP_3_RSTSEQ_DONE, 2},
+	/* irq_idx: 88-91 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	/* irq_idx: 92-95 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+
+	/* BEGIN MAP_RANGE: 96-127 INTF_0_INTR */
+	/* irq_idx: 96-99 */
+	{ DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_0,
+		DPU_INTR_VIDEO_INTO_STATIC, 3},
+	{ DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_0,
+		DPU_INTR_VIDEO_OUTOF_STATIC, 3},
+	{ DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_0,
+		DPU_INTR_DSICMD_0_INTO_STATIC, 3},
+	{ DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_0,
+		DPU_INTR_DSICMD_0_OUTOF_STATIC, 3},
+	/* irq_idx: 100-103 */
+	{ DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_0,
+		DPU_INTR_DSICMD_1_INTO_STATIC, 3},
+	{ DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_0,
+		DPU_INTR_DSICMD_1_OUTOF_STATIC, 3},
+	{ DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_0,
+		DPU_INTR_DSICMD_2_INTO_STATIC, 3},
+	{ DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_0,
+		DPU_INTR_DSICMD_2_OUTOF_STATIC, 3},
+	/* irq_idx: 104-107 */
+	{ DPU_IRQ_TYPE_PROG_LINE, INTF_0, DPU_INTR_PROG_LINE, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	/* irq_idx: 108-111 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	/* irq_idx: 112-115 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	/* irq_idx: 116-119 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	/* irq_idx: 120-123 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	/* irq_idx: 124-127 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+
+	/* BEGIN MAP_RANGE: 128-159 INTF_1_INTR */
+	/* irq_idx: 128-131 */
+	{ DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_1,
+		DPU_INTR_VIDEO_INTO_STATIC, 4},
+	{ DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_1,
+		DPU_INTR_VIDEO_OUTOF_STATIC, 4},
+	{ DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_1,
+		DPU_INTR_DSICMD_0_INTO_STATIC, 4},
+	{ DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_1,
+		DPU_INTR_DSICMD_0_OUTOF_STATIC, 4},
+	/* irq_idx: 132-135 */
+	{ DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_1,
+		DPU_INTR_DSICMD_1_INTO_STATIC, 4},
+	{ DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_1,
+		DPU_INTR_DSICMD_1_OUTOF_STATIC, 4},
+	{ DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_1,
+		DPU_INTR_DSICMD_2_INTO_STATIC, 4},
+	{ DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_1,
+		DPU_INTR_DSICMD_2_OUTOF_STATIC, 4},
+	/* irq_idx: 136-139 */
+	{ DPU_IRQ_TYPE_PROG_LINE, INTF_1, DPU_INTR_PROG_LINE, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	/* irq_idx: 140-143 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	/* irq_idx: 144-147 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	/* irq_idx: 148-151 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	/* irq_idx: 152-155 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	/* irq_idx: 156-159 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+
+	/* BEGIN MAP_RANGE: 160-191 INTF_2_INTR */
+	/* irq_idx: 160-163 */
+	{ DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_2,
+		DPU_INTR_VIDEO_INTO_STATIC, 5},
+	{ DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_2,
+		DPU_INTR_VIDEO_OUTOF_STATIC, 5},
+	{ DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_2,
+		DPU_INTR_DSICMD_0_INTO_STATIC, 5},
+	{ DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_2,
+		DPU_INTR_DSICMD_0_OUTOF_STATIC, 5},
+	/* irq_idx: 164-167 */
+	{ DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_2,
+		DPU_INTR_DSICMD_1_INTO_STATIC, 5},
+	{ DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_2,
+		DPU_INTR_DSICMD_1_OUTOF_STATIC, 5},
+	{ DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_2,
+		DPU_INTR_DSICMD_2_INTO_STATIC, 5},
+	{ DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_2,
+		DPU_INTR_DSICMD_2_OUTOF_STATIC, 5},
+	/* irq_idx: 168-171 */
+	{ DPU_IRQ_TYPE_PROG_LINE, INTF_2, DPU_INTR_PROG_LINE, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	/* irq_idx: 172-175 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	/* irq_idx: 176-179 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	/* irq_idx: 180-183 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	/* irq_idx: 184-187 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	/* irq_idx: 188-191 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+
+	/* BEGIN MAP_RANGE: 192-223 INTF_3_INTR */
+	/* irq_idx: 192-195 */
+	{ DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_3,
+		DPU_INTR_VIDEO_INTO_STATIC, 6},
+	{ DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_3,
+		DPU_INTR_VIDEO_OUTOF_STATIC, 6},
+	{ DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_3,
+		DPU_INTR_DSICMD_0_INTO_STATIC, 6},
+	{ DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_3,
+		DPU_INTR_DSICMD_0_OUTOF_STATIC, 6},
+	/* irq_idx: 196-199 */
+	{ DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_3,
+		DPU_INTR_DSICMD_1_INTO_STATIC, 6},
+	{ DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_3,
+		DPU_INTR_DSICMD_1_OUTOF_STATIC, 6},
+	{ DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_3,
+		DPU_INTR_DSICMD_2_INTO_STATIC, 6},
+	{ DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_3,
+		DPU_INTR_DSICMD_2_OUTOF_STATIC, 6},
+	/* irq_idx: 200-203 */
+	{ DPU_IRQ_TYPE_PROG_LINE, INTF_3, DPU_INTR_PROG_LINE, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	/* irq_idx: 204-207 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	/* irq_idx: 208-211 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	/* irq_idx: 212-215 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	/* irq_idx: 216-219 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	/* irq_idx: 220-223 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+
+	/* BEGIN MAP_RANGE: 224-255 INTF_4_INTR */
+	/* irq_idx: 224-227 */
+	{ DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_4,
+		DPU_INTR_VIDEO_INTO_STATIC, 7},
+	{ DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_4,
+		DPU_INTR_VIDEO_OUTOF_STATIC, 7},
+	{ DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_4,
+		DPU_INTR_DSICMD_0_INTO_STATIC, 7},
+	{ DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_4,
+		DPU_INTR_DSICMD_0_OUTOF_STATIC, 7},
+	/* irq_idx: 228-231 */
+	{ DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_4,
+		DPU_INTR_DSICMD_1_INTO_STATIC, 7},
+	{ DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_4,
+		DPU_INTR_DSICMD_1_OUTOF_STATIC, 7},
+	{ DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_4,
+		DPU_INTR_DSICMD_2_INTO_STATIC, 7},
+	{ DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_4,
+		DPU_INTR_DSICMD_2_OUTOF_STATIC, 7},
+	/* irq_idx: 232-235 */
+	{ DPU_IRQ_TYPE_PROG_LINE, INTF_4, DPU_INTR_PROG_LINE, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	/* irq_idx: 236-239 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	/* irq_idx: 240-243 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	/* irq_idx: 244-247 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	/* irq_idx: 248-251 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	/* irq_idx: 252-255 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+
+	/* BEGIN MAP_RANGE: 256-287 AD4_0_INTR */
+	/* irq_idx: 256-259 */
+	{ DPU_IRQ_TYPE_AD4_BL_DONE, DSPP_0, DPU_INTR_BACKLIGHT_UPDATED, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	/* irq_idx: 260-263 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	/* irq_idx: 264-267 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	/* irq_idx: 268-271 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	/* irq_idx: 272-275 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	/* irq_idx: 276-279 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	/* irq_idx: 280-283 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	/* irq_idx: 284-287 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+
+	/* BEGIN MAP_RANGE: 288-319 AD4_1_INTR */
+	/* irq_idx: 288-291 */
+	{ DPU_IRQ_TYPE_AD4_BL_DONE, DSPP_1, DPU_INTR_BACKLIGHT_UPDATED, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	/* irq_idx: 292-295 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	/* irq_idx: 296-299 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	/* irq_idx: 300-303 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	/* irq_idx: 304-307 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	/* irq_idx: 308-311 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	/* irq_idx: 312-315 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	/* irq_idx: 316-319 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+};
+
+static int dpu_hw_intr_irqidx_lookup(enum dpu_intr_type intr_type,
+		u32 instance_idx)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dpu_irq_map); i++) {
+		if (intr_type == dpu_irq_map[i].intr_type &&
+			instance_idx == dpu_irq_map[i].instance_idx)
+			return i;
+	}
+
+	pr_debug("IRQ lookup fail!! intr_type=%d, instance_idx=%d\n",
+			intr_type, instance_idx);
+	return -EINVAL;
+}
+
+static void dpu_hw_intr_set_mask(struct dpu_hw_intr *intr, uint32_t reg_off,
+		uint32_t mask)
+{
+	if (!intr)
+		return;
+
+	DPU_REG_WRITE(&intr->hw, reg_off, mask);
+
+	/* ensure register writes go through */
+	wmb();
+}
+
+static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
+		void (*cbfunc)(void *, int),
+		void *arg)
+{
+	int reg_idx;
+	int irq_idx;
+	int start_idx;
+	int end_idx;
+	u32 irq_status;
+	unsigned long irq_flags;
+
+	if (!intr)
+		return;
+
+	/*
+	 * The dispatcher will save the IRQ status before calling here.
+	 * Now we need to go through each IRQ status register and find
+	 * the matching IRQ lookup index.
+	 */
+	spin_lock_irqsave(&intr->irq_lock, irq_flags);
+	for (reg_idx = 0; reg_idx < ARRAY_SIZE(dpu_intr_set); reg_idx++) {
+		irq_status = intr->save_irq_status[reg_idx];
+
+		/*
+		 * Each Interrupt register has a range of 32 indexes, and
+		 * that is static for dpu_irq_map.
+		 */
+		start_idx = reg_idx * 32;
+		end_idx = start_idx + 32;
+
+		if (start_idx >= ARRAY_SIZE(dpu_irq_map) ||
+				end_idx > ARRAY_SIZE(dpu_irq_map))
+			continue;
+
+		/*
+		 * Search for a matching intr status in the irq map.
+		 * start_idx and end_idx define the search range in
+		 * dpu_irq_map.
+		 */
+		for (irq_idx = start_idx;
+				(irq_idx < end_idx) && irq_status;
+				irq_idx++)
+			if ((irq_status & dpu_irq_map[irq_idx].irq_mask) &&
+				(dpu_irq_map[irq_idx].reg_idx == reg_idx)) {
+				/*
+				 * On an irq mask match, invoke the given
+				 * cbfunc callback. cbfunc will take care of
+				 * clearing the interrupt status. If no cbfunc
+				 * is provided, the interrupt is cleared here.
+				 */
+				if (cbfunc)
+					cbfunc(arg, irq_idx);
+				else
+					intr->ops.clear_intr_status_nolock(
+							intr, irq_idx);
+
+				/*
+				 * When the callback finishes, clear the
+				 * matching mask from irq_status. Once
+				 * irq_status is fully cleared, the search
+				 * can stop.
+				 */
+				irq_status &= ~dpu_irq_map[irq_idx].irq_mask;
+			}
+	}
+	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+}
+
+static int dpu_hw_intr_enable_irq(struct dpu_hw_intr *intr, int irq_idx)
+{
+	int reg_idx;
+	unsigned long irq_flags;
+	const struct dpu_intr_reg *reg;
+	const struct dpu_irq_type *irq;
+	const char *dbgstr = NULL;
+	uint32_t cache_irq_mask;
+
+	if (!intr)
+		return -EINVAL;
+
+	if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+		pr_err("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	irq = &dpu_irq_map[irq_idx];
+	reg_idx = irq->reg_idx;
+	reg = &dpu_intr_set[reg_idx];
+
+	spin_lock_irqsave(&intr->irq_lock, irq_flags);
+	cache_irq_mask = intr->cache_irq_mask[reg_idx];
+	if (cache_irq_mask & irq->irq_mask) {
+		dbgstr = "DPU IRQ already set:";
+	} else {
+		dbgstr = "DPU IRQ enabled:";
+
+		cache_irq_mask |= irq->irq_mask;
+		/* Clear any pending interrupt */
+		DPU_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+		/* Enabling interrupts with the new mask */
+		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+
+		/* ensure register write goes through */
+		wmb();
+
+		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+	}
+	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+	pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
+			irq->irq_mask, cache_irq_mask);
+
+	return 0;
+}
+
+static int dpu_hw_intr_disable_irq_nolock(struct dpu_hw_intr *intr, int irq_idx)
+{
+	int reg_idx;
+	const struct dpu_intr_reg *reg;
+	const struct dpu_irq_type *irq;
+	const char *dbgstr = NULL;
+	uint32_t cache_irq_mask;
+
+	if (!intr)
+		return -EINVAL;
+
+	if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+		pr_err("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	irq = &dpu_irq_map[irq_idx];
+	reg_idx = irq->reg_idx;
+	reg = &dpu_intr_set[reg_idx];
+
+	cache_irq_mask = intr->cache_irq_mask[reg_idx];
+	if ((cache_irq_mask & irq->irq_mask) == 0) {
+		dbgstr = "DPU IRQ is already cleared:";
+	} else {
+		dbgstr = "DPU IRQ mask disable:";
+
+		cache_irq_mask &= ~irq->irq_mask;
+		/* Disable interrupts based on the new mask */
+		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+		/* Clear any pending interrupt */
+		DPU_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+
+		/* ensure register write goes through */
+		wmb();
+
+		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+	}
+
+	pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
+			irq->irq_mask, cache_irq_mask);
+
+	return 0;
+}
+
+static int dpu_hw_intr_disable_irq(struct dpu_hw_intr *intr, int irq_idx)
+{
+	unsigned long irq_flags;
+
+	if (!intr)
+		return -EINVAL;
+
+	if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+		pr_err("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&intr->irq_lock, irq_flags);
+	dpu_hw_intr_disable_irq_nolock(intr, irq_idx);
+	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+	return 0;
+}
+
+static int dpu_hw_intr_clear_irqs(struct dpu_hw_intr *intr)
+{
+	int i;
+
+	if (!intr)
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++)
+		DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].clr_off, 0xffffffff);
+
+	/* ensure register writes go through */
+	wmb();
+
+	return 0;
+}
+
+static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr)
+{
+	int i;
+
+	if (!intr)
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++)
+		DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].en_off, 0x00000000);
+
+	/* ensure register writes go through */
+	wmb();
+
+	return 0;
+}
+
+static int dpu_hw_intr_get_valid_interrupts(struct dpu_hw_intr *intr,
+		uint32_t *mask)
+{
+	if (!intr || !mask)
+		return -EINVAL;
+
+	*mask = IRQ_SOURCE_MDP | IRQ_SOURCE_DSI0 | IRQ_SOURCE_DSI1
+		| IRQ_SOURCE_HDMI | IRQ_SOURCE_EDP;
+
+	return 0;
+}
+
+static void dpu_hw_intr_get_interrupt_statuses(struct dpu_hw_intr *intr)
+{
+	int i;
+	u32 enable_mask;
+	unsigned long irq_flags;
+
+	if (!intr)
+		return;
+
+	spin_lock_irqsave(&intr->irq_lock, irq_flags);
+	for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
+		/* Read interrupt status */
+		intr->save_irq_status[i] = DPU_REG_READ(&intr->hw,
+				dpu_intr_set[i].status_off);
+
+		/* Read enable mask */
+		enable_mask = DPU_REG_READ(&intr->hw, dpu_intr_set[i].en_off);
+
+		/* and clear the interrupt */
+		if (intr->save_irq_status[i])
+			DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].clr_off,
+					intr->save_irq_status[i]);
+
+		/* Finally update IRQ status based on enable mask */
+		intr->save_irq_status[i] &= enable_mask;
+	}
+
+	/* ensure register writes go through */
+	wmb();
+
+	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+}
+
+static void dpu_hw_intr_clear_intr_status_nolock(struct dpu_hw_intr *intr,
+		int irq_idx)
+{
+	int reg_idx;
+
+	if (!intr)
+		return;
+
+	reg_idx = dpu_irq_map[irq_idx].reg_idx;
+	DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
+			dpu_irq_map[irq_idx].irq_mask);
+
+	/* ensure register writes go through */
+	wmb();
+}
+
+static void dpu_hw_intr_clear_interrupt_status(struct dpu_hw_intr *intr,
+		int irq_idx)
+{
+	unsigned long irq_flags;
+
+	if (!intr)
+		return;
+
+	spin_lock_irqsave(&intr->irq_lock, irq_flags);
+	dpu_hw_intr_clear_intr_status_nolock(intr, irq_idx);
+	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+}
+
+static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
+		int irq_idx, bool clear)
+{
+	int reg_idx;
+	unsigned long irq_flags;
+	u32 intr_status;
+
+	if (!intr)
+		return 0;
+
+	if (irq_idx >= ARRAY_SIZE(dpu_irq_map) || irq_idx < 0) {
+		pr_err("invalid IRQ index: [%d]\n", irq_idx);
+		return 0;
+	}
+
+	spin_lock_irqsave(&intr->irq_lock, irq_flags);
+
+	reg_idx = dpu_irq_map[irq_idx].reg_idx;
+	intr_status = DPU_REG_READ(&intr->hw,
+			dpu_intr_set[reg_idx].status_off) &
+					dpu_irq_map[irq_idx].irq_mask;
+	if (intr_status && clear)
+		DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
+				intr_status);
+
+	/* ensure register writes go through */
+	wmb();
+
+	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+	return intr_status;
+}
+
+static void __setup_intr_ops(struct dpu_hw_intr_ops *ops)
+{
+	ops->set_mask = dpu_hw_intr_set_mask;
+	ops->irq_idx_lookup = dpu_hw_intr_irqidx_lookup;
+	ops->enable_irq = dpu_hw_intr_enable_irq;
+	ops->disable_irq = dpu_hw_intr_disable_irq;
+	ops->dispatch_irqs = dpu_hw_intr_dispatch_irq;
+	ops->clear_all_irqs = dpu_hw_intr_clear_irqs;
+	ops->disable_all_irqs = dpu_hw_intr_disable_irqs;
+	ops->get_valid_interrupts = dpu_hw_intr_get_valid_interrupts;
+	ops->get_interrupt_statuses = dpu_hw_intr_get_interrupt_statuses;
+	ops->clear_interrupt_status = dpu_hw_intr_clear_interrupt_status;
+	ops->clear_intr_status_nolock = dpu_hw_intr_clear_intr_status_nolock;
+	ops->get_interrupt_status = dpu_hw_intr_get_interrupt_status;
+}
+
+static void __intr_offset(struct dpu_mdss_cfg *m,
+		void __iomem *addr, struct dpu_hw_blk_reg_map *hw)
+{
+	hw->base_off = addr;
+	hw->blk_off = m->mdp[0].base;
+	hw->hwversion = m->hwversion;
+}
+
+struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
+		struct dpu_mdss_cfg *m)
+{
+	struct dpu_hw_intr *intr;
+
+	if (!addr || !m)
+		return ERR_PTR(-EINVAL);
+
+	intr = kzalloc(sizeof(*intr), GFP_KERNEL);
+	if (!intr)
+		return ERR_PTR(-ENOMEM);
+
+	__intr_offset(m, addr, &intr->hw);
+	__setup_intr_ops(&intr->ops);
+
+	intr->irq_idx_tbl_size = ARRAY_SIZE(dpu_irq_map);
+
+	intr->cache_irq_mask = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
+			GFP_KERNEL);
+	if (intr->cache_irq_mask == NULL) {
+		kfree(intr);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	intr->save_irq_status = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
+			GFP_KERNEL);
+	if (intr->save_irq_status == NULL) {
+		kfree(intr->cache_irq_mask);
+		kfree(intr);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	spin_lock_init(&intr->irq_lock);
+
+	return intr;
+}
+
+void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
+{
+	if (intr) {
+		kfree(intr->cache_irq_mask);
+		kfree(intr->save_irq_status);
+		kfree(intr);
+	}
+}
+
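Review note: a minimal sketch of how a client is expected to drive the ops
table added above — resolve a stable irq_idx once, enable it, then have the
top-level ISR latch and dispatch statuses. The function names and the
DPU_IRQ_TYPE_PROG_LINE/INTF_1 choice are hypothetical illustrations, not
part of this patch:

	/* hypothetical client code, not part of this patch */
	static void example_irq_cb(void *arg, int irq_idx)
	{
		struct dpu_hw_intr *intr = arg;

		/* per the dispatch_irqs contract, the callback owns
		 * interrupt status clearing
		 */
		intr->ops.clear_intr_status_nolock(intr, irq_idx);
	}

	static int example_setup(struct dpu_hw_intr *intr)
	{
		int irq_idx;

		/* irq_idx is a stable index into dpu_irq_map */
		irq_idx = intr->ops.irq_idx_lookup(DPU_IRQ_TYPE_PROG_LINE,
				INTF_1);
		if (irq_idx < 0)
			return irq_idx;

		return intr->ops.enable_irq(intr, irq_idx);
	}

	static void example_isr(struct dpu_hw_intr *intr)
	{
		/* latch all fired statuses, then fan out matches */
		intr->ops.get_interrupt_statuses(intr);
		intr->ops.dispatch_irqs(intr, example_irq_cb, intr);
	}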
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
new file mode 100644
index 0000000..61e4cba
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -0,0 +1,257 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_INTERRUPTS_H
+#define _DPU_HW_INTERRUPTS_H
+
+#include <linux/types.h>
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_mdss.h"
+
+#define IRQ_SOURCE_MDP		BIT(0)
+#define IRQ_SOURCE_DSI0		BIT(4)
+#define IRQ_SOURCE_DSI1		BIT(5)
+#define IRQ_SOURCE_HDMI		BIT(8)
+#define IRQ_SOURCE_EDP		BIT(12)
+#define IRQ_SOURCE_MHL		BIT(16)
+
+/**
+ * dpu_intr_type - HW Interrupt Type
+ * @DPU_IRQ_TYPE_WB_ROT_COMP:		WB rotator done
+ * @DPU_IRQ_TYPE_WB_WFD_COMP:		WB WFD done
+ * @DPU_IRQ_TYPE_PING_PONG_COMP:	PingPong done
+ * @DPU_IRQ_TYPE_PING_PONG_RD_PTR:	PingPong read pointer
+ * @DPU_IRQ_TYPE_PING_PONG_WR_PTR:	PingPong write pointer
+ * @DPU_IRQ_TYPE_PING_PONG_AUTO_REF:	PingPong auto refresh
+ * @DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK:	PingPong Tear check
+ * @DPU_IRQ_TYPE_PING_PONG_TE_CHECK:	PingPong TE detection
+ * @DPU_IRQ_TYPE_INTF_UNDER_RUN:	INTF underrun
+ * @DPU_IRQ_TYPE_INTF_VSYNC:		INTF VSYNC
+ * @DPU_IRQ_TYPE_CWB_OVERFLOW:		Concurrent WB overflow
+ * @DPU_IRQ_TYPE_HIST_VIG_DONE:		VIG Histogram done
+ * @DPU_IRQ_TYPE_HIST_VIG_RSTSEQ:	VIG Histogram reset
+ * @DPU_IRQ_TYPE_HIST_DSPP_DONE:	DSPP Histogram done
+ * @DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ:	DSPP Histogram reset
+ * @DPU_IRQ_TYPE_WD_TIMER:		Watchdog timer
+ * @DPU_IRQ_TYPE_SFI_VIDEO_IN:		Video static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_VIDEO_OUT:		Video static frame INTR out-of static
+ * @DPU_IRQ_TYPE_SFI_CMD_0_IN:		DSI CMD0 static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_CMD_0_OUT:		DSI CMD0 static frame INTR out-of static
+ * @DPU_IRQ_TYPE_SFI_CMD_1_IN:		DSI CMD1 static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_CMD_1_OUT:		DSI CMD1 static frame INTR out-of static
+ * @DPU_IRQ_TYPE_SFI_CMD_2_IN:		DSI CMD2 static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_CMD_2_OUT:		DSI CMD2 static frame INTR out-of static
+ * @DPU_IRQ_TYPE_PROG_LINE:		Programmable Line interrupt
+ * @DPU_IRQ_TYPE_AD4_BL_DONE:		AD4 backlight
+ * @DPU_IRQ_TYPE_CTL_START:		Control start
+ * @DPU_IRQ_TYPE_RESERVED:		Reserved for expansion
+ */
+enum dpu_intr_type {
+	DPU_IRQ_TYPE_WB_ROT_COMP,
+	DPU_IRQ_TYPE_WB_WFD_COMP,
+	DPU_IRQ_TYPE_PING_PONG_COMP,
+	DPU_IRQ_TYPE_PING_PONG_RD_PTR,
+	DPU_IRQ_TYPE_PING_PONG_WR_PTR,
+	DPU_IRQ_TYPE_PING_PONG_AUTO_REF,
+	DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK,
+	DPU_IRQ_TYPE_PING_PONG_TE_CHECK,
+	DPU_IRQ_TYPE_INTF_UNDER_RUN,
+	DPU_IRQ_TYPE_INTF_VSYNC,
+	DPU_IRQ_TYPE_CWB_OVERFLOW,
+	DPU_IRQ_TYPE_HIST_VIG_DONE,
+	DPU_IRQ_TYPE_HIST_VIG_RSTSEQ,
+	DPU_IRQ_TYPE_HIST_DSPP_DONE,
+	DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ,
+	DPU_IRQ_TYPE_WD_TIMER,
+	DPU_IRQ_TYPE_SFI_VIDEO_IN,
+	DPU_IRQ_TYPE_SFI_VIDEO_OUT,
+	DPU_IRQ_TYPE_SFI_CMD_0_IN,
+	DPU_IRQ_TYPE_SFI_CMD_0_OUT,
+	DPU_IRQ_TYPE_SFI_CMD_1_IN,
+	DPU_IRQ_TYPE_SFI_CMD_1_OUT,
+	DPU_IRQ_TYPE_SFI_CMD_2_IN,
+	DPU_IRQ_TYPE_SFI_CMD_2_OUT,
+	DPU_IRQ_TYPE_PROG_LINE,
+	DPU_IRQ_TYPE_AD4_BL_DONE,
+	DPU_IRQ_TYPE_CTL_START,
+	DPU_IRQ_TYPE_RESERVED,
+};
+
+struct dpu_hw_intr;
+
+/**
+ * Interrupt operations.
+ */
+struct dpu_hw_intr_ops {
+	/**
+	 * set_mask - Programs the given interrupt register with the
+	 *            given interrupt mask. Register value will get overwritten.
+	 * @intr:	HW interrupt handle
+	 * @reg_off:	MDSS HW register offset
+	 * @irqmask:	IRQ mask value
+	 */
+	void (*set_mask)(
+			struct dpu_hw_intr *intr,
+			uint32_t reg_off,
+			uint32_t irqmask);
+
+	/**
+	 * irq_idx_lookup - Look up the IRQ index for the given HW
+	 *                  interrupt type. Used for all IRQ-related ops.
+	 * @intr_type:		Interrupt type defined in dpu_intr_type
+	 * @instance_idx:	HW interrupt block instance
+	 * @return:		irq_idx, or -EINVAL on lookup failure
+	 */
+	int (*irq_idx_lookup)(
+			enum dpu_intr_type intr_type,
+			u32 instance_idx);
+
+	/**
+	 * enable_irq - Enable IRQ based on lookup IRQ index
+	 * @intr:	HW interrupt handle
+	 * @irq_idx:	IRQ index returned from irq_idx_lookup
+	 * @return:	0 for success, otherwise failure
+	 */
+	int (*enable_irq)(
+			struct dpu_hw_intr *intr,
+			int irq_idx);
+
+	/**
+	 * disable_irq - Disable IRQ based on lookup IRQ index
+	 * @intr:	HW interrupt handle
+	 * @irq_idx:	IRQ index returned from irq_idx_lookup
+	 * @return:	0 for success, otherwise failure
+	 */
+	int (*disable_irq)(
+			struct dpu_hw_intr *intr,
+			int irq_idx);
+
+	/**
+	 * clear_all_irqs - Clears all the interrupts (i.e. acknowledges
+	 *                  any asserted IRQs). Useful during reset.
+	 * @intr:	HW interrupt handle
+	 * @return:	0 for success, otherwise failure
+	 */
+	int (*clear_all_irqs)(
+			struct dpu_hw_intr *intr);
+
+	/**
+	 * disable_all_irqs - Disables all the interrupts. Useful during reset.
+	 * @intr:	HW interrupt handle
+	 * @return:	0 for success, otherwise failure
+	 */
+	int (*disable_all_irqs)(
+			struct dpu_hw_intr *intr);
+
+	/**
+	 * dispatch_irqs - IRQ dispatcher will call the given callback
+	 *                 function when a matching interrupt status bit is
+	 *                 found in the irq mapping table.
+	 * @intr:	HW interrupt handle
+	 * @cbfunc:	Callback function pointer
+	 * @arg:	Argument to pass back during callback
+	 */
+	void (*dispatch_irqs)(
+			struct dpu_hw_intr *intr,
+			void (*cbfunc)(void *arg, int irq_idx),
+			void *arg);
+
+	/**
+	 * get_interrupt_statuses - Gets and stores the values of all interrupt
+	 *                          status registers that are currently fired.
+	 * @intr:	HW interrupt handle
+	 */
+	void (*get_interrupt_statuses)(
+			struct dpu_hw_intr *intr);
+
+	/**
+	 * clear_interrupt_status - Clears HW interrupt status based on given
+	 *                          lookup IRQ index.
+	 * @intr:	HW interrupt handle
+	 * @irq_idx:	IRQ index returned from irq_idx_lookup
+	 */
+	void (*clear_interrupt_status)(
+			struct dpu_hw_intr *intr,
+			int irq_idx);
+
+	/**
+	 * clear_intr_status_nolock() - clears the HW interrupts without lock
+	 * @intr:	HW interrupt handle
+	 * @irq_idx:	IRQ index returned from irq_idx_lookup
+	 */
+	void (*clear_intr_status_nolock)(
+			struct dpu_hw_intr *intr,
+			int irq_idx);
+
+	/**
+	 * get_interrupt_status - Gets the HW interrupt status, clearing it
+	 *                        if set, based on the given lookup IRQ index.
+	 * @intr:	HW interrupt handle
+	 * @irq_idx:	IRQ index returned from irq_idx_lookup
+	 * @clear:	True to clear irq after read
+	 */
+	u32 (*get_interrupt_status)(
+			struct dpu_hw_intr *intr,
+			int irq_idx,
+			bool clear);
+
+	/**
+	 * get_valid_interrupts - Gets a mask of all valid interrupt sources
+	 *                        within the DPU. These are status bits within
+	 *                        the interrupt registers that identify the
+	 *                        source of an IRQ, for example MDP, DSI or
+	 *                        HDMI.
+	 * @intr:	HW interrupt handle
+	 * @mask:	Returning the interrupt source MASK
+	 * @return:	0 for success, otherwise failure
+	 */
+	int (*get_valid_interrupts)(
+			struct dpu_hw_intr *intr,
+			uint32_t *mask);
+};
+
+/**
+ * struct dpu_hw_intr: hw interrupts handling data structure
+ * @hw:               virtual address mapping
+ * @ops:              function pointer mapping for IRQ handling
+ * @cache_irq_mask:   array of cached IRQ enable mask register values,
+ *                    allocated during init
+ * @save_irq_status:  array of saved IRQ status register values,
+ *                    allocated during init
+ * @irq_idx_tbl_size: total number of irq_idx entries mapped in dpu_irq_map
+ * @irq_lock:         spinlock for accessing IRQ resources
+ */
+struct dpu_hw_intr {
+	struct dpu_hw_blk_reg_map hw;
+	struct dpu_hw_intr_ops ops;
+	u32 *cache_irq_mask;
+	u32 *save_irq_status;
+	u32 irq_idx_tbl_size;
+	spinlock_t irq_lock;
+};
+
+/**
+ * dpu_hw_intr_init(): Initializes the interrupts hw object
+ * @addr: mapped register io address of MDP
+ * @m :   pointer to mdss catalog data
+ */
+struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
+		struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_intr_destroy(): Cleans up the interrupts hw object
+ * @intr: pointer to interrupts hw object
+ */
+void dpu_hw_intr_destroy(struct dpu_hw_intr *intr);
+#endif
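Review note: the init/teardown lifecycle this header exports, as a hedged
sketch; "mmio" and "catalog" are assumed to be provided by the caller and
error handling is abbreviated:

	/* hypothetical caller, not part of this patch */
	struct dpu_hw_intr *intr;

	intr = dpu_hw_intr_init(mmio, catalog);
	if (IS_ERR(intr))
		return PTR_ERR(intr);

	/* a clean starting point: ack and mask everything */
	intr->ops.clear_all_irqs(intr);
	intr->ops.disable_all_irqs(intr);

	/* ... runtime use via intr->ops ... */

	dpu_hw_intr_destroy(intr);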
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
new file mode 100644
index 0000000..d280df5
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -0,0 +1,349 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_intf.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define INTF_TIMING_ENGINE_EN           0x000
+#define INTF_CONFIG                     0x004
+#define INTF_HSYNC_CTL                  0x008
+#define INTF_VSYNC_PERIOD_F0            0x00C
+#define INTF_VSYNC_PERIOD_F1            0x010
+#define INTF_VSYNC_PULSE_WIDTH_F0       0x014
+#define INTF_VSYNC_PULSE_WIDTH_F1       0x018
+#define INTF_DISPLAY_V_START_F0         0x01C
+#define INTF_DISPLAY_V_START_F1         0x020
+#define INTF_DISPLAY_V_END_F0           0x024
+#define INTF_DISPLAY_V_END_F1           0x028
+#define INTF_ACTIVE_V_START_F0          0x02C
+#define INTF_ACTIVE_V_START_F1          0x030
+#define INTF_ACTIVE_V_END_F0            0x034
+#define INTF_ACTIVE_V_END_F1            0x038
+#define INTF_DISPLAY_HCTL               0x03C
+#define INTF_ACTIVE_HCTL                0x040
+#define INTF_BORDER_COLOR               0x044
+#define INTF_UNDERFLOW_COLOR            0x048
+#define INTF_HSYNC_SKEW                 0x04C
+#define INTF_POLARITY_CTL               0x050
+#define INTF_TEST_CTL                   0x054
+#define INTF_TP_COLOR0                  0x058
+#define INTF_TP_COLOR1                  0x05C
+#define INTF_FRAME_LINE_COUNT_EN        0x0A8
+#define INTF_FRAME_COUNT                0x0AC
+#define INTF_LINE_COUNT                 0x0B0
+
+#define   INTF_DEFLICKER_CONFIG         0x0F0
+#define   INTF_DEFLICKER_STRNG_COEFF    0x0F4
+#define   INTF_DEFLICKER_WEAK_COEFF     0x0F8
+
+#define   INTF_DSI_CMD_MODE_TRIGGER_EN  0x084
+#define   INTF_PANEL_FORMAT             0x090
+#define   INTF_TPG_ENABLE               0x100
+#define   INTF_TPG_MAIN_CONTROL         0x104
+#define   INTF_TPG_VIDEO_CONFIG         0x108
+#define   INTF_TPG_COMPONENT_LIMITS     0x10C
+#define   INTF_TPG_RECTANGLE            0x110
+#define   INTF_TPG_INITIAL_VALUE        0x114
+#define   INTF_TPG_BLK_WHITE_PATTERN_FRAMES   0x118
+#define   INTF_TPG_RGB_MAPPING          0x11C
+#define   INTF_PROG_FETCH_START         0x170
+#define   INTF_PROG_ROT_START           0x174
+
+#define INTF_MISR_CTRL			0x180
+#define INTF_MISR_SIGNATURE		0x184
+
+static struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
+		struct dpu_mdss_cfg *m,
+		void __iomem *addr,
+		struct dpu_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->intf_count; i++) {
+		if ((intf == m->intf[i].id) &&
+		    (m->intf[i].type != INTF_NONE)) {
+			b->base_off = addr;
+			b->blk_off = m->intf[i].base;
+			b->length = m->intf[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = DPU_DBG_MASK_INTF;
+			return &m->intf[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
+		const struct intf_timing_params *p,
+		const struct dpu_format *fmt)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	u32 hsync_period, vsync_period;
+	u32 display_v_start, display_v_end;
+	u32 hsync_start_x, hsync_end_x;
+	u32 active_h_start, active_h_end;
+	u32 active_v_start, active_v_end;
+	u32 active_hctl, display_hctl, hsync_ctl;
+	u32 polarity_ctl, den_polarity, hsync_polarity, vsync_polarity;
+	u32 panel_format;
+	u32 intf_cfg;
+
+	/* read interface_cfg */
+	intf_cfg = DPU_REG_READ(c, INTF_CONFIG);
+	hsync_period = p->hsync_pulse_width + p->h_back_porch + p->width +
+			p->h_front_porch;
+	vsync_period = p->vsync_pulse_width + p->v_back_porch + p->height +
+			p->v_front_porch;
+
+	display_v_start = ((p->vsync_pulse_width + p->v_back_porch) *
+			hsync_period) + p->hsync_skew;
+	display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) +
+			p->hsync_skew - 1;
+
+	if (ctx->cap->type == INTF_EDP || ctx->cap->type == INTF_DP) {
+		display_v_start += p->hsync_pulse_width + p->h_back_porch;
+		display_v_end -= p->h_front_porch;
+	}
+
+	hsync_start_x = p->h_back_porch + p->hsync_pulse_width;
+	hsync_end_x = hsync_period - p->h_front_porch - 1;
+
+	if (p->width != p->xres) {
+		active_h_start = hsync_start_x;
+		active_h_end = active_h_start + p->xres - 1;
+	} else {
+		active_h_start = 0;
+		active_h_end = 0;
+	}
+
+	if (p->height != p->yres) {
+		active_v_start = display_v_start;
+		active_v_end = active_v_start + (p->yres * hsync_period) - 1;
+	} else {
+		active_v_start = 0;
+		active_v_end = 0;
+	}
+
+	if (active_h_end) {
+		active_hctl = (active_h_end << 16) | active_h_start;
+		intf_cfg |= BIT(29);	/* ACTIVE_H_ENABLE */
+	} else {
+		active_hctl = 0;
+	}
+
+	if (active_v_end)
+		intf_cfg |= BIT(30); /* ACTIVE_V_ENABLE */
+
+	hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
+	display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+	den_polarity = 0;
+	if (ctx->cap->type == INTF_HDMI) {
+		hsync_polarity = p->yres >= 720 ? 0 : 1;
+		vsync_polarity = p->yres >= 720 ? 0 : 1;
+	} else {
+		hsync_polarity = 0;
+		vsync_polarity = 0;
+	}
+	polarity_ctl = (den_polarity << 2) | /*  DEN Polarity  */
+		(vsync_polarity << 1) | /* VSYNC Polarity */
+		(hsync_polarity << 0);  /* HSYNC Polarity */
+
+	if (!DPU_FORMAT_IS_YUV(fmt))
+		panel_format = (fmt->bits[C0_G_Y] |
+				(fmt->bits[C1_B_Cb] << 2) |
+				(fmt->bits[C2_R_Cr] << 4) |
+				(0x21 << 8));
+	else
+		/* Interface treats all the pixel data in RGB888 format */
+		panel_format = (COLOR_8BIT |
+				(COLOR_8BIT << 2) |
+				(COLOR_8BIT << 4) |
+				(0x21 << 8));
+
+	DPU_REG_WRITE(c, INTF_HSYNC_CTL, hsync_ctl);
+	DPU_REG_WRITE(c, INTF_VSYNC_PERIOD_F0, vsync_period * hsync_period);
+	DPU_REG_WRITE(c, INTF_VSYNC_PULSE_WIDTH_F0,
+			p->vsync_pulse_width * hsync_period);
+	DPU_REG_WRITE(c, INTF_DISPLAY_HCTL, display_hctl);
+	DPU_REG_WRITE(c, INTF_DISPLAY_V_START_F0, display_v_start);
+	DPU_REG_WRITE(c, INTF_DISPLAY_V_END_F0, display_v_end);
+	DPU_REG_WRITE(c, INTF_ACTIVE_HCTL,  active_hctl);
+	DPU_REG_WRITE(c, INTF_ACTIVE_V_START_F0, active_v_start);
+	DPU_REG_WRITE(c, INTF_ACTIVE_V_END_F0, active_v_end);
+	DPU_REG_WRITE(c, INTF_BORDER_COLOR, p->border_clr);
+	DPU_REG_WRITE(c, INTF_UNDERFLOW_COLOR, p->underflow_clr);
+	DPU_REG_WRITE(c, INTF_HSYNC_SKEW, p->hsync_skew);
+	DPU_REG_WRITE(c, INTF_POLARITY_CTL, polarity_ctl);
+	DPU_REG_WRITE(c, INTF_FRAME_LINE_COUNT_EN, 0x3);
+	DPU_REG_WRITE(c, INTF_CONFIG, intf_cfg);
+	DPU_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
+}
+
+static void dpu_hw_intf_enable_timing_engine(
+		struct dpu_hw_intf *intf,
+		u8 enable)
+{
+	struct dpu_hw_blk_reg_map *c = &intf->hw;
+	/* Note: Display interface select is handled in top block hw layer */
+	DPU_REG_WRITE(c, INTF_TIMING_ENGINE_EN, enable != 0);
+}
+
+static void dpu_hw_intf_setup_prg_fetch(
+		struct dpu_hw_intf *intf,
+		const struct intf_prog_fetch *fetch)
+{
+	struct dpu_hw_blk_reg_map *c = &intf->hw;
+	int fetch_enable;
+
+	/*
+	 * Fetch should always be outside the active lines. If the fetch is
+	 * programmed within the active region, the hardware behavior is unknown.
+	 */
+
+	fetch_enable = DPU_REG_READ(c, INTF_CONFIG);
+	if (fetch->enable) {
+		fetch_enable |= BIT(31);
+		DPU_REG_WRITE(c, INTF_PROG_FETCH_START,
+				fetch->fetch_start);
+	} else {
+		fetch_enable &= ~BIT(31);
+	}
+
+	DPU_REG_WRITE(c, INTF_CONFIG, fetch_enable);
+}
+
+static void dpu_hw_intf_get_status(
+		struct dpu_hw_intf *intf,
+		struct intf_status *s)
+{
+	struct dpu_hw_blk_reg_map *c = &intf->hw;
+
+	s->is_en = DPU_REG_READ(c, INTF_TIMING_ENGINE_EN);
+	if (s->is_en) {
+		s->frame_count = DPU_REG_READ(c, INTF_FRAME_COUNT);
+		s->line_count = DPU_REG_READ(c, INTF_LINE_COUNT);
+	} else {
+		s->line_count = 0;
+		s->frame_count = 0;
+	}
+}
+
+static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf,
+						bool enable, u32 frame_count)
+{
+	struct dpu_hw_blk_reg_map *c = &intf->hw;
+	u32 config = 0;
+
+	DPU_REG_WRITE(c, INTF_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
+	/* clear misr data */
+	wmb();
+
+	if (enable)
+		config = (frame_count & MISR_FRAME_COUNT_MASK) |
+			MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
+
+	DPU_REG_WRITE(c, INTF_MISR_CTRL, config);
+}
+
+static u32 dpu_hw_intf_collect_misr(struct dpu_hw_intf *intf)
+{
+	struct dpu_hw_blk_reg_map *c = &intf->hw;
+
+	return DPU_REG_READ(c, INTF_MISR_SIGNATURE);
+}
+
+static u32 dpu_hw_intf_get_line_count(struct dpu_hw_intf *intf)
+{
+	struct dpu_hw_blk_reg_map *c;
+
+	if (!intf)
+		return 0;
+
+	c = &intf->hw;
+
+	return DPU_REG_READ(c, INTF_LINE_COUNT);
+}
+
+static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
+		unsigned long cap)
+{
+	ops->setup_timing_gen = dpu_hw_intf_setup_timing_engine;
+	ops->setup_prg_fetch  = dpu_hw_intf_setup_prg_fetch;
+	ops->get_status = dpu_hw_intf_get_status;
+	ops->enable_timing = dpu_hw_intf_enable_timing_engine;
+	ops->setup_misr = dpu_hw_intf_setup_misr;
+	ops->collect_misr = dpu_hw_intf_collect_misr;
+	ops->get_line_count = dpu_hw_intf_get_line_count;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
+struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *m)
+{
+	struct dpu_hw_intf *c;
+	struct dpu_intf_cfg *cfg;
+	int rc;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _intf_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		pr_err("failed to create dpu_hw_intf %d\n", idx);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/*
+	 * Assign ops
+	 */
+	c->idx = idx;
+	c->cap = cfg;
+	c->mdss = m;
+	_setup_intf_ops(&c->ops, c->cap->features);
+
+	rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_INTF, idx, &dpu_hw_ops);
+	if (rc) {
+		DPU_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
+	return c;
+
+blk_init_error:
+	kzfree(c);
+
+	return ERR_PTR(rc);
+}
+
+void dpu_hw_intf_destroy(struct dpu_hw_intf *intf)
+{
+	if (intf)
+		dpu_hw_blk_destroy(&intf->base);
+	kfree(intf);
+}
+
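Review note: the timing-engine arithmetic in dpu_hw_intf_setup_timing_engine()
is easier to sanity-check with concrete numbers. A sketch with hypothetical
1080p-style porch values (illustrative only, not taken from a real panel):

	struct intf_timing_params p = {
		.width = 1920, .height = 1080,
		.xres = 1920, .yres = 1080,
		.h_front_porch = 88, .h_back_porch = 148,
		.hsync_pulse_width = 44,
		.v_front_porch = 4, .v_back_porch = 36,
		.vsync_pulse_width = 5,
	};

	/* hsync_period = 44 + 148 + 1920 + 88 = 2200 pixels */
	u32 hsync_period = p.hsync_pulse_width + p.h_back_porch +
			p.width + p.h_front_porch;
	/* vsync_period = 5 + 36 + 1080 + 4 = 1125 lines */
	u32 vsync_period = p.vsync_pulse_width + p.v_back_porch +
			p.height + p.v_front_porch;
	/* display_v_start = (5 + 36) * 2200 + 0 = 90200 (hsync_skew = 0) */
	u32 display_v_start = ((p.vsync_pulse_width + p.v_back_porch) *
			hsync_period) + p.hsync_skew;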
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
new file mode 100644
index 0000000..a79d735
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -0,0 +1,128 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_INTF_H
+#define _DPU_HW_INTF_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_intf;
+
+/* intf timing settings */
+struct intf_timing_params {
+	u32 width;		/* active width */
+	u32 height;		/* active height */
+	u32 xres;		/* Display panel width */
+	u32 yres;		/* Display panel height */
+
+	u32 h_back_porch;
+	u32 h_front_porch;
+	u32 v_back_porch;
+	u32 v_front_porch;
+	u32 hsync_pulse_width;
+	u32 vsync_pulse_width;
+	u32 hsync_polarity;
+	u32 vsync_polarity;
+	u32 border_clr;
+	u32 underflow_clr;
+	u32 hsync_skew;
+};
+
+struct intf_prog_fetch {
+	u8 enable;
+	/* vsync counter for the front porch pixel line */
+	u32 fetch_start;
+};
+
+struct intf_status {
+	u8 is_en;		/* interface timing engine is enabled or not */
+	u32 frame_count;	/* frame count since timing engine enabled */
+	u32 line_count;		/* current line count including blanking */
+};
+
+/**
+ * struct dpu_hw_intf_ops : Interface to the interface Hw driver functions
+ *  These functions are assumed to be called after clocks are enabled.
+ * @ setup_timing_gen : programs the timing engine
+ * @ setup_prg_fetch : enables/disables the programmable fetch logic
+ * @ enable_timing: enable/disable timing engine
+ * @ get_status: returns if timing engine is enabled or not
+ * @ setup_misr: enables/disables MISR in HW register
+ * @ collect_misr: reads and stores MISR data from HW register
+ * @ get_line_count: reads current vertical line counter
+ */
+struct dpu_hw_intf_ops {
+	void (*setup_timing_gen)(struct dpu_hw_intf *intf,
+			const struct intf_timing_params *p,
+			const struct dpu_format *fmt);
+
+	void (*setup_prg_fetch)(struct dpu_hw_intf *intf,
+			const struct intf_prog_fetch *fetch);
+
+	void (*enable_timing)(struct dpu_hw_intf *intf,
+			u8 enable);
+
+	void (*get_status)(struct dpu_hw_intf *intf,
+			struct intf_status *status);
+
+	void (*setup_misr)(struct dpu_hw_intf *intf,
+			bool enable, u32 frame_count);
+
+	u32 (*collect_misr)(struct dpu_hw_intf *intf);
+
+	u32 (*get_line_count)(struct dpu_hw_intf *intf);
+};
+
+struct dpu_hw_intf {
+	struct dpu_hw_blk base;
+	struct dpu_hw_blk_reg_map hw;
+
+	/* intf */
+	enum dpu_intf idx;
+	const struct dpu_intf_cfg *cap;
+	const struct dpu_mdss_cfg *mdss;
+
+	/* ops */
+	struct dpu_hw_intf_ops ops;
+};
+
+/**
+ * to_dpu_hw_intf - convert base object dpu_hw_base to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_intf *to_dpu_hw_intf(struct dpu_hw_blk *hw)
+{
+	return container_of(hw, struct dpu_hw_intf, base);
+}
+
+/**
+ * dpu_hw_intf_init(): Initializes the intf driver for the passed
+ * interface idx.
+ * @idx:  interface index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m :   pointer to mdss catalog data
+ */
+struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_intf_destroy(): Destroys INTF driver context
+ * @intf:   Pointer to INTF driver context
+ */
+void dpu_hw_intf_destroy(struct dpu_hw_intf *intf);
+
+#endif /*_DPU_HW_INTF_H */
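Review note: a hedged usage sketch for the INTF API above; "mmio", "catalog",
"timing" and "fmt" are assumed to be set up by the caller and are not part of
this patch:

	struct dpu_hw_intf *intf;
	struct intf_status status;

	intf = dpu_hw_intf_init(INTF_1, mmio, catalog);
	if (IS_ERR(intf))
		return PTR_ERR(intf);

	/* program and start the timing engine, then poll its state */
	intf->ops.setup_timing_gen(intf, &timing, fmt);
	intf->ops.enable_timing(intf, 1);
	intf->ops.get_status(intf, &status);

	/* ... */

	dpu_hw_intf_destroy(intf);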
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
new file mode 100644
index 0000000..4ab72b0
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -0,0 +1,261 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_kms.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define LM_OP_MODE                        0x00
+#define LM_OUT_SIZE                       0x04
+#define LM_BORDER_COLOR_0                 0x08
+#define LM_BORDER_COLOR_1                 0x010
+
+/* These registers are offsets from mixer base + stage base */
+#define LM_BLEND0_OP                     0x00
+#define LM_BLEND0_CONST_ALPHA            0x04
+#define LM_FG_COLOR_FILL_COLOR_0         0x08
+#define LM_FG_COLOR_FILL_COLOR_1         0x0C
+#define LM_FG_COLOR_FILL_SIZE            0x10
+#define LM_FG_COLOR_FILL_XY              0x14
+
+#define LM_BLEND0_FG_ALPHA               0x04
+#define LM_BLEND0_BG_ALPHA               0x08
+
+#define LM_MISR_CTRL			0x310
+#define LM_MISR_SIGNATURE		0x314
+
+static struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
+		struct dpu_mdss_cfg *m,
+		void __iomem *addr,
+		struct dpu_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->mixer_count; i++) {
+		if (mixer == m->mixer[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->mixer[i].base;
+			b->length = m->mixer[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = DPU_DBG_MASK_LM;
+			return &m->mixer[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+/**
+ * _stage_offset(): returns the relative offset of the blend registers
+ * for the stage to be set up
+ * @ctx:   mixer context containing the mixer to be programmed
+ * @stage: stage index to setup
+ */
+static inline int _stage_offset(struct dpu_hw_mixer *ctx, enum dpu_stage stage)
+{
+	const struct dpu_lm_sub_blks *sblk = ctx->cap->sblk;
+	int rc;
+
+	if (stage == DPU_STAGE_BASE)
+		rc = -EINVAL;
+	else if (stage <= sblk->maxblendstages)
+		rc = sblk->blendstage_base[stage - DPU_STAGE_0];
+	else
+		rc = -EINVAL;
+
+	return rc;
+}
+
+static void dpu_hw_lm_setup_out(struct dpu_hw_mixer *ctx,
+		struct dpu_hw_mixer_cfg *mixer)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	u32 outsize;
+	u32 op_mode;
+
+	op_mode = DPU_REG_READ(c, LM_OP_MODE);
+
+	outsize = mixer->out_height << 16 | mixer->out_width;
+	DPU_REG_WRITE(c, LM_OUT_SIZE, outsize);
+
+	/* SPLIT_LEFT_RIGHT */
+	if (mixer->right_mixer)
+		op_mode |= BIT(31);
+	else
+		op_mode &= ~BIT(31);
+	DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
+}
+
+static void dpu_hw_lm_setup_border_color(struct dpu_hw_mixer *ctx,
+		struct dpu_mdss_color *color,
+		u8 border_en)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+	if (border_en) {
+		DPU_REG_WRITE(c, LM_BORDER_COLOR_0,
+			(color->color_0 & 0xFFF) |
+			((color->color_1 & 0xFFF) << 0x10));
+		DPU_REG_WRITE(c, LM_BORDER_COLOR_1,
+			(color->color_2 & 0xFFF) |
+			((color->color_3 & 0xFFF) << 0x10));
+	}
+}
+
+static void dpu_hw_lm_setup_blend_config_sdm845(struct dpu_hw_mixer *ctx,
+	u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	int stage_off;
+	u32 const_alpha;
+
+	if (stage == DPU_STAGE_BASE)
+		return;
+
+	stage_off = _stage_offset(ctx, stage);
+	if (WARN_ON(stage_off < 0))
+		return;
+
+	const_alpha = (bg_alpha & 0xFF) | ((fg_alpha & 0xFF) << 16);
+	DPU_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, const_alpha);
+	DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
+static void dpu_hw_lm_setup_blend_config(struct dpu_hw_mixer *ctx,
+	u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	int stage_off;
+
+	if (stage == DPU_STAGE_BASE)
+		return;
+
+	stage_off = _stage_offset(ctx, stage);
+	if (WARN_ON(stage_off < 0))
+		return;
+
+	DPU_REG_WRITE(c, LM_BLEND0_FG_ALPHA + stage_off, fg_alpha);
+	DPU_REG_WRITE(c, LM_BLEND0_BG_ALPHA + stage_off, bg_alpha);
+	DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
+static void dpu_hw_lm_setup_color3(struct dpu_hw_mixer *ctx,
+	uint32_t mixer_op_mode)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	int op_mode;
+
+	/* read the existing op_mode configuration */
+	op_mode = DPU_REG_READ(c, LM_OP_MODE);
+
+	op_mode = (op_mode & (BIT(31) | BIT(30))) | mixer_op_mode;
+
+	DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
+}
+
+static void dpu_hw_lm_gc(struct dpu_hw_mixer *mixer,
+			void *cfg)
+{
+}
+
+static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx,
+				bool enable, u32 frame_count)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	u32 config = 0;
+
+	DPU_REG_WRITE(c, LM_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
+	/* clear misr data */
+	wmb();
+
+	if (enable)
+		config = (frame_count & MISR_FRAME_COUNT_MASK) |
+			MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
+
+	DPU_REG_WRITE(c, LM_MISR_CTRL, config);
+}
+
+static u32 dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+	return DPU_REG_READ(c, LM_MISR_SIGNATURE);
+}
+
+static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
+		struct dpu_hw_lm_ops *ops,
+		unsigned long features)
+{
+	ops->setup_mixer_out = dpu_hw_lm_setup_out;
+	if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion))
+		ops->setup_blend_config = dpu_hw_lm_setup_blend_config_sdm845;
+	else
+		ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
+	ops->setup_alpha_out = dpu_hw_lm_setup_color3;
+	ops->setup_border_color = dpu_hw_lm_setup_border_color;
+	ops->setup_gc = dpu_hw_lm_gc;
+	ops->setup_misr = dpu_hw_lm_setup_misr;
+	ops->collect_misr = dpu_hw_lm_collect_misr;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
+struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *m)
+{
+	struct dpu_hw_mixer *c;
+	struct dpu_lm_cfg *cfg;
+	int rc;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _lm_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Assign ops */
+	c->idx = idx;
+	c->cap = cfg;
+	_setup_mixer_ops(m, &c->ops, c->cap->features);
+
+	rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_LM, idx, &dpu_hw_ops);
+	if (rc) {
+		DPU_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
+	return c;
+
+blk_init_error:
+	kzfree(c);
+
+	return ERR_PTR(rc);
+}
+
+void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm)
+{
+	if (lm)
+		dpu_hw_blk_destroy(&lm->base);
+	kfree(lm);
+}
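Review note: one detail in the sdm845 blend path above is that both constant
alphas share a single register. A sketch of the packing, with illustrative
values only:

	/* LM_BLEND0_CONST_ALPHA packs bg alpha in bits [7:0] and fg
	 * alpha in bits [23:16], matching
	 * dpu_hw_lm_setup_blend_config_sdm845()
	 */
	u32 fg_alpha = 0xFF;	/* opaque foreground */
	u32 bg_alpha = 0x40;	/* dimmed background */
	u32 const_alpha = (bg_alpha & 0xFF) | ((fg_alpha & 0xFF) << 16);
	/* const_alpha == 0x00FF0040 */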
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
new file mode 100644
index 0000000..e29e5da
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -0,0 +1,122 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_LM_H
+#define _DPU_HW_LM_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_mixer;
+
+struct dpu_hw_mixer_cfg {
+	u32 out_width;
+	u32 out_height;
+	bool right_mixer;
+	int flags;
+};
+
+struct dpu_hw_color3_cfg {
+	u8 keep_fg[DPU_STAGE_MAX];
+};
+
+/**
+ *
+ * struct dpu_hw_lm_ops : Interface to the mixer Hw driver functions
+ *  These functions are assumed to be called after clocks are enabled.
+ */
+struct dpu_hw_lm_ops {
+	/*
+	 * Sets up mixer output width and height
+	 * and border color if enabled
+	 */
+	void (*setup_mixer_out)(struct dpu_hw_mixer *ctx,
+		struct dpu_hw_mixer_cfg *cfg);
+
+	/*
+	 * Alpha blending configuration
+	 * for the specified stage
+	 */
+	void (*setup_blend_config)(struct dpu_hw_mixer *ctx, uint32_t stage,
+		uint32_t fg_alpha, uint32_t bg_alpha, uint32_t blend_op);
+
+	/*
+	 * Alpha color component selection from either fg or bg
+	 */
+	void (*setup_alpha_out)(struct dpu_hw_mixer *ctx, uint32_t mixer_op);
+
+	/**
+	 * setup_border_color : enable/disable border color
+	 */
+	void (*setup_border_color)(struct dpu_hw_mixer *ctx,
+		struct dpu_mdss_color *color,
+		u8 border_en);
+	/**
+	 * setup_gc : enable/disable gamma correction feature
+	 */
+	void (*setup_gc)(struct dpu_hw_mixer *mixer,
+			void *cfg);
+
+	/* setup_misr: enables/disables MISR in HW register */
+	void (*setup_misr)(struct dpu_hw_mixer *ctx,
+			bool enable, u32 frame_count);
+
+	/* collect_misr: reads and stores MISR data from HW register */
+	u32 (*collect_misr)(struct dpu_hw_mixer *ctx);
+};
+
+struct dpu_hw_mixer {
+	struct dpu_hw_blk base;
+	struct dpu_hw_blk_reg_map hw;
+
+	/* lm */
+	enum dpu_lm  idx;
+	const struct dpu_lm_cfg   *cap;
+	const struct dpu_mdp_cfg  *mdp;
+	const struct dpu_ctl_cfg  *ctl;
+
+	/* ops */
+	struct dpu_hw_lm_ops ops;
+
+	/* store mixer info specific to display */
+	struct dpu_hw_mixer_cfg cfg;
+};
+
+/**
+ * to_dpu_hw_mixer - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_mixer *to_dpu_hw_mixer(struct dpu_hw_blk *hw)
+{
+	return container_of(hw, struct dpu_hw_mixer, base);
+}
+
+/**
+ * dpu_hw_lm_init(): Initializes the mixer hw driver object.
+ * Should be called once before accessing each mixer.
+ * @idx:  mixer index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m :   pointer to mdss catalog data
+ */
+struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_lm_destroy(): Destroys layer mixer driver context
+ * @lm:   Pointer to LM driver context
+ */
+void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm);
+
+#endif /*_DPU_HW_LM_H */
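Review note: a hedged sketch of composing a blend_op for setup_blend_config()
from the DPU_BLEND_* defines that dpu_hw_mdss.h introduces below. A
premultiplied-alpha stage commonly takes a constant fg alpha and the inverted
fg pixel alpha for bg; the alpha values here are illustrative:

	/* constant fg alpha; bg weighted by inverted fg pixel alpha */
	u32 blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
			DPU_BLEND_BG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_INV_ALPHA;

	mixer->ops.setup_blend_config(mixer, DPU_STAGE_0,
			0xFF /* fg */, 0x00 /* bg */, blend_op);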
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
new file mode 100644
index 0000000..35e6bf9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -0,0 +1,465 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_MDSS_H
+#define _DPU_HW_MDSS_H
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+
+#include "msm_drv.h"
+
+#define DPU_DBG_NAME			"dpu"
+
+#define DPU_NONE                        0
+
+#ifndef DPU_CSC_MATRIX_COEFF_SIZE
+#define DPU_CSC_MATRIX_COEFF_SIZE	9
+#endif
+
+#ifndef DPU_CSC_CLAMP_SIZE
+#define DPU_CSC_CLAMP_SIZE		6
+#endif
+
+#ifndef DPU_CSC_BIAS_SIZE
+#define DPU_CSC_BIAS_SIZE		3
+#endif
+
+#ifndef DPU_MAX_PLANES
+#define DPU_MAX_PLANES			4
+#endif
+
+#define PIPES_PER_STAGE			2
+#ifndef DPU_MAX_DE_CURVES
+#define DPU_MAX_DE_CURVES		3
+#endif
+
+enum dpu_format_flags {
+	DPU_FORMAT_FLAG_YUV_BIT,
+	DPU_FORMAT_FLAG_DX_BIT,
+	DPU_FORMAT_FLAG_COMPRESSED_BIT,
+	DPU_FORMAT_FLAG_BIT_MAX,
+};
+
+#define DPU_FORMAT_FLAG_YUV		BIT(DPU_FORMAT_FLAG_YUV_BIT)
+#define DPU_FORMAT_FLAG_DX		BIT(DPU_FORMAT_FLAG_DX_BIT)
+#define DPU_FORMAT_FLAG_COMPRESSED	BIT(DPU_FORMAT_FLAG_COMPRESSED_BIT)
+#define DPU_FORMAT_IS_YUV(X)		\
+	(test_bit(DPU_FORMAT_FLAG_YUV_BIT, (X)->flag))
+#define DPU_FORMAT_IS_DX(X)		\
+	(test_bit(DPU_FORMAT_FLAG_DX_BIT, (X)->flag))
+#define DPU_FORMAT_IS_LINEAR(X)		((X)->fetch_mode == DPU_FETCH_LINEAR)
+#define DPU_FORMAT_IS_TILE(X) \
+	(((X)->fetch_mode == DPU_FETCH_UBWC) && \
+			!test_bit(DPU_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
+#define DPU_FORMAT_IS_UBWC(X) \
+	(((X)->fetch_mode == DPU_FETCH_UBWC) && \
+			test_bit(DPU_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
+
+#define DPU_BLEND_FG_ALPHA_FG_CONST	(0 << 0)
+#define DPU_BLEND_FG_ALPHA_BG_CONST	(1 << 0)
+#define DPU_BLEND_FG_ALPHA_FG_PIXEL	(2 << 0)
+#define DPU_BLEND_FG_ALPHA_BG_PIXEL	(3 << 0)
+#define DPU_BLEND_FG_INV_ALPHA		(1 << 2)
+#define DPU_BLEND_FG_MOD_ALPHA		(1 << 3)
+#define DPU_BLEND_FG_INV_MOD_ALPHA	(1 << 4)
+#define DPU_BLEND_FG_TRANSP_EN		(1 << 5)
+#define DPU_BLEND_BG_ALPHA_FG_CONST	(0 << 8)
+#define DPU_BLEND_BG_ALPHA_BG_CONST	(1 << 8)
+#define DPU_BLEND_BG_ALPHA_FG_PIXEL	(2 << 8)
+#define DPU_BLEND_BG_ALPHA_BG_PIXEL	(3 << 8)
+#define DPU_BLEND_BG_INV_ALPHA		(1 << 10)
+#define DPU_BLEND_BG_MOD_ALPHA		(1 << 11)
+#define DPU_BLEND_BG_INV_MOD_ALPHA	(1 << 12)
+#define DPU_BLEND_BG_TRANSP_EN		(1 << 13)
+
+#define DPU_VSYNC0_SOURCE_GPIO		0
+#define DPU_VSYNC1_SOURCE_GPIO		1
+#define DPU_VSYNC2_SOURCE_GPIO		2
+#define DPU_VSYNC_SOURCE_INTF_0		3
+#define DPU_VSYNC_SOURCE_INTF_1		4
+#define DPU_VSYNC_SOURCE_INTF_2		5
+#define DPU_VSYNC_SOURCE_INTF_3		6
+#define DPU_VSYNC_SOURCE_WD_TIMER_4	11
+#define DPU_VSYNC_SOURCE_WD_TIMER_3	12
+#define DPU_VSYNC_SOURCE_WD_TIMER_2	13
+#define DPU_VSYNC_SOURCE_WD_TIMER_1	14
+#define DPU_VSYNC_SOURCE_WD_TIMER_0	15
+
+enum dpu_hw_blk_type {
+	DPU_HW_BLK_TOP = 0,
+	DPU_HW_BLK_SSPP,
+	DPU_HW_BLK_LM,
+	DPU_HW_BLK_CTL,
+	DPU_HW_BLK_CDM,
+	DPU_HW_BLK_PINGPONG,
+	DPU_HW_BLK_INTF,
+	DPU_HW_BLK_WB,
+	DPU_HW_BLK_MAX,
+};
+
+enum dpu_mdp {
+	MDP_TOP = 0x1,
+	MDP_MAX,
+};
+
+enum dpu_sspp {
+	SSPP_NONE,
+	SSPP_VIG0,
+	SSPP_VIG1,
+	SSPP_VIG2,
+	SSPP_VIG3,
+	SSPP_RGB0,
+	SSPP_RGB1,
+	SSPP_RGB2,
+	SSPP_RGB3,
+	SSPP_DMA0,
+	SSPP_DMA1,
+	SSPP_DMA2,
+	SSPP_DMA3,
+	SSPP_CURSOR0,
+	SSPP_CURSOR1,
+	SSPP_MAX
+};
+
+enum dpu_sspp_type {
+	SSPP_TYPE_VIG,
+	SSPP_TYPE_RGB,
+	SSPP_TYPE_DMA,
+	SSPP_TYPE_CURSOR,
+	SSPP_TYPE_MAX
+};
+
+enum dpu_lm {
+	LM_0 = 1,
+	LM_1,
+	LM_2,
+	LM_3,
+	LM_4,
+	LM_5,
+	LM_6,
+	LM_MAX
+};
+
+enum dpu_stage {
+	DPU_STAGE_BASE = 0,
+	DPU_STAGE_0,
+	DPU_STAGE_1,
+	DPU_STAGE_2,
+	DPU_STAGE_3,
+	DPU_STAGE_4,
+	DPU_STAGE_5,
+	DPU_STAGE_6,
+	DPU_STAGE_7,
+	DPU_STAGE_8,
+	DPU_STAGE_9,
+	DPU_STAGE_10,
+	DPU_STAGE_MAX
+};
+enum dpu_dspp {
+	DSPP_0 = 1,
+	DSPP_1,
+	DSPP_2,
+	DSPP_3,
+	DSPP_MAX
+};
+
+enum dpu_ds {
+	DS_TOP,
+	DS_0,
+	DS_1,
+	DS_MAX
+};
+
+enum dpu_ctl {
+	CTL_0 = 1,
+	CTL_1,
+	CTL_2,
+	CTL_3,
+	CTL_4,
+	CTL_MAX
+};
+
+enum dpu_cdm {
+	CDM_0 = 1,
+	CDM_1,
+	CDM_MAX
+};
+
+enum dpu_pingpong {
+	PINGPONG_0 = 1,
+	PINGPONG_1,
+	PINGPONG_2,
+	PINGPONG_3,
+	PINGPONG_4,
+	PINGPONG_S0,
+	PINGPONG_MAX
+};
+
+enum dpu_intf {
+	INTF_0 = 1,
+	INTF_1,
+	INTF_2,
+	INTF_3,
+	INTF_4,
+	INTF_5,
+	INTF_6,
+	INTF_MAX
+};
+
+enum dpu_intf_type {
+	INTF_NONE = 0x0,
+	INTF_DSI = 0x1,
+	INTF_HDMI = 0x3,
+	INTF_LCDC = 0x5,
+	INTF_EDP = 0x9,
+	INTF_DP = 0xa,
+	INTF_TYPE_MAX,
+
+	/* virtual interfaces */
+	INTF_WB = 0x100,
+};
+
+enum dpu_intf_mode {
+	INTF_MODE_NONE = 0,
+	INTF_MODE_CMD,
+	INTF_MODE_VIDEO,
+	INTF_MODE_WB_BLOCK,
+	INTF_MODE_WB_LINE,
+	INTF_MODE_MAX
+};
+
+enum dpu_wb {
+	WB_0 = 1,
+	WB_1,
+	WB_2,
+	WB_3,
+	WB_MAX
+};
+
+enum dpu_ad {
+	AD_0 = 0x1,
+	AD_1,
+	AD_MAX
+};
+
+enum dpu_cwb {
+	CWB_0 = 0x1,
+	CWB_1,
+	CWB_2,
+	CWB_3,
+	CWB_MAX
+};
+
+enum dpu_wd_timer {
+	WD_TIMER_0 = 0x1,
+	WD_TIMER_1,
+	WD_TIMER_2,
+	WD_TIMER_3,
+	WD_TIMER_4,
+	WD_TIMER_5,
+	WD_TIMER_MAX
+};
+
+enum dpu_vbif {
+	VBIF_0,
+	VBIF_1,
+	VBIF_MAX,
+	VBIF_RT = VBIF_0,
+	VBIF_NRT = VBIF_1
+};
+
+enum dpu_iommu_domain {
+	DPU_IOMMU_DOMAIN_UNSECURE,
+	DPU_IOMMU_DOMAIN_SECURE,
+	DPU_IOMMU_DOMAIN_MAX
+};
+
+/**
+ * DPU HW component order color map
+ */
+enum {
+	C0_G_Y = 0,
+	C1_B_Cb = 1,
+	C2_R_Cr = 2,
+	C3_ALPHA = 3
+};
+
+/**
+ * enum dpu_plane_type - defines the color component pixel packing
+ * @DPU_PLANE_INTERLEAVED   : Color components in single plane
+ * @DPU_PLANE_PLANAR        : Color component in separate planes
+ * @DPU_PLANE_PSEUDO_PLANAR : Chroma components interleaved in separate plane
+ */
+enum dpu_plane_type {
+	DPU_PLANE_INTERLEAVED,
+	DPU_PLANE_PLANAR,
+	DPU_PLANE_PSEUDO_PLANAR,
+};
+
+/**
+ * enum dpu_chroma_samp_type - chroma sub-sampling type
+ * @DPU_CHROMA_RGB   : No chroma subsampling
+ * @DPU_CHROMA_H2V1  : Chroma pixels are horizontally subsampled
+ * @DPU_CHROMA_H1V2  : Chroma pixels are vertically subsampled
+ * @DPU_CHROMA_420   : 420 subsampling
+ */
+enum dpu_chroma_samp_type {
+	DPU_CHROMA_RGB,
+	DPU_CHROMA_H2V1,
+	DPU_CHROMA_H1V2,
+	DPU_CHROMA_420
+};
+
+/**
+ * dpu_fetch_type - defines how DPU HW fetches data
+ * @DPU_FETCH_LINEAR   : fetch is line by line
+ * @DPU_FETCH_TILE     : fetches data in Z order from a tile
+ * @DPU_FETCH_UBWC     : fetch and decompress data
+ */
+enum dpu_fetch_type {
+	DPU_FETCH_LINEAR,
+	DPU_FETCH_TILE,
+	DPU_FETCH_UBWC
+};
+
+/**
+ * Enum values are chosen to fit the number of bits
+ * expected by the HW programming.
+ */
+enum {
+	COLOR_ALPHA_1BIT = 0,
+	COLOR_ALPHA_4BIT = 1,
+	COLOR_4BIT = 0,
+	COLOR_5BIT = 1, /* No 5-bit Alpha */
+	COLOR_6BIT = 2, /* 6-Bit Alpha also = 2 */
+	COLOR_8BIT = 3, /* 8-Bit Alpha also = 3 */
+};
+
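+/*
+ * For instance (editor's illustration): an ARGB8888 format would program
+ * the value 3 for all four components, since COLOR_8BIT and 8-bit alpha
+ * share the encoding 3 above.
+ */
+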
+/**
+ * enum dpu_3d_blend_mode
+ * Describes how the 3d data is blended
+ * @BLEND_3D_NONE      : 3d blending not enabled
+ * @BLEND_3D_FRAME_INT : Frame interleaving
+ * @BLEND_3D_H_ROW_INT : Horizontal row interleaving
+ * @BLEND_3D_V_ROW_INT : Vertical row interleaving
+ * @BLEND_3D_COL_INT   : Column interleaving
+ * @BLEND_3D_MAX       : Range sentinel
+ */
+enum dpu_3d_blend_mode {
+	BLEND_3D_NONE = 0,
+	BLEND_3D_FRAME_INT,
+	BLEND_3D_H_ROW_INT,
+	BLEND_3D_V_ROW_INT,
+	BLEND_3D_COL_INT,
+	BLEND_3D_MAX
+};
+
+/** struct dpu_format - defines the format configuration which
+ * allows DPU HW to correctly fetch and decode the format
+ * @base: base msm_format structure containing fourcc code
+ * @fetch_planes: how the color components are packed in pixel format
+ * @element: element color ordering
+ * @bits: element bit widths
+ * @chroma_sample: chroma sub-sampling type
+ * @unpack_align_msb: unpack aligned, 0 to LSB, 1 to MSB
+ * @unpack_tight: 0 for loose, 1 for tight
+ * @unpack_count: 0 = 1 component, 1 = 2 component
+ * @bpp: bytes per pixel
+ * @alpha_enable: whether the format has an alpha channel
+ * @num_planes: number of planes (including meta data planes)
+ * @fetch_mode: linear, tiled, or ubwc hw fetch behavior
+ * @is_yuv: is format a yuv variant
+ * @flag: usage bit flags
+ * @tile_width: format tile width
+ * @tile_height: format tile height
+ */
+struct dpu_format {
+	struct msm_format base;
+	enum dpu_plane_type fetch_planes;
+	u8 element[DPU_MAX_PLANES];
+	u8 bits[DPU_MAX_PLANES];
+	enum dpu_chroma_samp_type chroma_sample;
+	u8 unpack_align_msb;
+	u8 unpack_tight;
+	u8 unpack_count;
+	u8 bpp;
+	u8 alpha_enable;
+	u8 num_planes;
+	enum dpu_fetch_type fetch_mode;
+	DECLARE_BITMAP(flag, DPU_FORMAT_FLAG_BIT_MAX);
+	u16 tile_width;
+	u16 tile_height;
+};
+#define to_dpu_format(x) container_of(x, struct dpu_format, base)
+
+/**
+ * struct dpu_hw_fmt_layout - format information of the source pixel data
+ * @format: pixel format parameters
+ * @num_planes: number of planes (including meta data planes)
+ * @width: image width
+ * @height: image height
+ * @total_size: total size in bytes
+ * @plane_addr: address of each plane
+ * @plane_size: length of each plane
+ * @plane_pitch: pitch of each plane
+ */
+struct dpu_hw_fmt_layout {
+	const struct dpu_format *format;
+	uint32_t num_planes;
+	uint32_t width;
+	uint32_t height;
+	uint32_t total_size;
+	uint32_t plane_addr[DPU_MAX_PLANES];
+	uint32_t plane_size[DPU_MAX_PLANES];
+	uint32_t plane_pitch[DPU_MAX_PLANES];
+};
+
+struct dpu_csc_cfg {
+	/* matrix coefficients in S15.16 format */
+	uint32_t csc_mv[DPU_CSC_MATRIX_COEFF_SIZE];
+	uint32_t csc_pre_bv[DPU_CSC_BIAS_SIZE];
+	uint32_t csc_post_bv[DPU_CSC_BIAS_SIZE];
+	uint32_t csc_pre_lv[DPU_CSC_CLAMP_SIZE];
+	uint32_t csc_post_lv[DPU_CSC_CLAMP_SIZE];
+};
+
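+/*
+ * Illustrative sketch (editor's example, not a table used by the driver):
+ * an S15.16 coefficient is encoded as round(value * 65536), so 1.0 is
+ * 0x00010000 and 0.5 is 0x00008000.  A pass-through matrix would be:
+ *
+ *	static const struct dpu_csc_cfg csc_identity = {
+ *		.csc_mv = { 0x00010000, 0x0, 0x0,
+ *			    0x0, 0x00010000, 0x0,
+ *			    0x0, 0x0, 0x00010000 },
+ *	};
+ */
+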
+/**
+ * struct dpu_mdss_color - mdss color description
+ * color 0 : green
+ * color 1 : blue
+ * color 2 : red
+ * color 3 : alpha
+ */
+struct dpu_mdss_color {
+	u32 color_0;
+	u32 color_1;
+	u32 color_2;
+	u32 color_3;
+};
+
+/*
+ * Define bit masks for h/w logging.
+ */
+#define DPU_DBG_MASK_NONE     (1 << 0)
+#define DPU_DBG_MASK_CDM      (1 << 1)
+#define DPU_DBG_MASK_INTF     (1 << 2)
+#define DPU_DBG_MASK_LM       (1 << 3)
+#define DPU_DBG_MASK_CTL      (1 << 4)
+#define DPU_DBG_MASK_PINGPONG (1 << 5)
+#define DPU_DBG_MASK_SSPP     (1 << 6)
+#define DPU_DBG_MASK_WB       (1 << 7)
+#define DPU_DBG_MASK_TOP      (1 << 8)
+#define DPU_DBG_MASK_VBIF     (1 << 9)
+#define DPU_DBG_MASK_ROT      (1 << 10)
+
+#endif  /* _DPU_HW_MDSS_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
new file mode 100644
index 0000000..cc3a623
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -0,0 +1,250 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/iopoll.h>
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+#include "dpu_trace.h"
+
+#define PP_TEAR_CHECK_EN                0x000
+#define PP_SYNC_CONFIG_VSYNC            0x004
+#define PP_SYNC_CONFIG_HEIGHT           0x008
+#define PP_SYNC_WRCOUNT                 0x00C
+#define PP_VSYNC_INIT_VAL               0x010
+#define PP_INT_COUNT_VAL                0x014
+#define PP_SYNC_THRESH                  0x018
+#define PP_START_POS                    0x01C
+#define PP_RD_PTR_IRQ                   0x020
+#define PP_WR_PTR_IRQ                   0x024
+#define PP_OUT_LINE_COUNT               0x028
+#define PP_LINE_COUNT                   0x02C
+
+#define PP_FBC_MODE                     0x034
+#define PP_FBC_BUDGET_CTL               0x038
+#define PP_FBC_LOSSY_MODE               0x03C
+
+static struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp,
+		struct dpu_mdss_cfg *m,
+		void __iomem *addr,
+		struct dpu_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->pingpong_count; i++) {
+		if (pp == m->pingpong[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->pingpong[i].base;
+			b->length = m->pingpong[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = DPU_DBG_MASK_PINGPONG;
+			return &m->pingpong[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+static int dpu_hw_pp_setup_te_config(struct dpu_hw_pingpong *pp,
+		struct dpu_hw_tear_check *te)
+{
+	struct dpu_hw_blk_reg_map *c;
+	int cfg;
+
+	if (!pp || !te)
+		return -EINVAL;
+	c = &pp->hw;
+
+	cfg = BIT(19); /* VSYNC_COUNTER_EN */
+	if (te->hw_vsync_mode)
+		cfg |= BIT(20);
+
+	cfg |= te->vsync_count;
+
+	DPU_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
+	DPU_REG_WRITE(c, PP_SYNC_CONFIG_HEIGHT, te->sync_cfg_height);
+	DPU_REG_WRITE(c, PP_VSYNC_INIT_VAL, te->vsync_init_val);
+	DPU_REG_WRITE(c, PP_RD_PTR_IRQ, te->rd_ptr_irq);
+	DPU_REG_WRITE(c, PP_START_POS, te->start_pos);
+	DPU_REG_WRITE(c, PP_SYNC_THRESH,
+			((te->sync_threshold_continue << 16) |
+			 te->sync_threshold_start));
+	DPU_REG_WRITE(c, PP_SYNC_WRCOUNT,
+			(te->start_pos + te->sync_threshold_start + 1));
+
+	return 0;
+}
+
+static int dpu_hw_pp_poll_timeout_wr_ptr(struct dpu_hw_pingpong *pp,
+		u32 timeout_us)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 val;
+	int rc;
+
+	if (!pp)
+		return -EINVAL;
+
+	c = &pp->hw;
+	rc = readl_poll_timeout(c->base_off + c->blk_off + PP_LINE_COUNT,
+			val, (val & 0xffff) >= 1, 10, timeout_us);
+
+	return rc;
+}
+
+static int dpu_hw_pp_enable_te(struct dpu_hw_pingpong *pp, bool enable)
+{
+	struct dpu_hw_blk_reg_map *c;
+
+	if (!pp)
+		return -EINVAL;
+	c = &pp->hw;
+
+	DPU_REG_WRITE(c, PP_TEAR_CHECK_EN, enable);
+	return 0;
+}
+
+static int dpu_hw_pp_connect_external_te(struct dpu_hw_pingpong *pp,
+		bool enable_external_te)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 cfg;
+	int orig;
+
+	if (!pp)
+		return -EINVAL;
+
+	c = &pp->hw;
+	cfg = DPU_REG_READ(c, PP_SYNC_CONFIG_VSYNC);
+	orig = (bool)(cfg & BIT(20));
+	if (enable_external_te)
+		cfg |= BIT(20);
+	else
+		cfg &= ~BIT(20);
+	DPU_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
+	trace_dpu_pp_connect_ext_te(pp->idx - PINGPONG_0, cfg);
+
+	return orig;
+}
+
+static int dpu_hw_pp_get_vsync_info(struct dpu_hw_pingpong *pp,
+		struct dpu_hw_pp_vsync_info *info)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 val;
+
+	if (!pp || !info)
+		return -EINVAL;
+	c = &pp->hw;
+
+	val = DPU_REG_READ(c, PP_VSYNC_INIT_VAL);
+	info->rd_ptr_init_val = val & 0xffff;
+
+	val = DPU_REG_READ(c, PP_INT_COUNT_VAL);
+	info->rd_ptr_frame_count = (val & 0xffff0000) >> 16;
+	info->rd_ptr_line_count = val & 0xffff;
+
+	val = DPU_REG_READ(c, PP_LINE_COUNT);
+	info->wr_ptr_line_count = val & 0xffff;
+
+	return 0;
+}
+
+static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 height, init;
+	u32 line = 0xFFFF;
+
+	if (!pp)
+		return 0;
+	c = &pp->hw;
+
+	init = DPU_REG_READ(c, PP_VSYNC_INIT_VAL) & 0xFFFF;
+	height = DPU_REG_READ(c, PP_SYNC_CONFIG_HEIGHT) & 0xFFFF;
+
+	if (height < init)
+		goto line_count_exit;
+
+	line = DPU_REG_READ(c, PP_INT_COUNT_VAL) & 0xFFFF;
+
+	if (line < init)
+		line += (0xFFFF - init);
+	else
+		line -= init;
+
+line_count_exit:
+	return line;
+}
+
+static void _setup_pingpong_ops(struct dpu_hw_pingpong_ops *ops,
+	const struct dpu_pingpong_cfg *hw_cap)
+{
+	ops->setup_tearcheck = dpu_hw_pp_setup_te_config;
+	ops->enable_tearcheck = dpu_hw_pp_enable_te;
+	ops->connect_external_te = dpu_hw_pp_connect_external_te;
+	ops->get_vsync_info = dpu_hw_pp_get_vsync_info;
+	ops->poll_timeout_wr_ptr = dpu_hw_pp_poll_timeout_wr_ptr;
+	ops->get_line_count = dpu_hw_pp_get_line_count;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
+struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *m)
+{
+	struct dpu_hw_pingpong *c;
+	struct dpu_pingpong_cfg *cfg;
+	int rc;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _pingpong_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	c->idx = idx;
+	c->caps = cfg;
+	_setup_pingpong_ops(&c->ops, c->caps);
+
+	rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_PINGPONG, idx, &dpu_hw_ops);
+	if (rc) {
+		DPU_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
+	return c;
+
+blk_init_error:
+	kzfree(c);
+
+	return ERR_PTR(rc);
+}
+
+void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp)
+{
+	if (pp)
+		dpu_hw_blk_destroy(&pp->base);
+	kfree(pp);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
new file mode 100644
index 0000000..3caccd7
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -0,0 +1,136 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_PINGPONG_H
+#define _DPU_HW_PINGPONG_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_pingpong;
+
+struct dpu_hw_tear_check {
+	/*
+	 * Ratio of the MDP vsync clock frequency (Hz) to the
+	 * product of refresh rate and total number of lines
+	 */
+	u32 vsync_count;
+	u32 sync_cfg_height;
+	u32 vsync_init_val;
+	u32 sync_threshold_start;
+	u32 sync_threshold_continue;
+	u32 start_pos;
+	u32 rd_ptr_irq;
+	u8 hw_vsync_mode;
+};
+
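+/*
+ * Worked example (editor's illustration; the numbers are assumptions, not
+ * values used by the driver): with a 19.2 MHz vsync counter clock, a 60 Hz
+ * panel and 2400 total lines, vsync_count = 19200000 / (60 * 2400) ~= 133
+ * clocks per line.
+ */
+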
+struct dpu_hw_pp_vsync_info {
+	u32 rd_ptr_init_val;	/* value of rd pointer at vsync edge */
+	u32 rd_ptr_frame_count;	/* num frames sent since enabling interface */
+	u32 rd_ptr_line_count;	/* current line on panel (rd ptr) */
+	u32 wr_ptr_line_count;	/* current line within pp fifo (wr ptr) */
+};
+
+/**
+ * struct dpu_hw_pingpong_ops : Interface to the pingpong HW driver functions
+ *  Assumption is these functions will be called after clocks are enabled
+ *  @setup_tearcheck : program tear check values
+ *  @enable_tearcheck : enables tear check
+ *  @get_vsync_info : retrieves timing info of the panel
+ *  @setup_dither : function to program the dither hw block
+ *  @get_line_count: obtain current vertical line counter
+ */
+struct dpu_hw_pingpong_ops {
+	/**
+	 * enables vsync generation, sets up the initial value of the
+	 * read pointer and programs the tear check configuration
+	 */
+	int (*setup_tearcheck)(struct dpu_hw_pingpong *pp,
+			struct dpu_hw_tear_check *cfg);
+
+	/**
+	 * enables tear check block
+	 */
+	int (*enable_tearcheck)(struct dpu_hw_pingpong *pp,
+			bool enable);
+
+	/**
+	 * read, modify, write to either set or clear listening to external TE
+	 * @Return: 1 if TE was originally connected, 0 if not, or -ERROR
+	 */
+	int (*connect_external_te)(struct dpu_hw_pingpong *pp,
+			bool enable_external_te);
+
+	/**
+	 * provides the programmed and current
+	 * line_count
+	 */
+	int (*get_vsync_info)(struct dpu_hw_pingpong *pp,
+			struct dpu_hw_pp_vsync_info  *info);
+
+	/**
+	 * poll until write pointer transmission starts
+	 * @Return: 0 on success, -ETIMEDOUT on timeout
+	 */
+	int (*poll_timeout_wr_ptr)(struct dpu_hw_pingpong *pp, u32 timeout_us);
+
+	/**
+	 * Obtain current vertical line counter
+	 */
+	u32 (*get_line_count)(struct dpu_hw_pingpong *pp);
+};
+
+struct dpu_hw_pingpong {
+	struct dpu_hw_blk base;
+	struct dpu_hw_blk_reg_map hw;
+
+	/* pingpong */
+	enum dpu_pingpong idx;
+	const struct dpu_pingpong_cfg *caps;
+
+	/* ops */
+	struct dpu_hw_pingpong_ops ops;
+};
+
+/**
+ * dpu_hw_pingpong - convert base object dpu_hw_base to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
+{
+	return container_of(hw, struct dpu_hw_pingpong, base);
+}
+
+/**
+ * dpu_hw_pingpong_init - initializes the pingpong driver for the passed
+ *	pingpong idx.
+ * @idx:  Pingpong index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m:    Pointer to mdss catalog data
+ * Returns: Error code or allocated dpu_hw_pingpong context
+ */
+struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_pingpong_destroy - destroys pingpong driver context
+ *	should be called to free the context
+ * @pp:   Pointer to PP driver context returned by dpu_hw_pingpong_init
+ */
+void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp);
+
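+/*
+ * Minimal usage sketch (editor's illustration; assumes @mmio and @catalog
+ * were mapped/parsed by the caller, error handling trimmed):
+ *
+ *	struct dpu_hw_pingpong *pp;
+ *
+ *	pp = dpu_hw_pingpong_init(PINGPONG_0, mmio, catalog);
+ *	if (IS_ERR(pp))
+ *		return PTR_ERR(pp);
+ *	pp->ops.enable_tearcheck(pp, true);
+ *	...
+ *	dpu_hw_pingpong_destroy(pp);
+ */
+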
+#endif /*_DPU_HW_PINGPONG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
new file mode 100644
index 0000000..c25b52a
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -0,0 +1,753 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define DPU_FETCH_CONFIG_RESET_VALUE   0x00000087
+
+/* DPU_SSPP_SRC */
+#define SSPP_SRC_SIZE                      0x00
+#define SSPP_SRC_XY                        0x08
+#define SSPP_OUT_SIZE                      0x0c
+#define SSPP_OUT_XY                        0x10
+#define SSPP_SRC0_ADDR                     0x14
+#define SSPP_SRC1_ADDR                     0x18
+#define SSPP_SRC2_ADDR                     0x1C
+#define SSPP_SRC3_ADDR                     0x20
+#define SSPP_SRC_YSTRIDE0                  0x24
+#define SSPP_SRC_YSTRIDE1                  0x28
+#define SSPP_SRC_FORMAT                    0x30
+#define SSPP_SRC_UNPACK_PATTERN            0x34
+#define SSPP_SRC_OP_MODE                   0x38
+
+/* SSPP_MULTIRECT*/
+#define SSPP_SRC_SIZE_REC1                 0x16C
+#define SSPP_SRC_XY_REC1                   0x168
+#define SSPP_OUT_SIZE_REC1                 0x160
+#define SSPP_OUT_XY_REC1                   0x164
+#define SSPP_SRC_FORMAT_REC1               0x174
+#define SSPP_SRC_UNPACK_PATTERN_REC1       0x178
+#define SSPP_SRC_OP_MODE_REC1              0x17C
+#define SSPP_MULTIRECT_OPMODE              0x170
+#define SSPP_SRC_CONSTANT_COLOR_REC1       0x180
+#define SSPP_EXCL_REC_SIZE_REC1            0x184
+#define SSPP_EXCL_REC_XY_REC1              0x188
+
+#define MDSS_MDP_OP_DEINTERLACE            BIT(22)
+#define MDSS_MDP_OP_DEINTERLACE_ODD        BIT(23)
+#define MDSS_MDP_OP_IGC_ROM_1              BIT(18)
+#define MDSS_MDP_OP_IGC_ROM_0              BIT(17)
+#define MDSS_MDP_OP_IGC_EN                 BIT(16)
+#define MDSS_MDP_OP_FLIP_UD                BIT(14)
+#define MDSS_MDP_OP_FLIP_LR                BIT(13)
+#define MDSS_MDP_OP_BWC_EN                 BIT(0)
+#define MDSS_MDP_OP_PE_OVERRIDE            BIT(31)
+#define MDSS_MDP_OP_BWC_LOSSLESS           (0 << 1)
+#define MDSS_MDP_OP_BWC_Q_HIGH             (1 << 1)
+#define MDSS_MDP_OP_BWC_Q_MED              (2 << 1)
+
+#define SSPP_SRC_CONSTANT_COLOR            0x3c
+#define SSPP_EXCL_REC_CTL                  0x40
+#define SSPP_UBWC_STATIC_CTRL              0x44
+#define SSPP_FETCH_CONFIG                  0x048
+#define SSPP_DANGER_LUT                    0x60
+#define SSPP_SAFE_LUT                      0x64
+#define SSPP_CREQ_LUT                      0x68
+#define SSPP_QOS_CTRL                      0x6C
+#define SSPP_DECIMATION_CONFIG             0xB4
+#define SSPP_SRC_ADDR_SW_STATUS            0x70
+#define SSPP_CREQ_LUT_0                    0x74
+#define SSPP_CREQ_LUT_1                    0x78
+#define SSPP_SW_PIX_EXT_C0_LR              0x100
+#define SSPP_SW_PIX_EXT_C0_TB              0x104
+#define SSPP_SW_PIX_EXT_C0_REQ_PIXELS      0x108
+#define SSPP_SW_PIX_EXT_C1C2_LR            0x110
+#define SSPP_SW_PIX_EXT_C1C2_TB            0x114
+#define SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS    0x118
+#define SSPP_SW_PIX_EXT_C3_LR              0x120
+#define SSPP_SW_PIX_EXT_C3_TB              0x124
+#define SSPP_SW_PIX_EXT_C3_REQ_PIXELS      0x128
+#define SSPP_TRAFFIC_SHAPER                0x130
+#define SSPP_CDP_CNTL                      0x134
+#define SSPP_UBWC_ERROR_STATUS             0x138
+#define SSPP_TRAFFIC_SHAPER_PREFILL        0x150
+#define SSPP_TRAFFIC_SHAPER_REC1_PREFILL   0x154
+#define SSPP_TRAFFIC_SHAPER_REC1           0x158
+#define SSPP_EXCL_REC_SIZE                 0x1B4
+#define SSPP_EXCL_REC_XY                   0x1B8
+#define SSPP_VIG_OP_MODE                   0x0
+#define SSPP_VIG_CSC_10_OP_MODE            0x0
+#define SSPP_TRAFFIC_SHAPER_BPC_MAX        0xFF
+
+/* SSPP_QOS_CTRL */
+#define SSPP_QOS_CTRL_VBLANK_EN            BIT(16)
+#define SSPP_QOS_CTRL_DANGER_SAFE_EN       BIT(0)
+#define SSPP_QOS_CTRL_DANGER_VBLANK_MASK   0x3
+#define SSPP_QOS_CTRL_DANGER_VBLANK_OFF    4
+#define SSPP_QOS_CTRL_CREQ_VBLANK_MASK     0x3
+#define SSPP_QOS_CTRL_CREQ_VBLANK_OFF      20
+
+/* DPU_SSPP_SCALER_QSEED2 */
+#define SCALE_CONFIG                       0x04
+#define COMP0_3_PHASE_STEP_X               0x10
+#define COMP0_3_PHASE_STEP_Y               0x14
+#define COMP1_2_PHASE_STEP_X               0x18
+#define COMP1_2_PHASE_STEP_Y               0x1c
+#define COMP0_3_INIT_PHASE_X               0x20
+#define COMP0_3_INIT_PHASE_Y               0x24
+#define COMP1_2_INIT_PHASE_X               0x28
+#define COMP1_2_INIT_PHASE_Y               0x2C
+#define VIG_0_QSEED2_SHARP                 0x30
+
+/*
+ * Definitions for ViG op modes
+ */
+#define VIG_OP_CSC_DST_DATAFMT BIT(19)
+#define VIG_OP_CSC_SRC_DATAFMT BIT(18)
+#define VIG_OP_CSC_EN          BIT(17)
+#define VIG_OP_MEM_PROT_CONT   BIT(15)
+#define VIG_OP_MEM_PROT_VAL    BIT(14)
+#define VIG_OP_MEM_PROT_SAT    BIT(13)
+#define VIG_OP_MEM_PROT_HUE    BIT(12)
+#define VIG_OP_HIST            BIT(8)
+#define VIG_OP_SKY_COL         BIT(7)
+#define VIG_OP_FOIL            BIT(6)
+#define VIG_OP_SKIN_COL        BIT(5)
+#define VIG_OP_PA_EN           BIT(4)
+#define VIG_OP_PA_SAT_ZERO_EXP BIT(2)
+#define VIG_OP_MEM_PROT_BLEND  BIT(1)
+
+/*
+ * Definitions for CSC 10 op modes
+ */
+#define VIG_CSC_10_SRC_DATAFMT BIT(1)
+#define VIG_CSC_10_EN          BIT(0)
+#define CSC_10BIT_OFFSET       4
+
+/* traffic shaper clock in Hz */
+#define TS_CLK			19200000
+
+static inline int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
+		int s_id,
+		u32 *idx)
+{
+	int rc = 0;
+	const struct dpu_sspp_sub_blks *sblk;
+
+	if (!ctx)
+		return -EINVAL;
+
+	sblk = ctx->cap->sblk;
+
+	switch (s_id) {
+	case DPU_SSPP_SRC:
+		*idx = sblk->src_blk.base;
+		break;
+	case DPU_SSPP_SCALER_QSEED2:
+	case DPU_SSPP_SCALER_QSEED3:
+	case DPU_SSPP_SCALER_RGB:
+		*idx = sblk->scaler_blk.base;
+		break;
+	case DPU_SSPP_CSC:
+	case DPU_SSPP_CSC_10BIT:
+		*idx = sblk->csc_blk.base;
+		break;
+	default:
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static void dpu_hw_sspp_setup_multirect(struct dpu_hw_pipe *ctx,
+		enum dpu_sspp_multirect_index index,
+		enum dpu_sspp_multirect_mode mode)
+{
+	u32 mode_mask;
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+		return;
+
+	if (index == DPU_SSPP_RECT_SOLO) {
+		/**
+		 * if rect index is RECT_SOLO, we cannot expect a
+		 * virtual plane sharing the same SSPP id. So we go
+		 * and disable multirect
+		 */
+		mode_mask = 0;
+	} else {
+		mode_mask = DPU_REG_READ(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx);
+		mode_mask |= index;
+		if (mode == DPU_SSPP_MULTIRECT_TIME_MX)
+			mode_mask |= BIT(2);
+		else
+			mode_mask &= ~BIT(2);
+	}
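+/*
+ * Note (editor's illustration): a format fetched with DPU_FETCH_UBWC is
+ * classified as TILE when the compressed flag is clear and as UBWC when it
+ * is set, so for any UBWC-fetched format exactly one of the two macros
+ * above is true:
+ *
+ *	bool nonlinear = DPU_FORMAT_IS_TILE(fmt) || DPU_FORMAT_IS_UBWC(fmt);
+ */
+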
+
+	DPU_REG_WRITE(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx, mode_mask);
+}
+
+static void _sspp_setup_opmode(struct dpu_hw_pipe *ctx,
+		u32 mask, u8 en)
+{
+	u32 idx;
+	u32 opmode;
+
+	if (!test_bit(DPU_SSPP_SCALER_QSEED2, &ctx->cap->features) ||
+		_sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED2, &idx) ||
+		!test_bit(DPU_SSPP_CSC, &ctx->cap->features))
+		return;
+
+	opmode = DPU_REG_READ(&ctx->hw, SSPP_VIG_OP_MODE + idx);
+
+	if (en)
+		opmode |= mask;
+	else
+		opmode &= ~mask;
+
+	DPU_REG_WRITE(&ctx->hw, SSPP_VIG_OP_MODE + idx, opmode);
+}
+
+static void _sspp_setup_csc10_opmode(struct dpu_hw_pipe *ctx,
+		u32 mask, u8 en)
+{
+	u32 idx;
+	u32 opmode;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_CSC_10BIT, &idx))
+		return;
+
+	opmode = DPU_REG_READ(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx);
+	if (en)
+		opmode |= mask;
+	else
+		opmode &= ~mask;
+
+	DPU_REG_WRITE(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx, opmode);
+}
+
+/**
+ * Setup source pixel format and flip.
+ */
+static void dpu_hw_sspp_setup_format(struct dpu_hw_pipe *ctx,
+		const struct dpu_format *fmt, u32 flags,
+		enum dpu_sspp_multirect_index rect_mode)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 chroma_samp, unpack, src_format;
+	u32 opmode = 0;
+	u32 fast_clear = 0;
+	u32 op_mode_off, unpack_pat_off, format_off;
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !fmt)
+		return;
+
+	if (rect_mode == DPU_SSPP_RECT_SOLO || rect_mode == DPU_SSPP_RECT_0) {
+		op_mode_off = SSPP_SRC_OP_MODE;
+		unpack_pat_off = SSPP_SRC_UNPACK_PATTERN;
+		format_off = SSPP_SRC_FORMAT;
+	} else {
+		op_mode_off = SSPP_SRC_OP_MODE_REC1;
+		unpack_pat_off = SSPP_SRC_UNPACK_PATTERN_REC1;
+		format_off = SSPP_SRC_FORMAT_REC1;
+	}
+
+	c = &ctx->hw;
+	opmode = DPU_REG_READ(c, op_mode_off + idx);
+	opmode &= ~(MDSS_MDP_OP_FLIP_LR | MDSS_MDP_OP_FLIP_UD |
+			MDSS_MDP_OP_BWC_EN | MDSS_MDP_OP_PE_OVERRIDE);
+
+	if (flags & DPU_SSPP_FLIP_LR)
+		opmode |= MDSS_MDP_OP_FLIP_LR;
+	if (flags & DPU_SSPP_FLIP_UD)
+		opmode |= MDSS_MDP_OP_FLIP_UD;
+
+	chroma_samp = fmt->chroma_sample;
+	if (flags & DPU_SSPP_SOURCE_ROTATED_90) {
+		if (chroma_samp == DPU_CHROMA_H2V1)
+			chroma_samp = DPU_CHROMA_H1V2;
+		else if (chroma_samp == DPU_CHROMA_H1V2)
+			chroma_samp = DPU_CHROMA_H2V1;
+	}
+
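+	/*
+	 * Pack chroma sampling, fetch-plane arrangement and per-component
+	 * bit depths into the SRC_FORMAT register value.
+	 */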
+	src_format = (chroma_samp << 23) | (fmt->fetch_planes << 19) |
+		(fmt->bits[C3_ALPHA] << 6) | (fmt->bits[C2_R_Cr] << 4) |
+		(fmt->bits[C1_B_Cb] << 2) | (fmt->bits[C0_G_Y] << 0);
+
+	if (flags & DPU_SSPP_ROT_90)
+		src_format |= BIT(11); /* ROT90 */
+
+	if (fmt->alpha_enable && fmt->fetch_planes == DPU_PLANE_INTERLEAVED)
+		src_format |= BIT(8); /* SRCC3_EN */
+
+	if (flags & DPU_SSPP_SOLID_FILL)
+		src_format |= BIT(22);
+
+	unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
+		(fmt->element[1] << 8) | (fmt->element[0] << 0);
+	src_format |= ((fmt->unpack_count - 1) << 12) |
+		(fmt->unpack_tight << 17) |
+		(fmt->unpack_align_msb << 18) |
+		((fmt->bpp - 1) << 9);
+
+	if (fmt->fetch_mode != DPU_FETCH_LINEAR) {
+		if (DPU_FORMAT_IS_UBWC(fmt))
+			opmode |= MDSS_MDP_OP_BWC_EN;
+		src_format |= (fmt->fetch_mode & 3) << 30; /* FRAME_FORMAT */
+		DPU_REG_WRITE(c, SSPP_FETCH_CONFIG,
+			DPU_FETCH_CONFIG_RESET_VALUE |
+			ctx->mdp->highest_bank_bit << 18);
+		if (IS_UBWC_20_SUPPORTED(ctx->catalog->caps->ubwc_version)) {
+			fast_clear = fmt->alpha_enable ? BIT(31) : 0;
+			DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
+					fast_clear | (ctx->mdp->ubwc_swizzle) |
+					(ctx->mdp->highest_bank_bit << 4));
+		}
+	}
+
+	opmode |= MDSS_MDP_OP_PE_OVERRIDE;
+
+	/* if this is YUV pixel format, enable CSC */
+	if (DPU_FORMAT_IS_YUV(fmt))
+		src_format |= BIT(15);
+
+	if (DPU_FORMAT_IS_DX(fmt))
+		src_format |= BIT(14);
+
+	/* update scaler opmode, if appropriate */
+	if (test_bit(DPU_SSPP_CSC, &ctx->cap->features))
+		_sspp_setup_opmode(ctx, VIG_OP_CSC_EN | VIG_OP_CSC_SRC_DATAFMT,
+			DPU_FORMAT_IS_YUV(fmt));
+	else if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features))
+		_sspp_setup_csc10_opmode(ctx,
+			VIG_CSC_10_EN | VIG_CSC_10_SRC_DATAFMT,
+			DPU_FORMAT_IS_YUV(fmt));
+
+	DPU_REG_WRITE(c, format_off + idx, src_format);
+	DPU_REG_WRITE(c, unpack_pat_off + idx, unpack);
+	DPU_REG_WRITE(c, op_mode_off + idx, opmode);
+
+	/* clear previous UBWC error */
+	DPU_REG_WRITE(c, SSPP_UBWC_ERROR_STATUS + idx, BIT(31));
+}
+
+static void dpu_hw_sspp_setup_pe_config(struct dpu_hw_pipe *ctx,
+		struct dpu_hw_pixel_ext *pe_ext)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u8 color;
+	u32 lr_pe[4], tb_pe[4], tot_req_pixels[4];
+	const u32 bytemask = 0xff;
+	const u32 shortmask = 0xffff;
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !pe_ext)
+		return;
+
+	c = &ctx->hw;
+
+	/* program SW pixel extension override for all pipes */
+	for (color = 0; color < DPU_MAX_PLANES; color++) {
+		/* color 2 has the same set of registers as color 1 */
+		if (color == 2)
+			continue;
+
+		lr_pe[color] = ((pe_ext->right_ftch[color] & bytemask) << 24)|
+			((pe_ext->right_rpt[color] & bytemask) << 16)|
+			((pe_ext->left_ftch[color] & bytemask) << 8)|
+			(pe_ext->left_rpt[color] & bytemask);
+
+		tb_pe[color] = ((pe_ext->btm_ftch[color] & bytemask) << 24)|
+			((pe_ext->btm_rpt[color] & bytemask) << 16)|
+			((pe_ext->top_ftch[color] & bytemask) << 8)|
+			(pe_ext->top_rpt[color] & bytemask);
+
+		tot_req_pixels[color] = (((pe_ext->roi_h[color] +
+			pe_ext->num_ext_pxls_top[color] +
+			pe_ext->num_ext_pxls_btm[color]) & shortmask) << 16) |
+			((pe_ext->roi_w[color] +
+			pe_ext->num_ext_pxls_left[color] +
+			pe_ext->num_ext_pxls_right[color]) & shortmask);
+	}
+
+	/* color 0 */
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_LR + idx, lr_pe[0]);
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_TB + idx, tb_pe[0]);
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_REQ_PIXELS + idx,
+			tot_req_pixels[0]);
+
+	/* color 1 and color 2 */
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_LR + idx, lr_pe[1]);
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_TB + idx, tb_pe[1]);
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS + idx,
+			tot_req_pixels[1]);
+
+	/* color 3 */
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_LR + idx, lr_pe[3]);
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_TB + idx, tb_pe[3]);
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_REQ_PIXELS + idx,
+			tot_req_pixels[3]);
+}
+
+static void _dpu_hw_sspp_setup_scaler3(struct dpu_hw_pipe *ctx,
+		struct dpu_hw_pipe_cfg *sspp,
+		struct dpu_hw_pixel_ext *pe,
+		void *scaler_cfg)
+{
+	u32 idx;
+	struct dpu_hw_scaler3_cfg *scaler3_cfg = scaler_cfg;
+
+	(void)pe;
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx) || !sspp
+		|| !scaler3_cfg || !ctx || !ctx->cap || !ctx->cap->sblk)
+		return;
+
+	dpu_hw_setup_scaler3(&ctx->hw, scaler3_cfg, idx,
+			ctx->cap->sblk->scaler_blk.version,
+			sspp->layout.format);
+}
+
+static u32 _dpu_hw_sspp_get_scaler3_ver(struct dpu_hw_pipe *ctx)
+{
+	u32 idx;
+
+	if (!ctx || _sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx))
+		return 0;
+
+	return dpu_hw_get_scaler3_ver(&ctx->hw, idx);
+}
+
+/**
+ * dpu_hw_sspp_setup_rects - program pipe source/destination rectangles
+ */
+static void dpu_hw_sspp_setup_rects(struct dpu_hw_pipe *ctx,
+		struct dpu_hw_pipe_cfg *cfg,
+		enum dpu_sspp_multirect_index rect_index)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 src_size, src_xy, dst_size, dst_xy, ystride0, ystride1;
+	u32 src_size_off, src_xy_off, out_size_off, out_xy_off;
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !cfg)
+		return;
+
+	c = &ctx->hw;
+
+	if (rect_index == DPU_SSPP_RECT_SOLO || rect_index == DPU_SSPP_RECT_0) {
+		src_size_off = SSPP_SRC_SIZE;
+		src_xy_off = SSPP_SRC_XY;
+		out_size_off = SSPP_OUT_SIZE;
+		out_xy_off = SSPP_OUT_XY;
+	} else {
+		src_size_off = SSPP_SRC_SIZE_REC1;
+		src_xy_off = SSPP_SRC_XY_REC1;
+		out_size_off = SSPP_OUT_SIZE_REC1;
+		out_xy_off = SSPP_OUT_XY_REC1;
+	}
+
+
+	/* src and dest rect programming */
+	src_xy = (cfg->src_rect.y1 << 16) | cfg->src_rect.x1;
+	src_size = (drm_rect_height(&cfg->src_rect) << 16) |
+		   drm_rect_width(&cfg->src_rect);
+	dst_xy = (cfg->dst_rect.y1 << 16) | cfg->dst_rect.x1;
+	dst_size = (drm_rect_height(&cfg->dst_rect) << 16) |
+		drm_rect_width(&cfg->dst_rect);
+
+	if (rect_index == DPU_SSPP_RECT_SOLO) {
+		ystride0 = (cfg->layout.plane_pitch[0]) |
+			(cfg->layout.plane_pitch[1] << 16);
+		ystride1 = (cfg->layout.plane_pitch[2]) |
+			(cfg->layout.plane_pitch[3] << 16);
+	} else {
+		ystride0 = DPU_REG_READ(c, SSPP_SRC_YSTRIDE0 + idx);
+		ystride1 = DPU_REG_READ(c, SSPP_SRC_YSTRIDE1 + idx);
+
+		if (rect_index == DPU_SSPP_RECT_0) {
+			ystride0 = (ystride0 & 0xFFFF0000) |
+				(cfg->layout.plane_pitch[0] & 0x0000FFFF);
+			ystride1 = (ystride1 & 0xFFFF0000)|
+				(cfg->layout.plane_pitch[2] & 0x0000FFFF);
+		} else {
+			ystride0 = (ystride0 & 0x0000FFFF) |
+				((cfg->layout.plane_pitch[0] << 16) &
+				 0xFFFF0000);
+			ystride1 = (ystride1 & 0x0000FFFF) |
+				((cfg->layout.plane_pitch[2] << 16) &
+				 0xFFFF0000);
+		}
+	}
+
+	/* rectangle register programming */
+	DPU_REG_WRITE(c, src_size_off + idx, src_size);
+	DPU_REG_WRITE(c, src_xy_off + idx, src_xy);
+	DPU_REG_WRITE(c, out_size_off + idx, dst_size);
+	DPU_REG_WRITE(c, out_xy_off + idx, dst_xy);
+
+	DPU_REG_WRITE(c, SSPP_SRC_YSTRIDE0 + idx, ystride0);
+	DPU_REG_WRITE(c, SSPP_SRC_YSTRIDE1 + idx, ystride1);
+}
+
+static void dpu_hw_sspp_setup_sourceaddress(struct dpu_hw_pipe *ctx,
+		struct dpu_hw_pipe_cfg *cfg,
+		enum dpu_sspp_multirect_index rect_mode)
+{
+	int i;
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+		return;
+
+	if (rect_mode == DPU_SSPP_RECT_SOLO) {
+		for (i = 0; i < ARRAY_SIZE(cfg->layout.plane_addr); i++)
+			DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx + i * 0x4,
+					cfg->layout.plane_addr[i]);
+	} else if (rect_mode == DPU_SSPP_RECT_0) {
+		DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx,
+				cfg->layout.plane_addr[0]);
+		DPU_REG_WRITE(&ctx->hw, SSPP_SRC2_ADDR + idx,
+				cfg->layout.plane_addr[2]);
+	} else {
+		DPU_REG_WRITE(&ctx->hw, SSPP_SRC1_ADDR + idx,
+				cfg->layout.plane_addr[0]);
+		DPU_REG_WRITE(&ctx->hw, SSPP_SRC3_ADDR + idx,
+				cfg->layout.plane_addr[2]);
+	}
+}
+
+static void dpu_hw_sspp_setup_csc(struct dpu_hw_pipe *ctx,
+		struct dpu_csc_cfg *data)
+{
+	u32 idx;
+	bool csc10 = false;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_CSC, &idx) || !data)
+		return;
+
+	if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features)) {
+		idx += CSC_10BIT_OFFSET;
+		csc10 = true;
+	}
+
+	dpu_hw_csc_setup(&ctx->hw, idx, data, csc10);
+}
+
+static void dpu_hw_sspp_setup_solidfill(struct dpu_hw_pipe *ctx, u32 color, enum
+		dpu_sspp_multirect_index rect_index)
+{
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+		return;
+
+	if (rect_index == DPU_SSPP_RECT_SOLO || rect_index == DPU_SSPP_RECT_0)
+		DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR + idx, color);
+	else
+		DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR_REC1 + idx,
+				color);
+}
+
+static void dpu_hw_sspp_setup_danger_safe_lut(struct dpu_hw_pipe *ctx,
+		struct dpu_hw_pipe_qos_cfg *cfg)
+{
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+		return;
+
+	DPU_REG_WRITE(&ctx->hw, SSPP_DANGER_LUT + idx, cfg->danger_lut);
+	DPU_REG_WRITE(&ctx->hw, SSPP_SAFE_LUT + idx, cfg->safe_lut);
+}
+
+static void dpu_hw_sspp_setup_creq_lut(struct dpu_hw_pipe *ctx,
+		struct dpu_hw_pipe_qos_cfg *cfg)
+{
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+		return;
+
+	if (ctx->cap && test_bit(DPU_SSPP_QOS_8LVL, &ctx->cap->features)) {
+		DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_0 + idx, cfg->creq_lut);
+		DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_1 + idx,
+				cfg->creq_lut >> 32);
+	} else {
+		DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT + idx, cfg->creq_lut);
+	}
+}
+
+static void dpu_hw_sspp_setup_qos_ctrl(struct dpu_hw_pipe *ctx,
+		struct dpu_hw_pipe_qos_cfg *cfg)
+{
+	u32 idx;
+	u32 qos_ctrl = 0;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+		return;
+
+	if (cfg->vblank_en) {
+		qos_ctrl |= ((cfg->creq_vblank &
+				SSPP_QOS_CTRL_CREQ_VBLANK_MASK) <<
+				SSPP_QOS_CTRL_CREQ_VBLANK_OFF);
+		qos_ctrl |= ((cfg->danger_vblank &
+				SSPP_QOS_CTRL_DANGER_VBLANK_MASK) <<
+				SSPP_QOS_CTRL_DANGER_VBLANK_OFF);
+		qos_ctrl |= SSPP_QOS_CTRL_VBLANK_EN;
+	}
+
+	if (cfg->danger_safe_en)
+		qos_ctrl |= SSPP_QOS_CTRL_DANGER_SAFE_EN;
+
+	DPU_REG_WRITE(&ctx->hw, SSPP_QOS_CTRL + idx, qos_ctrl);
+}
+
+static void dpu_hw_sspp_setup_cdp(struct dpu_hw_pipe *ctx,
+		struct dpu_hw_pipe_cdp_cfg *cfg)
+{
+	u32 idx;
+	u32 cdp_cntl = 0;
+
+	if (!ctx || !cfg)
+		return;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+		return;
+
+	if (cfg->enable)
+		cdp_cntl |= BIT(0);
+	if (cfg->ubwc_meta_enable)
+		cdp_cntl |= BIT(1);
+	if (cfg->tile_amortize_enable)
+		cdp_cntl |= BIT(2);
+	if (cfg->preload_ahead == DPU_SSPP_CDP_PRELOAD_AHEAD_64)
+		cdp_cntl |= BIT(3);
+
+	DPU_REG_WRITE(&ctx->hw, SSPP_CDP_CNTL, cdp_cntl);
+}
+
+static void _setup_layer_ops(struct dpu_hw_pipe *c,
+		unsigned long features)
+{
+	if (test_bit(DPU_SSPP_SRC, &features)) {
+		c->ops.setup_format = dpu_hw_sspp_setup_format;
+		c->ops.setup_rects = dpu_hw_sspp_setup_rects;
+		c->ops.setup_sourceaddress = dpu_hw_sspp_setup_sourceaddress;
+		c->ops.setup_solidfill = dpu_hw_sspp_setup_solidfill;
+		c->ops.setup_pe = dpu_hw_sspp_setup_pe_config;
+	}
+
+	if (test_bit(DPU_SSPP_QOS, &features)) {
+		c->ops.setup_danger_safe_lut =
+			dpu_hw_sspp_setup_danger_safe_lut;
+		c->ops.setup_creq_lut = dpu_hw_sspp_setup_creq_lut;
+		c->ops.setup_qos_ctrl = dpu_hw_sspp_setup_qos_ctrl;
+	}
+
+	if (test_bit(DPU_SSPP_CSC, &features) ||
+		test_bit(DPU_SSPP_CSC_10BIT, &features))
+		c->ops.setup_csc = dpu_hw_sspp_setup_csc;
+
+	if (dpu_hw_sspp_multirect_enabled(c->cap))
+		c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
+
+	if (test_bit(DPU_SSPP_SCALER_QSEED3, &features)) {
+		c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3;
+		c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver;
+	}
+
+	if (test_bit(DPU_SSPP_CDP, &features))
+		c->ops.setup_cdp = dpu_hw_sspp_setup_cdp;
+}
+
+static struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *catalog,
+		struct dpu_hw_blk_reg_map *b)
+{
+	int i;
+
+	if ((sspp < SSPP_MAX) && catalog && addr && b) {
+		for (i = 0; i < catalog->sspp_count; i++) {
+			if (sspp == catalog->sspp[i].id) {
+				b->base_off = addr;
+				b->blk_off = catalog->sspp[i].base;
+				b->length = catalog->sspp[i].len;
+				b->hwversion = catalog->hwversion;
+				b->log_mask = DPU_DBG_MASK_SSPP;
+				return &catalog->sspp[i];
+			}
+		}
+	}
+
+	return ERR_PTR(-ENOMEM);
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
+struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
+		void __iomem *addr, struct dpu_mdss_cfg *catalog,
+		bool is_virtual_pipe)
+{
+	struct dpu_hw_pipe *hw_pipe;
+	struct dpu_sspp_cfg *cfg;
+	int rc;
+
+	if (!addr || !catalog)
+		return ERR_PTR(-EINVAL);
+
+	hw_pipe = kzalloc(sizeof(*hw_pipe), GFP_KERNEL);
+	if (!hw_pipe)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _sspp_offset(idx, addr, catalog, &hw_pipe->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(hw_pipe);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Assign ops */
+	hw_pipe->catalog = catalog;
+	hw_pipe->mdp = &catalog->mdp[0];
+	hw_pipe->idx = idx;
+	hw_pipe->cap = cfg;
+	_setup_layer_ops(hw_pipe, hw_pipe->cap->features);
+
+	rc = dpu_hw_blk_init(&hw_pipe->base, DPU_HW_BLK_SSPP, idx, &dpu_hw_ops);
+	if (rc) {
+		DPU_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
+	return hw_pipe;
+
+blk_init_error:
+	kzfree(hw_pipe);
+
+	return ERR_PTR(rc);
+}
+
+void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx)
+{
+	if (ctx)
+		dpu_hw_blk_destroy(&ctx->base);
+	kfree(ctx);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
new file mode 100644
index 0000000..4d81e5f
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -0,0 +1,424 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_SSPP_H
+#define _DPU_HW_SSPP_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+#include "dpu_formats.h"
+
+struct dpu_hw_pipe;
+
+/**
+ * Flags
+ */
+#define DPU_SSPP_FLIP_LR		BIT(0)
+#define DPU_SSPP_FLIP_UD		BIT(1)
+#define DPU_SSPP_SOURCE_ROTATED_90	BIT(2)
+#define DPU_SSPP_ROT_90			BIT(3)
+#define DPU_SSPP_SOLID_FILL		BIT(4)
+
+/**
+ * Define all scaler feature bits in catalog
+ */
+#define DPU_SSPP_SCALER ((1UL << DPU_SSPP_SCALER_RGB) | \
+	(1UL << DPU_SSPP_SCALER_QSEED2) | \
+	(1UL << DPU_SSPP_SCALER_QSEED3))
+
+/**
+ * Component indices
+ */
+enum {
+	DPU_SSPP_COMP_0,
+	DPU_SSPP_COMP_1_2,
+	DPU_SSPP_COMP_2,
+	DPU_SSPP_COMP_3,
+
+	DPU_SSPP_COMP_MAX
+};
+
+/**
+ * DPU_SSPP_RECT_SOLO - multirect disabled
+ * DPU_SSPP_RECT_0 - rect0 of a multirect pipe
+ * DPU_SSPP_RECT_1 - rect1 of a multirect pipe
+ *
+ * Note: HW supports multirect with either RECT0 or
+ * RECT1 alone. Since such configs offer no benefit over
+ * SOLO mode and to keep the plane management simple,
+ * we don't support single-rect multirect configs
+ * (see the sketch after these enums).
+ */
+enum dpu_sspp_multirect_index {
+	DPU_SSPP_RECT_SOLO = 0,
+	DPU_SSPP_RECT_0,
+	DPU_SSPP_RECT_1,
+};
+
+enum dpu_sspp_multirect_mode {
+	DPU_SSPP_MULTIRECT_NONE = 0,
+	DPU_SSPP_MULTIRECT_PARALLEL,
+	DPU_SSPP_MULTIRECT_TIME_MX,
+};
+
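+/*
+ * Illustrative sketch (editor's example, not driver code): a parallel
+ * multirect pair shares one SSPP, each rectangle fed by its own plane:
+ *
+ *	pipe->ops.setup_multirect(pipe, DPU_SSPP_RECT_0,
+ *				  DPU_SSPP_MULTIRECT_PARALLEL);
+ *	pipe->ops.setup_multirect(pipe, DPU_SSPP_RECT_1,
+ *				  DPU_SSPP_MULTIRECT_PARALLEL);
+ */
+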
+enum {
+	DPU_FRAME_LINEAR,
+	DPU_FRAME_TILE_A4X,
+	DPU_FRAME_TILE_A5X,
+};
+
+enum dpu_hw_filter {
+	DPU_SCALE_FILTER_NEAREST = 0,
+	DPU_SCALE_FILTER_BIL,
+	DPU_SCALE_FILTER_PCMN,
+	DPU_SCALE_FILTER_CA,
+	DPU_SCALE_FILTER_MAX
+};
+
+enum dpu_hw_filter_alpa {
+	DPU_SCALE_ALPHA_PIXEL_REP,
+	DPU_SCALE_ALPHA_BIL
+};
+
+enum dpu_hw_filter_yuv {
+	DPU_SCALE_2D_4X4,
+	DPU_SCALE_2D_CIR,
+	DPU_SCALE_1D_SEP,
+	DPU_SCALE_BIL
+};
+
+struct dpu_hw_sharp_cfg {
+	u32 strength;
+	u32 edge_thr;
+	u32 smooth_thr;
+	u32 noise_thr;
+};
+
+struct dpu_hw_pixel_ext {
+	/* scaling factors are enabled for this input layer */
+	uint8_t enable_pxl_ext;
+
+	int init_phase_x[DPU_MAX_PLANES];
+	int phase_step_x[DPU_MAX_PLANES];
+	int init_phase_y[DPU_MAX_PLANES];
+	int phase_step_y[DPU_MAX_PLANES];
+
+	/*
+	 * Number of pixels of extension in the left, right, top and bottom
+	 * directions for all color components. For each color component, this
+	 * value should be the sum of fetch and repeat pixels.
+	 */
+	int num_ext_pxls_left[DPU_MAX_PLANES];
+	int num_ext_pxls_right[DPU_MAX_PLANES];
+	int num_ext_pxls_top[DPU_MAX_PLANES];
+	int num_ext_pxls_btm[DPU_MAX_PLANES];
+
+	/*
+	 * Number of pixels that need to be overfetched in the left, right,
+	 * top and bottom directions from the source image for scaling.
+	 */
+	int left_ftch[DPU_MAX_PLANES];
+	int right_ftch[DPU_MAX_PLANES];
+	int top_ftch[DPU_MAX_PLANES];
+	int btm_ftch[DPU_MAX_PLANES];
+
+	/*
+	 * Number of pixels that need to be repeated in the left, right,
+	 * top and bottom directions for scaling.
+	 */
+	int left_rpt[DPU_MAX_PLANES];
+	int right_rpt[DPU_MAX_PLANES];
+	int top_rpt[DPU_MAX_PLANES];
+	int btm_rpt[DPU_MAX_PLANES];
+
+	uint32_t roi_w[DPU_MAX_PLANES];
+	uint32_t roi_h[DPU_MAX_PLANES];
+
+	/*
+	 * Filter type to be used for scaling in horizontal and vertical
+	 * directions
+	 */
+	enum dpu_hw_filter horz_filter[DPU_MAX_PLANES];
+	enum dpu_hw_filter vert_filter[DPU_MAX_PLANES];
+
+};
+
+/**
+ * struct dpu_hw_pipe_cfg : Pipe description
+ * @layout:    format layout information for programming buffer to hardware
+ * @src_rect:  src ROI; the caller accounts for operations such as
+ *             decimation and flip when programming this field
+ * @dst_rect:  destination ROI
+ * @index:     index of the rectangle of SSPP
+ * @mode:      parallel or time multiplex multirect mode
+ */
+struct dpu_hw_pipe_cfg {
+	struct dpu_hw_fmt_layout layout;
+	struct drm_rect src_rect;
+	struct drm_rect dst_rect;
+	enum dpu_sspp_multirect_index index;
+	enum dpu_sspp_multirect_mode mode;
+};
+
+/**
+ * struct dpu_hw_pipe_qos_cfg : Source pipe QoS configuration
+ * @danger_lut: LUT for generating the danger level based on fill level
+ * @safe_lut: LUT for generating the safe level based on fill level
+ * @creq_lut: LUT for generating the creq level based on fill level
+ * @creq_vblank: creq value generated to vbif during vertical blanking
+ * @danger_vblank: danger value generated during vertical blanking
+ * @vblank_en: enable creq_vblank and danger_vblank during vblank
+ * @danger_safe_en: enable danger safe generation
+ */
+struct dpu_hw_pipe_qos_cfg {
+	u32 danger_lut;
+	u32 safe_lut;
+	u64 creq_lut;
+	u32 creq_vblank;
+	u32 danger_vblank;
+	bool vblank_en;
+	bool danger_safe_en;
+};
+
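+/*
+ * Note: on hardware with the DPU_SSPP_QOS_8LVL feature the 64-bit @creq_lut
+ * is split across two registers, low word first (mirrors the programming in
+ * dpu_hw_sspp.c):
+ *
+ *	DPU_REG_WRITE(c, SSPP_CREQ_LUT_0 + idx, cfg->creq_lut);
+ *	DPU_REG_WRITE(c, SSPP_CREQ_LUT_1 + idx, cfg->creq_lut >> 32);
+ */
+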
+/**
+ * CDP preload-ahead address size
+ */
+enum {
+	DPU_SSPP_CDP_PRELOAD_AHEAD_32,
+	DPU_SSPP_CDP_PRELOAD_AHEAD_64
+};
+
+/**
+ * struct dpu_hw_pipe_cdp_cfg : CDP configuration
+ * @enable: true to enable CDP
+ * @ubwc_meta_enable: true to enable ubwc metadata preload
+ * @tile_amortize_enable: true to enable amortization control for tile format
+ * @preload_ahead: number of requests to preload ahead
+ *	DPU_SSPP_CDP_PRELOAD_AHEAD_32,
+ *	DPU_SSPP_CDP_PRELOAD_AHEAD_64
+ */
+struct dpu_hw_pipe_cdp_cfg {
+	bool enable;
+	bool ubwc_meta_enable;
+	bool tile_amortize_enable;
+	u32 preload_ahead;
+};
+
+/**
+ * struct dpu_hw_pipe_ts_cfg - traffic shaper configuration
+ * @size: size to prefill in bytes, or zero to disable
+ * @time: time to prefill in usec, or zero to disable
+ */
+struct dpu_hw_pipe_ts_cfg {
+	u64 size;
+	u64 time;
+};
+
+/**
+ * struct dpu_hw_sspp_ops - interface to the SSPP HW driver functions
+ * Caller must call the init function to get the pipe context for each pipe
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_sspp_ops {
+	/**
+	 * setup_format - setup pixel format cropping rectangle, flip
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe config structure
+	 * @flags: Extra flags for format config
+	 * @index: rectangle index in multirect
+	 */
+	void (*setup_format)(struct dpu_hw_pipe *ctx,
+			const struct dpu_format *fmt, u32 flags,
+			enum dpu_sspp_multirect_index index);
+
+	/**
+	 * setup_rects - setup pipe ROI rectangles
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe config structure
+	 * @index: rectangle index in multirect
+	 */
+	void (*setup_rects)(struct dpu_hw_pipe *ctx,
+			struct dpu_hw_pipe_cfg *cfg,
+			enum dpu_sspp_multirect_index index);
+
+	/**
+	 * setup_pe - setup pipe pixel extension
+	 * @ctx: Pointer to pipe context
+	 * @pe_ext: Pointer to pixel ext settings
+	 */
+	void (*setup_pe)(struct dpu_hw_pipe *ctx,
+			struct dpu_hw_pixel_ext *pe_ext);
+
+	/**
+	 * setup_sourceaddress - setup pipe source addresses
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe config structure
+	 * @index: rectangle index in multirect
+	 */
+	void (*setup_sourceaddress)(struct dpu_hw_pipe *ctx,
+			struct dpu_hw_pipe_cfg *cfg,
+			enum dpu_sspp_multirect_index index);
+
+	/**
+	 * setup_csc - setup color space conversion
+	 * @ctx: Pointer to pipe context
+	 * @data: Pointer to config structure
+	 */
+	void (*setup_csc)(struct dpu_hw_pipe *ctx, struct dpu_csc_cfg *data);
+
+	/**
+	 * setup_solidfill - enable/disable colorfill
+	 * @ctx: Pointer to pipe context
+	 * @const_color: Fill color value
+	 * @flags: Pipe flags
+	 * @index: rectangle index in multirect
+	 */
+	void (*setup_solidfill)(struct dpu_hw_pipe *ctx, u32 color,
+			enum dpu_sspp_multirect_index index);
+
+	/**
+	 * setup_multirect - setup multirect configuration
+	 * @ctx: Pointer to pipe context
+	 * @index: rectangle index in multirect
+	 * @mode: parallel fetch / time multiplex multirect mode
+	 */
+
+	void (*setup_multirect)(struct dpu_hw_pipe *ctx,
+			enum dpu_sspp_multirect_index index,
+			enum dpu_sspp_multirect_mode mode);
+
+	/**
+	 * setup_sharpening - setup sharpening
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to config structure
+	 */
+	void (*setup_sharpening)(struct dpu_hw_pipe *ctx,
+			struct dpu_hw_sharp_cfg *cfg);
+
+	/**
+	 * setup_danger_safe_lut - setup danger safe LUTs
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe QoS configuration
+	 *
+	 */
+	void (*setup_danger_safe_lut)(struct dpu_hw_pipe *ctx,
+			struct dpu_hw_pipe_qos_cfg *cfg);
+
+	/**
+	 * setup_creq_lut - setup CREQ LUT
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe QoS configuration
+	 *
+	 */
+	void (*setup_creq_lut)(struct dpu_hw_pipe *ctx,
+			struct dpu_hw_pipe_qos_cfg *cfg);
+
+	/**
+	 * setup_qos_ctrl - setup QoS control
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe QoS configuration
+	 *
+	 */
+	void (*setup_qos_ctrl)(struct dpu_hw_pipe *ctx,
+			struct dpu_hw_pipe_qos_cfg *cfg);
+
+	/**
+	 * setup_histogram - setup histograms
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to histogram configuration
+	 */
+	void (*setup_histogram)(struct dpu_hw_pipe *ctx,
+			void *cfg);
+
+	/**
+	 * setup_scaler - setup scaler
+	 * @ctx: Pointer to pipe context
+	 * @pipe_cfg: Pointer to pipe configuration
+	 * @pe_cfg: Pointer to pixel extension configuration
+	 * @scaler_cfg: Pointer to scaler configuration
+	 */
+	void (*setup_scaler)(struct dpu_hw_pipe *ctx,
+		struct dpu_hw_pipe_cfg *pipe_cfg,
+		struct dpu_hw_pixel_ext *pe_cfg,
+		void *scaler_cfg);
+
+	/**
+	 * get_scaler_ver - get scaler h/w version
+	 * @ctx: Pointer to pipe context
+	 */
+	u32 (*get_scaler_ver)(struct dpu_hw_pipe *ctx);
+
+	/**
+	 * setup_cdp - setup client driven prefetch
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to cdp configuration
+	 */
+	void (*setup_cdp)(struct dpu_hw_pipe *ctx,
+			struct dpu_hw_pipe_cdp_cfg *cfg);
+};
+
+/**
+ * struct dpu_hw_pipe - pipe description
+ * @base: hardware block base structure
+ * @hw: block hardware details
+ * @catalog: back pointer to catalog
+ * @mdp: pointer to associated mdp portion of the catalog
+ * @idx: pipe index
+ * @cap: pointer to layer_cfg
+ * @ops: pointer to operations possible for this pipe
+ */
+struct dpu_hw_pipe {
+	struct dpu_hw_blk base;
+	struct dpu_hw_blk_reg_map hw;
+	struct dpu_mdss_cfg *catalog;
+	struct dpu_mdp_cfg *mdp;
+
+	/* Pipe */
+	enum dpu_sspp idx;
+	const struct dpu_sspp_cfg *cap;
+
+	/* Ops */
+	struct dpu_hw_sspp_ops ops;
+};
+
+/**
+ * dpu_hw_pipe - convert base object dpu_hw_base to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_pipe *to_dpu_hw_pipe(struct dpu_hw_blk *hw)
+{
+	return container_of(hw, struct dpu_hw_pipe, base);
+}
+
+/**
+ * dpu_hw_sspp_init - initializes the sspp hw driver object.
+ * Should be called once per pipe, before the pipe is accessed.
+ * @idx:  Pipe index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @catalog : Pointer to mdss catalog data
+ * @is_virtual_pipe: whether this is a virtual pipe
+ */
+struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
+		void __iomem *addr, struct dpu_mdss_cfg *catalog,
+		bool is_virtual_pipe);
+
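+/*
+ * Minimal usage sketch (editor's illustration; @mmio, @catalog and @fmt are
+ * assumed to come from the caller, error handling trimmed):
+ *
+ *	struct dpu_hw_pipe *pipe;
+ *
+ *	pipe = dpu_hw_sspp_init(SSPP_VIG0, mmio, catalog, false);
+ *	if (IS_ERR(pipe))
+ *		return PTR_ERR(pipe);
+ *	if (pipe->ops.setup_format)
+ *		pipe->ops.setup_format(pipe, fmt, 0, DPU_SSPP_RECT_SOLO);
+ */
+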
+/**
+ * dpu_hw_sspp_destroy(): Destroys SSPP driver context
+ * should be called during Hw pipe cleanup.
+ * @ctx:  Pointer to SSPP driver context returned by dpu_hw_sspp_init
+ */
+void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx);
+
+#endif /*_DPU_HW_SSPP_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
new file mode 100644
index 0000000..db2798e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -0,0 +1,398 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_top.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define SSPP_SPARE                        0x28
+#define UBWC_STATIC                       0x144
+
+#define FLD_SPLIT_DISPLAY_CMD             BIT(1)
+#define FLD_SMART_PANEL_FREE_RUN          BIT(2)
+#define FLD_INTF_1_SW_TRG_MUX             BIT(4)
+#define FLD_INTF_2_SW_TRG_MUX             BIT(8)
+#define FLD_TE_LINE_INTER_WATERLEVEL_MASK 0xFFFF
+
+#define DANGER_STATUS                     0x360
+#define SAFE_STATUS                       0x364
+
+#define TE_LINE_INTERVAL                  0x3F4
+
+#define TRAFFIC_SHAPER_EN                 BIT(31)
+#define TRAFFIC_SHAPER_RD_CLIENT(num)     (0x030 + ((num) * 4))
+#define TRAFFIC_SHAPER_WR_CLIENT(num)     (0x060 + ((num) * 4))
+#define TRAFFIC_SHAPER_FIXPOINT_FACTOR    4
+
+#define MDP_WD_TIMER_0_CTL                0x380
+#define MDP_WD_TIMER_0_CTL2               0x384
+#define MDP_WD_TIMER_0_LOAD_VALUE         0x388
+#define MDP_WD_TIMER_1_CTL                0x390
+#define MDP_WD_TIMER_1_CTL2               0x394
+#define MDP_WD_TIMER_1_LOAD_VALUE         0x398
+#define MDP_WD_TIMER_2_CTL                0x420
+#define MDP_WD_TIMER_2_CTL2               0x424
+#define MDP_WD_TIMER_2_LOAD_VALUE         0x428
+#define MDP_WD_TIMER_3_CTL                0x430
+#define MDP_WD_TIMER_3_CTL2               0x434
+#define MDP_WD_TIMER_3_LOAD_VALUE         0x438
+#define MDP_WD_TIMER_4_CTL                0x440
+#define MDP_WD_TIMER_4_CTL2               0x444
+#define MDP_WD_TIMER_4_LOAD_VALUE         0x448
+
+#define MDP_TICK_COUNT                    16
+#define XO_CLK_RATE                       19200
+#define MS_TICKS_IN_SEC                   1000
+
+#define CALCULATE_WD_LOAD_VALUE(fps) \
+	((uint32_t)((MS_TICKS_IN_SEC * XO_CLK_RATE) / (MDP_TICK_COUNT * (fps))))
+
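+/*
+ * For example, at 60 fps the load value works out to
+ * (1000 * 19200) / (16 * 60) = 20000 XO ticks per simulated vsync.
+ */
+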
+#define DCE_SEL                           0x450
+
+static void dpu_hw_setup_split_pipe(struct dpu_hw_mdp *mdp,
+		struct split_pipe_cfg *cfg)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 upper_pipe = 0;
+	u32 lower_pipe = 0;
+
+	if (!mdp || !cfg)
+		return;
+
+	c = &mdp->hw;
+
+	if (cfg->en) {
+		if (cfg->mode == INTF_MODE_CMD) {
+			lower_pipe = FLD_SPLIT_DISPLAY_CMD;
+			/* interface controlling sw trigger */
+			if (cfg->intf == INTF_2)
+				lower_pipe |= FLD_INTF_1_SW_TRG_MUX;
+			else
+				lower_pipe |= FLD_INTF_2_SW_TRG_MUX;
+			upper_pipe = lower_pipe;
+		} else {
+			if (cfg->intf == INTF_2) {
+				lower_pipe = FLD_INTF_1_SW_TRG_MUX;
+				upper_pipe = FLD_INTF_2_SW_TRG_MUX;
+			} else {
+				lower_pipe = FLD_INTF_2_SW_TRG_MUX;
+				upper_pipe = FLD_INTF_1_SW_TRG_MUX;
+			}
+		}
+	}
+
+	DPU_REG_WRITE(c, SSPP_SPARE, cfg->split_flush_en ? 0x1 : 0x0);
+	DPU_REG_WRITE(c, SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower_pipe);
+	DPU_REG_WRITE(c, SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper_pipe);
+	DPU_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1);
+}
+
+static void dpu_hw_setup_cdm_output(struct dpu_hw_mdp *mdp,
+		struct cdm_output_cfg *cfg)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 out_ctl = 0;
+
+	if (!mdp || !cfg)
+		return;
+
+	c = &mdp->hw;
+
+	if (cfg->intf_en)
+		out_ctl |= BIT(19);
+
+	DPU_REG_WRITE(c, MDP_OUT_CTL_0, out_ctl);
+}
+
+static bool dpu_hw_setup_clk_force_ctrl(struct dpu_hw_mdp *mdp,
+		enum dpu_clk_ctrl_type clk_ctrl, bool enable)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 reg_off, bit_off;
+	u32 reg_val, new_val;
+	bool clk_forced_on;
+
+	if (!mdp)
+		return false;
+
+	c = &mdp->hw;
+
+	if (clk_ctrl <= DPU_CLK_CTRL_NONE || clk_ctrl >= DPU_CLK_CTRL_MAX)
+		return false;
+
+	reg_off = mdp->caps->clk_ctrls[clk_ctrl].reg_off;
+	bit_off = mdp->caps->clk_ctrls[clk_ctrl].bit_off;
+
+	reg_val = DPU_REG_READ(c, reg_off);
+
+	if (enable)
+		new_val = reg_val | BIT(bit_off);
+	else
+		new_val = reg_val & ~BIT(bit_off);
+
+	DPU_REG_WRITE(c, reg_off, new_val);
+
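+	/* true when the force bit was clear before this call */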
+	clk_forced_on = !(reg_val & BIT(bit_off));
+
+	return clk_forced_on;
+}
+
+
+static void dpu_hw_get_danger_status(struct dpu_hw_mdp *mdp,
+		struct dpu_danger_safe_status *status)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 value;
+
+	if (!mdp || !status)
+		return;
+
+	c = &mdp->hw;
+
+	value = DPU_REG_READ(c, DANGER_STATUS);
+	status->mdp = (value >> 0) & 0x3;
+	status->sspp[SSPP_VIG0] = (value >> 4) & 0x3;
+	status->sspp[SSPP_VIG1] = (value >> 6) & 0x3;
+	status->sspp[SSPP_VIG2] = (value >> 8) & 0x3;
+	status->sspp[SSPP_VIG3] = (value >> 10) & 0x3;
+	status->sspp[SSPP_RGB0] = (value >> 12) & 0x3;
+	status->sspp[SSPP_RGB1] = (value >> 14) & 0x3;
+	status->sspp[SSPP_RGB2] = (value >> 16) & 0x3;
+	status->sspp[SSPP_RGB3] = (value >> 18) & 0x3;
+	status->sspp[SSPP_DMA0] = (value >> 20) & 0x3;
+	status->sspp[SSPP_DMA1] = (value >> 22) & 0x3;
+	status->sspp[SSPP_DMA2] = (value >> 28) & 0x3;
+	status->sspp[SSPP_DMA3] = (value >> 30) & 0x3;
+	status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x3;
+	status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x3;
+}
+
+static void dpu_hw_setup_vsync_source(struct dpu_hw_mdp *mdp,
+		struct dpu_vsync_source_cfg *cfg)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 reg, wd_load_value, wd_ctl, wd_ctl2, i;
+	static const u32 pp_offset[PINGPONG_MAX] = {0xC, 0x8, 0x4, 0x13, 0x18};
+
+	if (!mdp || !cfg || (cfg->pp_count > ARRAY_SIZE(cfg->ppnumber)))
+		return;
+
+	c = &mdp->hw;
+	reg = DPU_REG_READ(c, MDP_VSYNC_SEL);
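+	/* MDP_VSYNC_SEL carries a 4-bit vsync-source field per pingpong */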
+	for (i = 0; i < cfg->pp_count; i++) {
+		int pp_idx = cfg->ppnumber[i] - PINGPONG_0;
+
+		if (pp_idx >= ARRAY_SIZE(pp_offset))
+			continue;
+
+		reg &= ~(0xf << pp_offset[pp_idx]);
+		reg |= (cfg->vsync_source & 0xf) << pp_offset[pp_idx];
+	}
+	DPU_REG_WRITE(c, MDP_VSYNC_SEL, reg);
+
+	if (cfg->vsync_source >= DPU_VSYNC_SOURCE_WD_TIMER_4 &&
+			cfg->vsync_source <= DPU_VSYNC_SOURCE_WD_TIMER_0) {
+		switch (cfg->vsync_source) {
+		case DPU_VSYNC_SOURCE_WD_TIMER_4:
+			wd_load_value = MDP_WD_TIMER_4_LOAD_VALUE;
+			wd_ctl = MDP_WD_TIMER_4_CTL;
+			wd_ctl2 = MDP_WD_TIMER_4_CTL2;
+			break;
+		case DPU_VSYNC_SOURCE_WD_TIMER_3:
+			wd_load_value = MDP_WD_TIMER_3_LOAD_VALUE;
+			wd_ctl = MDP_WD_TIMER_3_CTL;
+			wd_ctl2 = MDP_WD_TIMER_3_CTL2;
+			break;
+		case DPU_VSYNC_SOURCE_WD_TIMER_2:
+			wd_load_value = MDP_WD_TIMER_2_LOAD_VALUE;
+			wd_ctl = MDP_WD_TIMER_2_CTL;
+			wd_ctl2 = MDP_WD_TIMER_2_CTL2;
+			break;
+		case DPU_VSYNC_SOURCE_WD_TIMER_1:
+			wd_load_value = MDP_WD_TIMER_1_LOAD_VALUE;
+			wd_ctl = MDP_WD_TIMER_1_CTL;
+			wd_ctl2 = MDP_WD_TIMER_1_CTL2;
+			break;
+		case DPU_VSYNC_SOURCE_WD_TIMER_0:
+		default:
+			wd_load_value = MDP_WD_TIMER_0_LOAD_VALUE;
+			wd_ctl = MDP_WD_TIMER_0_CTL;
+			wd_ctl2 = MDP_WD_TIMER_0_CTL2;
+			break;
+		}
+
+		DPU_REG_WRITE(c, wd_load_value,
+			CALCULATE_WD_LOAD_VALUE(cfg->frame_rate));
+
+		DPU_REG_WRITE(c, wd_ctl, BIT(0)); /* clear timer */
+		reg = DPU_REG_READ(c, wd_ctl2);
+		reg |= BIT(8);		/* enable heartbeat timer */
+		reg |= BIT(0);		/* enable WD timer */
+		DPU_REG_WRITE(c, wd_ctl2, reg);
+
+		/* make sure that timers are enabled/disabled for vsync state */
+		wmb();
+	}
+}
+
+static void dpu_hw_get_safe_status(struct dpu_hw_mdp *mdp,
+		struct dpu_danger_safe_status *status)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 value;
+
+	if (!mdp || !status)
+		return;
+
+	c = &mdp->hw;
+
+	value = DPU_REG_READ(c, SAFE_STATUS);
+	status->mdp = (value >> 0) & 0x1;
+	status->sspp[SSPP_VIG0] = (value >> 4) & 0x1;
+	status->sspp[SSPP_VIG1] = (value >> 6) & 0x1;
+	status->sspp[SSPP_VIG2] = (value >> 8) & 0x1;
+	status->sspp[SSPP_VIG3] = (value >> 10) & 0x1;
+	status->sspp[SSPP_RGB0] = (value >> 12) & 0x1;
+	status->sspp[SSPP_RGB1] = (value >> 14) & 0x1;
+	status->sspp[SSPP_RGB2] = (value >> 16) & 0x1;
+	status->sspp[SSPP_RGB3] = (value >> 18) & 0x1;
+	status->sspp[SSPP_DMA0] = (value >> 20) & 0x1;
+	status->sspp[SSPP_DMA1] = (value >> 22) & 0x1;
+	status->sspp[SSPP_DMA2] = (value >> 28) & 0x1;
+	status->sspp[SSPP_DMA3] = (value >> 30) & 0x1;
+	status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x1;
+	status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x1;
+}
+
+static void dpu_hw_reset_ubwc(struct dpu_hw_mdp *mdp, struct dpu_mdss_cfg *m)
+{
+	struct dpu_hw_blk_reg_map c;
+
+	if (!mdp || !m)
+		return;
+
+	if (!IS_UBWC_20_SUPPORTED(m->caps->ubwc_version))
+		return;
+
+	/* force blk offset to zero to access beginning of register region */
+	c = mdp->hw;
+	c.blk_off = 0x0;
+	DPU_REG_WRITE(&c, UBWC_STATIC, m->mdp[0].ubwc_static);
+}
+
+static void dpu_hw_intf_audio_select(struct dpu_hw_mdp *mdp)
+{
+	struct dpu_hw_blk_reg_map *c;
+
+	if (!mdp)
+		return;
+
+	c = &mdp->hw;
+
+	DPU_REG_WRITE(c, HDMI_DP_CORE_SELECT, 0x1);
+}
+
+static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
+		unsigned long cap)
+{
+	ops->setup_split_pipe = dpu_hw_setup_split_pipe;
+	ops->setup_cdm_output = dpu_hw_setup_cdm_output;
+	ops->setup_clk_force_ctrl = dpu_hw_setup_clk_force_ctrl;
+	ops->get_danger_status = dpu_hw_get_danger_status;
+	ops->setup_vsync_source = dpu_hw_setup_vsync_source;
+	ops->get_safe_status = dpu_hw_get_safe_status;
+	ops->reset_ubwc = dpu_hw_reset_ubwc;
+	ops->intf_audio_select = dpu_hw_intf_audio_select;
+}
+
+static const struct dpu_mdp_cfg *_top_offset(enum dpu_mdp mdp,
+		const struct dpu_mdss_cfg *m,
+		void __iomem *addr,
+		struct dpu_hw_blk_reg_map *b)
+{
+	int i;
+
+	if (!m || !addr || !b)
+		return ERR_PTR(-EINVAL);
+
+	for (i = 0; i < m->mdp_count; i++) {
+		if (mdp == m->mdp[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->mdp[i].base;
+			b->length = m->mdp[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = DPU_DBG_MASK_TOP;
+			return &m->mdp[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
+struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
+		void __iomem *addr,
+		const struct dpu_mdss_cfg *m)
+{
+	struct dpu_hw_mdp *mdp;
+	const struct dpu_mdp_cfg *cfg;
+	int rc;
+
+	if (!addr || !m)
+		return ERR_PTR(-EINVAL);
+
+	mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
+	if (!mdp)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _top_offset(idx, m, addr, &mdp->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(mdp);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Assign ops */
+	mdp->idx = idx;
+	mdp->caps = cfg;
+	_setup_mdp_ops(&mdp->ops, mdp->caps->features);
+
+	rc = dpu_hw_blk_init(&mdp->base, DPU_HW_BLK_TOP, idx, &dpu_hw_ops);
+	if (rc) {
+		DPU_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
+	dpu_dbg_set_dpu_top_offset(mdp->hw.blk_off);
+
+	return mdp;
+
+blk_init_error:
+	kzfree(mdp);
+
+	return ERR_PTR(rc);
+}
+
+void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp)
+{
+	if (mdp)
+		dpu_hw_blk_destroy(&mdp->base);
+	kfree(mdp);
+}
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
new file mode 100644
index 0000000..899925a
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
@@ -0,0 +1,202 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_TOP_H
+#define _DPU_HW_TOP_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_mdp;
+
+/**
+ * struct traffic_shaper_cfg: traffic shaper configuration
+ * @en        : enable/disable traffic shaper
+ * @rd_client : true if read client; false if write client
+ * @client_id : client identifier
+ * @bpc_denom : denominator of byte per clk
+ * @bpc_numer : numerator of byte per clk
+ */
+struct traffic_shaper_cfg {
+	bool en;
+	bool rd_client;
+	u32 client_id;
+	u32 bpc_denom;
+	u64 bpc_numer;
+};
+
+/**
+ * struct split_pipe_cfg - pipe configuration for dual display panels
+ * @en        : Enable/disable dual pipe configuration
+ * @mode      : Panel interface mode
+ * @intf      : Interface id for main control path
+ * @split_flush_en: Allows both the paths to be flushed when master path is
+ *              flushed
+ */
+struct split_pipe_cfg {
+	bool en;
+	enum dpu_intf_mode mode;
+	enum dpu_intf intf;
+	bool split_flush_en;
+};
+
+/**
+ * struct cdm_output_cfg: output configuration for cdm
+ * @intf_en   : enable/disable interface output
+ */
+struct cdm_output_cfg {
+	bool intf_en;
+};
+
+/**
+ * struct dpu_danger_safe_status: danger and safe status signals
+ * @mdp: top level status
+ * @sspp: source pipe status
+ */
+struct dpu_danger_safe_status {
+	u8 mdp;
+	u8 sspp[SSPP_MAX];
+};
+
+/**
+ * struct dpu_vsync_source_cfg - configure vsync source and configure the
+ *                                    watchdog timers if required.
+ * @pp_count: number of ping pongs active
+ * @frame_rate: Display frame rate
+ * @ppnumber: ping pong index array
+ * @vsync_source: vsync source selection
+ */
+struct dpu_vsync_source_cfg {
+	u32 pp_count;
+	u32 frame_rate;
+	u32 ppnumber[PINGPONG_MAX];
+	u32 vsync_source;
+};
+
+/**
+ * struct dpu_hw_mdp_ops - interface to the MDP TOP Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled.
+ * @setup_split_pipe : Programs the pipe control registers
+ * @setup_cdm_output : programs cdm control
+ * @setup_traffic_shaper : programs traffic shaper control
+ */
+struct dpu_hw_mdp_ops {
+	/**
+	 * setup_split_pipe() : Registers are not double buffered, so this
+	 * function should be called before timing control enable
+	 * @mdp  : mdp top context driver
+	 * @cfg  : upper and lower part of pipe configuration
+	 */
+	void (*setup_split_pipe)(struct dpu_hw_mdp *mdp,
+			struct split_pipe_cfg *cfg);
+
+	/**
+	 * setup_cdm_output() : Setup selection control of the cdm data path
+	 * @mdp  : mdp top context driver
+	 * @cfg  : cdm output configuration
+	 */
+	void (*setup_cdm_output)(struct dpu_hw_mdp *mdp,
+			struct cdm_output_cfg *cfg);
+
+	/**
+	 * setup_traffic_shaper() : Setup traffic shaper control
+	 * @mdp  : mdp top context driver
+	 * @cfg  : traffic shaper configuration
+	 */
+	void (*setup_traffic_shaper)(struct dpu_hw_mdp *mdp,
+			struct traffic_shaper_cfg *cfg);
+
+	/**
+	 * setup_clk_force_ctrl - set clock force control
+	 * @mdp: mdp top context driver
+	 * @clk_ctrl: clock to be controlled
+	 * @enable: force on enable
+	 * @return: if the clock is forced-on by this function
+	 */
+	bool (*setup_clk_force_ctrl)(struct dpu_hw_mdp *mdp,
+			enum dpu_clk_ctrl_type clk_ctrl, bool enable);
+
+	/**
+	 * get_danger_status - get danger status
+	 * @mdp: mdp top context driver
+	 * @status: Pointer to danger safe status
+	 */
+	void (*get_danger_status)(struct dpu_hw_mdp *mdp,
+			struct dpu_danger_safe_status *status);
+
+	/**
+	 * setup_vsync_source - setup vsync source configuration details
+	 * @mdp: mdp top context driver
+	 * @cfg: vsync source selection configuration
+	 */
+	void (*setup_vsync_source)(struct dpu_hw_mdp *mdp,
+				struct dpu_vsync_source_cfg *cfg);
+
+	/**
+	 * get_safe_status - get safe status
+	 * @mdp: mdp top context driver
+	 * @status: Pointer to danger safe status
+	 */
+	void (*get_safe_status)(struct dpu_hw_mdp *mdp,
+			struct dpu_danger_safe_status *status);
+
+	/**
+	 * reset_ubwc - reset top level UBWC configuration
+	 * @mdp: mdp top context driver
+	 * @m: pointer to mdss catalog data
+	 */
+	void (*reset_ubwc)(struct dpu_hw_mdp *mdp, struct dpu_mdss_cfg *m);
+
+	/**
+	 * intf_audio_select - select the external interface for audio
+	 * @mdp: mdp top context driver
+	 */
+	void (*intf_audio_select)(struct dpu_hw_mdp *mdp);
+};
+
+struct dpu_hw_mdp {
+	struct dpu_hw_blk base;
+	struct dpu_hw_blk_reg_map hw;
+
+	/* top */
+	enum dpu_mdp idx;
+	const struct dpu_mdp_cfg *caps;
+
+	/* ops */
+	struct dpu_hw_mdp_ops ops;
+};
+
+/**
+ * to_dpu_hw_mdp - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_mdp *to_dpu_hw_mdp(struct dpu_hw_blk *hw)
+{
+	return container_of(hw, struct dpu_hw_mdp, base);
+}
+
+/**
+ * dpu_hw_mdptop_init - initializes the top driver for the passed idx
+ * @idx:  Interface index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m:    Pointer to mdss catalog data
+ */
+struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
+		void __iomem *addr,
+		const struct dpu_mdss_cfg *m);
+
+void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp);
+
+#endif /*_DPU_HW_TOP_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
index 1ba571e..4cabae4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
@@ -92,59 +92,6 @@
 	return &dpu_hw_util_log_mask;
 }
 
-void dpu_set_scaler_v2(struct dpu_hw_scaler3_cfg *cfg,
-		const struct dpu_drm_scaler_v2 *scale_v2)
-{
-	int i;
-
-	cfg->enable = scale_v2->enable;
-	cfg->dir_en = scale_v2->dir_en;
-
-	for (i = 0; i < DPU_MAX_PLANES; i++) {
-		cfg->init_phase_x[i] = scale_v2->init_phase_x[i];
-		cfg->phase_step_x[i] = scale_v2->phase_step_x[i];
-		cfg->init_phase_y[i] = scale_v2->init_phase_y[i];
-		cfg->phase_step_y[i] = scale_v2->phase_step_y[i];
-
-		cfg->preload_x[i] = scale_v2->preload_x[i];
-		cfg->preload_y[i] = scale_v2->preload_y[i];
-		cfg->src_width[i] = scale_v2->src_width[i];
-		cfg->src_height[i] = scale_v2->src_height[i];
-	}
-
-	cfg->dst_width = scale_v2->dst_width;
-	cfg->dst_height = scale_v2->dst_height;
-
-	cfg->y_rgb_filter_cfg = scale_v2->y_rgb_filter_cfg;
-	cfg->uv_filter_cfg = scale_v2->uv_filter_cfg;
-	cfg->alpha_filter_cfg = scale_v2->alpha_filter_cfg;
-	cfg->blend_cfg = scale_v2->blend_cfg;
-
-	cfg->lut_flag = scale_v2->lut_flag;
-	cfg->dir_lut_idx = scale_v2->dir_lut_idx;
-	cfg->y_rgb_cir_lut_idx = scale_v2->y_rgb_cir_lut_idx;
-	cfg->uv_cir_lut_idx = scale_v2->uv_cir_lut_idx;
-	cfg->y_rgb_sep_lut_idx = scale_v2->y_rgb_sep_lut_idx;
-	cfg->uv_sep_lut_idx = scale_v2->uv_sep_lut_idx;
-
-	cfg->de.enable = scale_v2->de.enable;
-	cfg->de.sharpen_level1 = scale_v2->de.sharpen_level1;
-	cfg->de.sharpen_level2 = scale_v2->de.sharpen_level2;
-	cfg->de.clip = scale_v2->de.clip;
-	cfg->de.limit = scale_v2->de.limit;
-	cfg->de.thr_quiet = scale_v2->de.thr_quiet;
-	cfg->de.thr_dieout = scale_v2->de.thr_dieout;
-	cfg->de.thr_low = scale_v2->de.thr_low;
-	cfg->de.thr_high = scale_v2->de.thr_high;
-	cfg->de.prec_shift = scale_v2->de.prec_shift;
-
-	for (i = 0; i < DPU_MAX_DE_CURVES; i++) {
-		cfg->de.adjust_a[i] = scale_v2->de.adjust_a[i];
-		cfg->de.adjust_b[i] = scale_v2->de.adjust_b[i];
-		cfg->de.adjust_c[i] = scale_v2->de.adjust_c[i];
-	}
-}
-
 static void _dpu_hw_setup_scaler3_lut(struct dpu_hw_blk_reg_map *c,
 		struct dpu_hw_scaler3_cfg *scaler3_cfg, u32 offset)
 {
@@ -419,34 +366,3 @@
 	DPU_REG_WRITE(c, csc_reg_off + 0x3c, data->csc_post_bv[1]);
 	DPU_REG_WRITE(c, csc_reg_off + 0x40, data->csc_post_bv[2]);
 }
-
-/**
- * _dpu_copy_formats   - copy formats from src_list to dst_list
- * @dst_list:          pointer to destination list where to copy formats
- * @dst_list_size:     size of destination list
- * @dst_list_pos:      starting position on the list where to copy formats
- * @src_list:          pointer to source list where to copy formats from
- * @src_list_size:     size of source list
- * Return: number of elements populated
- */
-uint32_t dpu_copy_formats(
-		struct dpu_format_extended *dst_list,
-		uint32_t dst_list_size,
-		uint32_t dst_list_pos,
-		const struct dpu_format_extended *src_list,
-		uint32_t src_list_size)
-{
-	uint32_t cur_pos, i;
-
-	if (!dst_list || !src_list || (dst_list_pos >= (dst_list_size - 1)))
-		return 0;
-
-	for (i = 0, cur_pos = dst_list_pos;
-		(cur_pos < (dst_list_size - 1)) && (i < src_list_size)
-		&& src_list[i].fourcc_format; ++i, ++cur_pos)
-		dst_list[cur_pos] = src_list[i];
-
-	dst_list[cur_pos].fourcc_format = 0;
-
-	return i;
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
index 42f1b22..1240f50 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -333,9 +333,6 @@
 
 void *dpu_hw_util_get_dir(void);
 
-void dpu_set_scaler_v2(struct dpu_hw_scaler3_cfg *cfg,
-		const struct dpu_drm_scaler_v2 *scale_v2);
-
 void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
 		struct dpu_hw_scaler3_cfg *scaler3_cfg,
 		u32 scaler_offset, u32 scaler_version,
@@ -348,11 +345,4 @@
 		u32 csc_reg_off,
 		struct dpu_csc_cfg *data, bool csc10);
 
-uint32_t dpu_copy_formats(
-		struct dpu_format_extended *dst_list,
-		uint32_t dst_list_size,
-		uint32_t dst_list_pos,
-		const struct dpu_format_extended *src_list,
-		uint32_t src_list_size);
-
 #endif /* _DPU_HW_UTIL_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
new file mode 100644
index 0000000..d439055
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
@@ -0,0 +1,275 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_dbg.h"
+
+#define VBIF_VERSION			0x0000
+#define VBIF_CLK_FORCE_CTRL0		0x0008
+#define VBIF_CLK_FORCE_CTRL1		0x000C
+#define VBIF_QOS_REMAP_00		0x0020
+#define VBIF_QOS_REMAP_01		0x0024
+#define VBIF_QOS_REMAP_10		0x0028
+#define VBIF_QOS_REMAP_11		0x002C
+#define VBIF_WRITE_GATHER_EN		0x00AC
+#define VBIF_IN_RD_LIM_CONF0		0x00B0
+#define VBIF_IN_RD_LIM_CONF1		0x00B4
+#define VBIF_IN_RD_LIM_CONF2		0x00B8
+#define VBIF_IN_WR_LIM_CONF0		0x00C0
+#define VBIF_IN_WR_LIM_CONF1		0x00C4
+#define VBIF_IN_WR_LIM_CONF2		0x00C8
+#define VBIF_OUT_RD_LIM_CONF0		0x00D0
+#define VBIF_OUT_WR_LIM_CONF0		0x00D4
+#define VBIF_OUT_AXI_AMEMTYPE_CONF0	0x0160
+#define VBIF_OUT_AXI_AMEMTYPE_CONF1	0x0164
+#define VBIF_XIN_PND_ERR		0x0190
+#define VBIF_XIN_SRC_ERR		0x0194
+#define VBIF_XIN_CLR_ERR		0x019C
+#define VBIF_XIN_HALT_CTRL0		0x0200
+#define VBIF_XIN_HALT_CTRL1		0x0204
+#define VBIF_XINL_QOS_RP_REMAP_000	0x0550
+#define VBIF_XINL_QOS_LVL_REMAP_000	0x0590
+
+static void dpu_hw_clear_errors(struct dpu_hw_vbif *vbif,
+		u32 *pnd_errors, u32 *src_errors)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 pnd, src;
+
+	if (!vbif)
+		return;
+	c = &vbif->hw;
+	pnd = DPU_REG_READ(c, VBIF_XIN_PND_ERR);
+	src = DPU_REG_READ(c, VBIF_XIN_SRC_ERR);
+
+	if (pnd_errors)
+		*pnd_errors = pnd;
+	if (src_errors)
+		*src_errors = src;
+
+	DPU_REG_WRITE(c, VBIF_XIN_CLR_ERR, pnd | src);
+}
+
+static void dpu_hw_set_mem_type(struct dpu_hw_vbif *vbif,
+		u32 xin_id, u32 value)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 reg_off;
+	u32 bit_off;
+	u32 reg_val;
+
+	/*
+	 * Assume 4 bits per field and 8 fields per 32-bit register,
+	 * so at most 16 fields across the two registers
+	 */
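+	/*
+	 * Example: xin_id 10 selects VBIF_OUT_AXI_AMEMTYPE_CONF1 with
+	 * bit_off = (10 - 8) * 4 = 8, so the 3-bit memory type lands in
+	 * bits [10:8] of that register.
+	 */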
+	if (!vbif || xin_id >= MAX_XIN_COUNT || xin_id >= 16)
+		return;
+
+	c = &vbif->hw;
+
+	if (xin_id >= 8) {
+		xin_id -= 8;
+		reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF1;
+	} else {
+		reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF0;
+	}
+	bit_off = (xin_id & 0x7) * 4;
+	reg_val = DPU_REG_READ(c, reg_off);
+	reg_val &= ~(0x7 << bit_off);
+	reg_val |= (value & 0x7) << bit_off;
+	DPU_REG_WRITE(c, reg_off, reg_val);
+}
+
+static void dpu_hw_set_limit_conf(struct dpu_hw_vbif *vbif,
+		u32 xin_id, bool rd, u32 limit)
+{
+	struct dpu_hw_blk_reg_map *c = &vbif->hw;
+	u32 reg_val;
+	u32 reg_off;
+	u32 bit_off;
+
+	if (rd)
+		reg_off = VBIF_IN_RD_LIM_CONF0;
+	else
+		reg_off = VBIF_IN_WR_LIM_CONF0;
+
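+	/*
+	 * Each CONF register packs four 8-bit limits; e.g. xin_id 6 lands
+	 * in CONF1 (reg_off + 4) at bits [23:16].
+	 */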
+	reg_off += (xin_id / 4) * 4;
+	bit_off = (xin_id % 4) * 8;
+	reg_val = DPU_REG_READ(c, reg_off);
+	reg_val &= ~(0xFF << bit_off);
+	reg_val |= (limit) << bit_off;
+	DPU_REG_WRITE(c, reg_off, reg_val);
+}
+
+static u32 dpu_hw_get_limit_conf(struct dpu_hw_vbif *vbif,
+		u32 xin_id, bool rd)
+{
+	struct dpu_hw_blk_reg_map *c = &vbif->hw;
+	u32 reg_val;
+	u32 reg_off;
+	u32 bit_off;
+	u32 limit;
+
+	if (rd)
+		reg_off = VBIF_IN_RD_LIM_CONF0;
+	else
+		reg_off = VBIF_IN_WR_LIM_CONF0;
+
+	reg_off += (xin_id / 4) * 4;
+	bit_off = (xin_id % 4) * 8;
+	reg_val = DPU_REG_READ(c, reg_off);
+	limit = (reg_val >> bit_off) & 0xFF;
+
+	return limit;
+}
+
+static void dpu_hw_set_halt_ctrl(struct dpu_hw_vbif *vbif,
+		u32 xin_id, bool enable)
+{
+	struct dpu_hw_blk_reg_map *c = &vbif->hw;
+	u32 reg_val;
+
+	reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL0);
+
+	if (enable)
+		reg_val |= BIT(xin_id);
+	else
+		reg_val &= ~BIT(xin_id);
+
+	DPU_REG_WRITE(c, VBIF_XIN_HALT_CTRL0, reg_val);
+}
+
+static bool dpu_hw_get_halt_ctrl(struct dpu_hw_vbif *vbif,
+		u32 xin_id)
+{
+	struct dpu_hw_blk_reg_map *c = &vbif->hw;
+	u32 reg_val;
+
+	reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL1);
+
+	return (reg_val & BIT(xin_id)) ? true : false;
+}
+
+static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif,
+		u32 xin_id, u32 level, u32 remap_level)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 reg_val, reg_val_lvl, mask, reg_high, reg_shift;
+
+	if (!vbif)
+		return;
+
+	c = &vbif->hw;
+
+	reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
+	reg_shift = (xin_id & 0x7) * 4;
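+	/*
+	 * Worked example: xin_id 9, level 1 -> reg_high = ((9 & 0x8) >> 3) * 4
+	 * + (1 * 8) = 12 and reg_shift = (9 & 0x7) * 4 = 4, so the 3-bit remap
+	 * value lands in bits [6:4] of the remap registers at base + 12.
+	 */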
+
+	reg_val = DPU_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high);
+	reg_val_lvl = DPU_REG_READ(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high);
+
+	mask = 0x7 << reg_shift;
+
+	reg_val &= ~mask;
+	reg_val |= (remap_level << reg_shift) & mask;
+
+	reg_val_lvl &= ~mask;
+	reg_val_lvl |= (remap_level << reg_shift) & mask;
+
+	DPU_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val);
+	DPU_REG_WRITE(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high, reg_val_lvl);
+}
+
+static void dpu_hw_set_write_gather_en(struct dpu_hw_vbif *vbif, u32 xin_id)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 reg_val;
+
+	if (!vbif || xin_id >= MAX_XIN_COUNT)
+		return;
+
+	c = &vbif->hw;
+
+	reg_val = DPU_REG_READ(c, VBIF_WRITE_GATHER_EN);
+	reg_val |= BIT(xin_id);
+	DPU_REG_WRITE(c, VBIF_WRITE_GATHER_EN, reg_val);
+}
+
+static void _setup_vbif_ops(struct dpu_hw_vbif_ops *ops,
+		unsigned long cap)
+{
+	ops->set_limit_conf = dpu_hw_set_limit_conf;
+	ops->get_limit_conf = dpu_hw_get_limit_conf;
+	ops->set_halt_ctrl = dpu_hw_set_halt_ctrl;
+	ops->get_halt_ctrl = dpu_hw_get_halt_ctrl;
+	if (test_bit(DPU_VBIF_QOS_REMAP, &cap))
+		ops->set_qos_remap = dpu_hw_set_qos_remap;
+	ops->set_mem_type = dpu_hw_set_mem_type;
+	ops->clear_errors = dpu_hw_clear_errors;
+	ops->set_write_gather_en = dpu_hw_set_write_gather_en;
+}
+
+static const struct dpu_vbif_cfg *_top_offset(enum dpu_vbif vbif,
+		const struct dpu_mdss_cfg *m,
+		void __iomem *addr,
+		struct dpu_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->vbif_count; i++) {
+		if (vbif == m->vbif[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->vbif[i].base;
+			b->length = m->vbif[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = DPU_DBG_MASK_VBIF;
+			return &m->vbif[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+struct dpu_hw_vbif *dpu_hw_vbif_init(enum dpu_vbif idx,
+		void __iomem *addr,
+		const struct dpu_mdss_cfg *m)
+{
+	struct dpu_hw_vbif *c;
+	const struct dpu_vbif_cfg *cfg;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _top_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Assign ops */
+	c->idx = idx;
+	c->cap = cfg;
+	_setup_vbif_ops(&c->ops, c->cap->features);
+
+	/* no need to register sub-range in dpu dbg, dump entire vbif io base */
+
+	return c;
+}
+
+void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif)
+{
+	kfree(vbif);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
new file mode 100644
index 0000000..471ff67
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
@@ -0,0 +1,128 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_VBIF_H
+#define _DPU_HW_VBIF_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+struct dpu_hw_vbif;
+
+/**
+ * struct dpu_hw_vbif_ops : Interface to the VBIF hardware driver functions
+ *  Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_vbif_ops {
+	/**
+	 * set_limit_conf - set transaction limit config
+	 * @vbif: vbif context driver
+	 * @xin_id: client interface identifier
+	 * @rd: true for read limit; false for write limit
+	 * @limit: outstanding transaction limit
+	 */
+	void (*set_limit_conf)(struct dpu_hw_vbif *vbif,
+			u32 xin_id, bool rd, u32 limit);
+
+	/**
+	 * get_limit_conf - get transaction limit config
+	 * @vbif: vbif context driver
+	 * @xin_id: client interface identifier
+	 * @rd: true for read limit; false for write limit
+	 * @return: outstanding transaction limit
+	 */
+	u32 (*get_limit_conf)(struct dpu_hw_vbif *vbif,
+			u32 xin_id, bool rd);
+
+	/**
+	 * set_halt_ctrl - set halt control
+	 * @vbif: vbif context driver
+	 * @xin_id: client interface identifier
+	 * @enable: halt control enable
+	 */
+	void (*set_halt_ctrl)(struct dpu_hw_vbif *vbif,
+			u32 xin_id, bool enable);
+
+	/**
+	 * get_halt_ctrl - get halt control
+	 * @vbif: vbif context driver
+	 * @xin_id: client interface identifier
+	 * @return: halt acknowledge status
+	 */
+	bool (*get_halt_ctrl)(struct dpu_hw_vbif *vbif,
+			u32 xin_id);
+
+	/**
+	 * set_qos_remap - set QoS priority remap
+	 * @vbif: vbif context driver
+	 * @xin_id: client interface identifier
+	 * @level: priority level
+	 * @remap_level: remapped level
+	 */
+	void (*set_qos_remap)(struct dpu_hw_vbif *vbif,
+			u32 xin_id, u32 level, u32 remap_level);
+
+	/**
+	 * set_mem_type - set memory type
+	 * @vbif: vbif context driver
+	 * @xin_id: client interface identifier
+	 * @value: memory type value
+	 */
+	void (*set_mem_type)(struct dpu_hw_vbif *vbif,
+			u32 xin_id, u32 value);
+
+	/**
+	 * clear_errors - clear any vbif errors
+	 *	This function clears any detected pending/source errors
+	 *	on the VBIF interface, and optionally returns the detected
+	 *	error mask(s).
+	 * @vbif: vbif context driver
+	 * @pnd_errors: pointer to pending error reporting variable
+	 * @src_errors: pointer to source error reporting variable
+	 */
+	void (*clear_errors)(struct dpu_hw_vbif *vbif,
+		u32 *pnd_errors, u32 *src_errors);
+
+	/**
+	 * set_write_gather_en - set write_gather enable
+	 * @vbif: vbif context driver
+	 * @xin_id: client interface identifier
+	 */
+	void (*set_write_gather_en)(struct dpu_hw_vbif *vbif, u32 xin_id);
+};
+
+struct dpu_hw_vbif {
+	/* base */
+	struct dpu_hw_blk_reg_map hw;
+
+	/* vbif */
+	enum dpu_vbif idx;
+	const struct dpu_vbif_cfg *cap;
+
+	/* ops */
+	struct dpu_hw_vbif_ops ops;
+};
+
+/**
+ * dpu_hw_vbif_init - initializes the vbif driver for the passed interface idx
+ * @idx:  Interface index for which driver object is required
+ * @addr: Mapped register io address of MDSS
+ * @m:    Pointer to mdss catalog data
+ */
+struct dpu_hw_vbif *dpu_hw_vbif_init(enum dpu_vbif idx,
+		void __iomem *addr,
+		const struct dpu_mdss_cfg *m);
+
+void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif);
+
+#endif /*_DPU_HW_VBIF_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
new file mode 100644
index 0000000..5b2bc9b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
@@ -0,0 +1,56 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HWIO_H
+#define _DPU_HWIO_H
+
+#include "dpu_hw_util.h"
+
+/*
+ * MDP TOP block register offsets and bit-field defines
+ */
+#define DISP_INTF_SEL                   0x004
+#define INTR_EN                         0x010
+#define INTR_STATUS                     0x014
+#define INTR_CLEAR                      0x018
+#define INTR2_EN                        0x008
+#define INTR2_STATUS                    0x00c
+#define INTR2_CLEAR                     0x02c
+#define HIST_INTR_EN                    0x01c
+#define HIST_INTR_STATUS                0x020
+#define HIST_INTR_CLEAR                 0x024
+#define INTF_INTR_EN                    0x1C0
+#define INTF_INTR_STATUS                0x1C4
+#define INTF_INTR_CLEAR                 0x1C8
+#define SPLIT_DISPLAY_EN                0x2F4
+#define SPLIT_DISPLAY_UPPER_PIPE_CTRL   0x2F8
+#define DSPP_IGC_COLOR0_RAM_LUTN        0x300
+#define DSPP_IGC_COLOR1_RAM_LUTN        0x304
+#define DSPP_IGC_COLOR2_RAM_LUTN        0x308
+#define HW_EVENTS_CTL                   0x37C
+#define CLK_CTRL3                       0x3A8
+#define CLK_STATUS3                     0x3AC
+#define CLK_CTRL4                       0x3B0
+#define CLK_STATUS4                     0x3B4
+#define CLK_CTRL5                       0x3B8
+#define CLK_STATUS5                     0x3BC
+#define CLK_CTRL7                       0x3D0
+#define CLK_STATUS7                     0x3D4
+#define SPLIT_DISPLAY_LOWER_PIPE_CTRL   0x3F0
+#define SPLIT_DISPLAY_TE_LINE_INTERVAL  0x3F4
+#define INTF_SW_RESET_MASK              0x3FC
+#define HDMI_DP_CORE_SELECT             0x408
+#define MDP_OUT_CTL_0                   0x410
+#define MDP_VSYNC_SEL                   0x414
+#define DCE_SEL                         0x450
+
+#endif /*_DPU_HWIO_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
new file mode 100644
index 0000000..b557687
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
@@ -0,0 +1,203 @@
+/* Copyright (c) 2012-2015, 2017-2018, The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk/clk-conf.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+
+#include "dpu_io_util.h"
+
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk)
+{
+	int i;
+
+	for (i = num_clk - 1; i >= 0; i--) {
+		if (clk_arry[i].clk)
+			clk_put(clk_arry[i].clk);
+		clk_arry[i].clk = NULL;
+	}
+}
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < num_clk; i++) {
+		clk_arry[i].clk = clk_get(dev, clk_arry[i].clk_name);
+		rc = PTR_ERR_OR_ZERO(clk_arry[i].clk);
+		if (rc) {
+			DEV_ERR("%pS->%s: '%s' get failed. rc=%d\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name, rc);
+			goto error;
+		}
+	}
+
+	return rc;
+
+error:
+	for (i--; i >= 0; i--) {
+		if (clk_arry[i].clk)
+			clk_put(clk_arry[i].clk);
+		clk_arry[i].clk = NULL;
+	}
+
+	return rc;
+}
+
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < num_clk; i++) {
+		if (clk_arry[i].clk) {
+			if (clk_arry[i].type != DSS_CLK_AHB) {
+				DEV_DBG("%pS->%s: '%s' rate %ld\n",
+					__builtin_return_address(0), __func__,
+					clk_arry[i].clk_name,
+					clk_arry[i].rate);
+				rc = clk_set_rate(clk_arry[i].clk,
+					clk_arry[i].rate);
+				if (rc) {
+					DEV_ERR("%pS->%s: %s failed. rc=%d\n",
+						__builtin_return_address(0),
+						__func__,
+						clk_arry[i].clk_name, rc);
+					break;
+				}
+			}
+		} else {
+			DEV_ERR("%pS->%s: '%s' is not available\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name);
+			rc = -EPERM;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
+{
+	int i, rc = 0;
+
+	if (enable) {
+		for (i = 0; i < num_clk; i++) {
+			DEV_DBG("%pS->%s: enable '%s'\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name);
+			if (clk_arry[i].clk) {
+				rc = clk_prepare_enable(clk_arry[i].clk);
+				if (rc)
+					DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
+						__builtin_return_address(0),
+						__func__,
+						clk_arry[i].clk_name, rc);
+			} else {
+				DEV_ERR("%pS->%s: '%s' is not available\n",
+					__builtin_return_address(0), __func__,
+					clk_arry[i].clk_name);
+				rc = -EPERM;
+			}
+
+			if (rc) {
+				/* roll back the clocks enabled so far */
+				msm_dss_enable_clk(clk_arry, i, false);
+				break;
+			}
+		}
+	} else {
+		for (i = num_clk - 1; i >= 0; i--) {
+			DEV_DBG("%pS->%s: disable '%s'\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name);
+
+			if (clk_arry[i].clk)
+				clk_disable_unprepare(clk_arry[i].clk);
+			else
+				DEV_ERR("%pS->%s: '%s' is not available\n",
+					__builtin_return_address(0), __func__,
+					clk_arry[i].clk_name);
+		}
+	}
+
+	return rc;
+}
+
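+/*
+ * Parse "clocks"/"clock-names" from the device node. A hypothetical DT
+ * snippet this would consume (clock names are illustrative only):
+ *
+ *	clock-names = "iface", "bus", "core";
+ *	clocks = <&dispcc 0>, <&dispcc 1>, <&dispcc 2>;
+ *
+ * Every clock starts out as DSS_CLK_AHB (rate left to the RPM); any clock
+ * already reporting a nonzero rate is promoted to DSS_CLK_PCLK below.
+ */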
+int msm_dss_parse_clock(struct platform_device *pdev,
+			struct dss_module_power *mp)
+{
+	int i, rc = 0;
+	const char *clock_name;
+	int num_clk = 0;
+
+	if (!pdev || !mp)
+		return -EINVAL;
+
+	mp->num_clk = 0;
+	num_clk = of_property_count_strings(pdev->dev.of_node, "clock-names");
+	if (num_clk <= 0) {
+		pr_debug("clocks are not defined\n");
+		return 0;
+	}
+
+	mp->clk_config = devm_kcalloc(&pdev->dev,
+				      num_clk, sizeof(struct dss_clk),
+				      GFP_KERNEL);
+	if (!mp->clk_config)
+		return -ENOMEM;
+
+	for (i = 0; i < num_clk; i++) {
+		rc = of_property_read_string_index(pdev->dev.of_node,
+						   "clock-names", i,
+						   &clock_name);
+		if (rc) {
+			dev_err(&pdev->dev, "Failed to get clock name for %d\n",
+				i);
+			break;
+		}
+		strlcpy(mp->clk_config[i].clk_name, clock_name,
+			sizeof(mp->clk_config[i].clk_name));
+
+		mp->clk_config[i].type = DSS_CLK_AHB;
+	}
+
+	rc = msm_dss_get_clk(&pdev->dev, mp->clk_config, num_clk);
+	if (rc) {
+		dev_err(&pdev->dev, "Failed to get clock refs %d\n", rc);
+		goto err;
+	}
+
+	rc = of_clk_set_defaults(pdev->dev.of_node, false);
+	if (rc) {
+		dev_err(&pdev->dev, "Failed to set clock defaults %d\n", rc);
+		goto err;
+	}
+
+	for (i = 0; i < num_clk; i++) {
+		u32 rate = clk_get_rate(mp->clk_config[i].clk);
+		if (!rate)
+			continue;
+		mp->clk_config[i].rate = rate;
+		mp->clk_config[i].type = DSS_CLK_PCLK;
+	}
+
+	mp->num_clk = num_clk;
+	return 0;
+
+err:
+	msm_dss_put_clk(mp->clk_config, num_clk);
+	return rc;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h
new file mode 100644
index 0000000..bc07381
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2012, 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_IO_UTIL_H__
+#define __DPU_IO_UTIL_H__
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define DEV_DBG(fmt, args...)   pr_debug(fmt, ##args)
+#define DEV_INFO(fmt, args...)  pr_info(fmt, ##args)
+#define DEV_WARN(fmt, args...)  pr_warn(fmt, ##args)
+#define DEV_ERR(fmt, args...)   pr_err(fmt, ##args)
+
+struct dss_gpio {
+	unsigned int gpio;
+	unsigned int value;
+	char gpio_name[32];
+};
+
+enum dss_clk_type {
+	DSS_CLK_AHB, /* no set rate. rate controlled through rpm */
+	DSS_CLK_PCLK,
+};
+
+struct dss_clk {
+	struct clk *clk; /* clk handle */
+	char clk_name[32];
+	enum dss_clk_type type;
+	unsigned long rate;
+	unsigned long max_rate;
+};
+
+struct dss_module_power {
+	unsigned int num_gpio;
+	struct dss_gpio *gpio_config;
+	unsigned int num_clk;
+	struct dss_clk *clk_config;
+};
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk);
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable);
+int msm_dss_parse_clock(struct platform_device *pdev,
+		struct dss_module_power *mp);
+#endif /* __DPU_IO_UTIL_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c
new file mode 100644
index 0000000..d5e6ce0
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c
@@ -0,0 +1,66 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+
+#include "dpu_irq.h"
+#include "dpu_core_irq.h"
+
+irqreturn_t dpu_irq(struct msm_kms *kms)
+{
+	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+	return dpu_core_irq(dpu_kms);
+}
+
+void dpu_irq_preinstall(struct msm_kms *kms)
+{
+	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+	if (!dpu_kms->dev || !dpu_kms->dev->dev) {
+		pr_err("invalid device handles\n");
+		return;
+	}
+
+	dpu_core_irq_preinstall(dpu_kms);
+}
+
+int dpu_irq_postinstall(struct msm_kms *kms)
+{
+	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+	int rc;
+
+	if (!kms) {
+		DPU_ERROR("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	rc = dpu_core_irq_postinstall(dpu_kms);
+
+	return rc;
+}
+
+void dpu_irq_uninstall(struct msm_kms *kms)
+{
+	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+	if (!kms) {
+		DPU_ERROR("invalid parameters\n");
+		return;
+	}
+
+	dpu_core_irq_uninstall(dpu_kms);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h
new file mode 100644
index 0000000..3e147f7
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h
@@ -0,0 +1,59 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_IRQ_H__
+#define __DPU_IRQ_H__
+
+#include <linux/kernel.h>
+#include <linux/irqdomain.h>
+
+#include "msm_kms.h"
+
+/**
+ * dpu_irq_controller - define MDSS level interrupt controller context
+ * @enabled_mask:	enable status of MDSS level interrupt
+ * @domain:		interrupt domain of this controller
+ */
+struct dpu_irq_controller {
+	unsigned long enabled_mask;
+	struct irq_domain *domain;
+};
+
+/**
+ * dpu_irq_preinstall - perform pre-installation of MDSS IRQ handler
+ * @kms:		pointer to kms context
+ * @return:		none
+ */
+void dpu_irq_preinstall(struct msm_kms *kms);
+
+/**
+ * dpu_irq_postinstall - perform post-installation of MDSS IRQ handler
+ * @kms:		pointer to kms context
+ * @return:		0 if success; error code otherwise
+ */
+int dpu_irq_postinstall(struct msm_kms *kms);
+
+/**
+ * dpu_irq_uninstall - uninstall MDSS IRQ handler
+ * @kms:		pointer to kms context
+ * @return:		none
+ */
+void dpu_irq_uninstall(struct msm_kms *kms);
+
+/**
+ * dpu_irq - MDSS level IRQ handler
+ * @kms:		pointer to kms context
+ * @return:		interrupt handling status
+ */
+irqreturn_t dpu_irq(struct msm_kms *kms);
+
+#endif /* __DPU_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
new file mode 100644
index 0000000..74cc204
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -0,0 +1,1345 @@
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <drm/drm_crtc.h>
+#include <linux/debugfs.h>
+#include <linux/of_irq.h>
+#include <linux/dma-buf.h>
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "msm_gem.h"
+
+#include "dpu_kms.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_vbif.h"
+#include "dpu_encoder.h"
+#include "dpu_plane.h"
+#include "dpu_crtc.h"
+
+#define CREATE_TRACE_POINTS
+#include "dpu_trace.h"
+
+static const char * const iommu_ports[] = {
+		"mdp_0",
+};
+
+/*
+ * To enable overall DRM driver logging
+ * # echo 0x2 > /sys/module/drm/parameters/debug
+ *
+ * To enable DRM driver h/w logging
+ * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
+ *
+ * See dpu_hw_mdss.h for h/w logging mask definitions (search for DPU_DBG_MASK_)
+ */
+#define DPU_DEBUGFS_DIR "msm_dpu"
+#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
+
+static int dpu_kms_hw_init(struct msm_kms *kms);
+static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
+
+static unsigned long dpu_iomap_size(struct platform_device *pdev,
+				    const char *name)
+{
+	struct resource *res;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+	if (!res) {
+		DRM_ERROR("failed to get memory resource: %s\n", name);
+		return 0;
+	}
+
+	return resource_size(res);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _dpu_danger_signal_status(struct seq_file *s,
+		bool danger_status)
+{
+	struct dpu_kms *kms = (struct dpu_kms *)s->private;
+	struct msm_drm_private *priv;
+	struct dpu_danger_safe_status status;
+	int i;
+
+	if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
+		DPU_ERROR("invalid arg(s)\n");
+		return 0;
+	}
+
+	priv = kms->dev->dev_private;
+	memset(&status, 0, sizeof(struct dpu_danger_safe_status));
+
+	pm_runtime_get_sync(&kms->pdev->dev);
+	if (danger_status) {
+		seq_puts(s, "\nDanger signal status:\n");
+		if (kms->hw_mdp->ops.get_danger_status)
+			kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
+					&status);
+	} else {
+		seq_puts(s, "\nSafe signal status:\n");
+		if (kms->hw_mdp->ops.get_safe_status)
+			kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
+					&status);
+	}
+	pm_runtime_put_sync(&kms->pdev->dev);
+
+	seq_printf(s, "MDP     :  0x%x\n", status.mdp);
+
+	for (i = SSPP_VIG0; i < SSPP_MAX; i++)
+		seq_printf(s, "SSPP%d   :  0x%x  \t", i - SSPP_VIG0,
+				status.sspp[i]);
+	seq_puts(s, "\n");
+
+	return 0;
+}
+
+#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)				\
+static int __prefix ## _open(struct inode *inode, struct file *file)	\
+{									\
+	return single_open(file, __prefix ## _show, inode->i_private);	\
+}									\
+static const struct file_operations __prefix ## _fops = {		\
+	.owner = THIS_MODULE,						\
+	.open = __prefix ## _open,					\
+	.release = single_release,					\
+	.read = seq_read,						\
+	.llseek = seq_lseek,						\
+}
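+
+/*
+ * DEFINE_DPU_DEBUGFS_SEQ_FOPS(foo) pairs an existing foo_show() with the
+ * generated foo_open() and foo_fops; the danger/safe stats below use it.
+ */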
+
+static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
+{
+	return _dpu_danger_signal_status(s, true);
+}
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_danger_stats);
+
+static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
+{
+	return _dpu_danger_signal_status(s, false);
+}
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats);
+
+static void dpu_debugfs_danger_destroy(struct dpu_kms *dpu_kms)
+{
+	debugfs_remove_recursive(dpu_kms->debugfs_danger);
+	dpu_kms->debugfs_danger = NULL;
+}
+
+static int dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
+		struct dentry *parent)
+{
+	dpu_kms->debugfs_danger = debugfs_create_dir("danger",
+			parent);
+	if (!dpu_kms->debugfs_danger) {
+		DPU_ERROR("failed to create danger debugfs\n");
+		return -EINVAL;
+	}
+
+	debugfs_create_file("danger_status", 0600, dpu_kms->debugfs_danger,
+			dpu_kms, &dpu_debugfs_danger_stats_fops);
+	debugfs_create_file("safe_status", 0600, dpu_kms->debugfs_danger,
+			dpu_kms, &dpu_debugfs_safe_stats_fops);
+
+	return 0;
+}
+
+static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
+{
+	struct dpu_debugfs_regset32 *regset;
+	struct dpu_kms *dpu_kms;
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
+	void __iomem *base;
+	uint32_t i, addr;
+
+	if (!s || !s->private)
+		return 0;
+
+	regset = s->private;
+
+	dpu_kms = regset->dpu_kms;
+	if (!dpu_kms || !dpu_kms->mmio)
+		return 0;
+
+	dev = dpu_kms->dev;
+	if (!dev)
+		return 0;
+
+	priv = dev->dev_private;
+	if (!priv)
+		return 0;
+
+	base = dpu_kms->mmio + regset->offset;
+
+	/* insert padding spaces, if needed */
+	if (regset->offset & 0xF) {
+		seq_printf(s, "[%x]", regset->offset & ~0xF);
+		for (i = 0; i < (regset->offset & 0xF); i += 4)
+			seq_puts(s, "         ");
+	}
+
+	pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+	/* main register output */
+	for (i = 0; i < regset->blk_len; i += 4) {
+		addr = regset->offset + i;
+		if ((addr & 0xF) == 0x0)
+			seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
+		seq_printf(s, " %08x", readl_relaxed(base + i));
+	}
+	seq_puts(s, "\n");
+	pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+	return 0;
+}
+
+static int dpu_debugfs_open_regset32(struct inode *inode,
+		struct file *file)
+{
+	return single_open(file, _dpu_debugfs_show_regset32, inode->i_private);
+}
+
+static const struct file_operations dpu_fops_regset32 = {
+	.open =		dpu_debugfs_open_regset32,
+	.read =		seq_read,
+	.llseek =	seq_lseek,
+	.release =	single_release,
+};
+
+void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
+		uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
+{
+	if (regset) {
+		regset->offset = offset;
+		regset->blk_len = length;
+		regset->dpu_kms = dpu_kms;
+	}
+}
+
+void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
+		void *parent, struct dpu_debugfs_regset32 *regset)
+{
+	if (!name || !regset || !regset->dpu_kms || !regset->blk_len)
+		return NULL;
+
+	/* make sure offset is a multiple of 4 */
+	regset->offset = round_down(regset->offset, 4);
+
+	return debugfs_create_file(name, mode, parent,
+			regset, &dpu_fops_regset32);
+}
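+
+/*
+ * Sketch of intended usage (the names "blk_off", "blk_len" and "parent"
+ * are illustrative placeholders, not actual callers in this file):
+ *
+ *	static struct dpu_debugfs_regset32 regset;
+ *
+ *	dpu_debugfs_setup_regset32(&regset, blk_off, blk_len, dpu_kms);
+ *	dpu_debugfs_create_regset32("regs", 0400, parent, &regset);
+ */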
+
+static int _dpu_debugfs_init(struct dpu_kms *dpu_kms)
+{
+	void *p;
+	int rc;
+
+	p = dpu_hw_util_get_log_mask_ptr();
+
+	if (!dpu_kms || !p)
+		return -EINVAL;
+
+	dpu_kms->debugfs_root = debugfs_create_dir("debug",
+					   dpu_kms->dev->primary->debugfs_root);
+	if (IS_ERR_OR_NULL(dpu_kms->debugfs_root)) {
+		DRM_ERROR("debugfs create_dir failed %ld\n",
+			  PTR_ERR(dpu_kms->debugfs_root));
+		return PTR_ERR(dpu_kms->debugfs_root);
+	}
+
+	rc = dpu_dbg_debugfs_register(dpu_kms->debugfs_root);
+	if (rc) {
+		DRM_ERROR("failed to reg dpu dbg debugfs: %d\n", rc);
+		return rc;
+	}
+
+	/* allow root to be NULL */
+	debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, dpu_kms->debugfs_root, p);
+
+	(void) dpu_debugfs_danger_init(dpu_kms, dpu_kms->debugfs_root);
+	(void) dpu_debugfs_vbif_init(dpu_kms, dpu_kms->debugfs_root);
+	(void) dpu_debugfs_core_irq_init(dpu_kms, dpu_kms->debugfs_root);
+
+	rc = dpu_core_perf_debugfs_init(&dpu_kms->perf, dpu_kms->debugfs_root);
+	if (rc) {
+		DPU_ERROR("failed to init perf %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
+{
+	/* don't need to NULL check debugfs_root */
+	if (dpu_kms) {
+		dpu_debugfs_vbif_destroy(dpu_kms);
+		dpu_debugfs_danger_destroy(dpu_kms);
+		dpu_debugfs_core_irq_destroy(dpu_kms);
+		debugfs_remove_recursive(dpu_kms->debugfs_root);
+	}
+}
+#else
+static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
+{
+}
+#endif
+
+static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+	return dpu_crtc_vblank(crtc, true);
+}
+
+static void dpu_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+	dpu_crtc_vblank(crtc, false);
+}
+
+static void dpu_kms_prepare_commit(struct msm_kms *kms,
+		struct drm_atomic_state *state)
+{
+	struct dpu_kms *dpu_kms;
+	struct msm_drm_private *priv;
+	struct drm_device *dev;
+	struct drm_encoder *encoder;
+
+	if (!kms)
+		return;
+	dpu_kms = to_dpu_kms(kms);
+	dev = dpu_kms->dev;
+
+	if (!dev || !dev->dev_private)
+		return;
+	priv = dev->dev_private;
+	pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+		if (encoder->crtc != NULL)
+			dpu_encoder_prepare_commit(encoder);
+}
+
+/*
+ * Override the encoder enable since we need to setup the inline rotator and do
+ * some crtc magic before enabling any bridge that might be present.
+ */
+void dpu_kms_encoder_enable(struct drm_encoder *encoder)
+{
+	const struct drm_encoder_helper_funcs *funcs = encoder->helper_private;
+	struct drm_crtc *crtc = encoder->crtc;
+
+	/* Forward this enable call to the commit hook */
+	if (funcs && funcs->commit)
+		funcs->commit(encoder);
+
+	if (crtc && crtc->state->active) {
+		trace_dpu_kms_enc_enable(DRMID(crtc));
+		dpu_crtc_commit_kickoff(crtc);
+	}
+}
+
+static void dpu_kms_commit(struct msm_kms *kms, struct drm_atomic_state *state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	int i;
+
+	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+		/* If modeset is required, kickoff is run in encoder_enable */
+		if (drm_atomic_crtc_needs_modeset(crtc_state))
+			continue;
+
+		if (crtc->state->active) {
+			trace_dpu_kms_commit(DRMID(crtc));
+			dpu_crtc_commit_kickoff(crtc);
+		}
+	}
+}
+
+static void dpu_kms_complete_commit(struct msm_kms *kms,
+		struct drm_atomic_state *old_state)
+{
+	struct dpu_kms *dpu_kms;
+	struct msm_drm_private *priv;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state;
+	int i;
+
+	if (!kms || !old_state)
+		return;
+	dpu_kms = to_dpu_kms(kms);
+
+	if (!dpu_kms->dev || !dpu_kms->dev->dev_private)
+		return;
+	priv = dpu_kms->dev->dev_private;
+
+	DPU_ATRACE_BEGIN("kms_complete_commit");
+
+	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
+		dpu_crtc_complete_commit(crtc, old_crtc_state);
+
+	pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+	DPU_ATRACE_END("kms_complete_commit");
+}
+
+static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
+		struct drm_crtc *crtc)
+{
+	struct drm_encoder *encoder;
+	struct drm_device *dev;
+	int ret;
+
+	if (!kms || !crtc || !crtc->state) {
+		DPU_ERROR("invalid params\n");
+		return;
+	}
+
+	dev = crtc->dev;
+
+	if (!crtc->state->enable) {
+		DPU_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
+		return;
+	}
+
+	if (!crtc->state->active) {
+		DPU_DEBUG("[crtc:%d] not active\n", crtc->base.id);
+		return;
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
+		/*
+		 * Wait for post-flush if necessary to delay before
+		 * plane_cleanup. For example, wait for vsync in case of video
+		 * mode panels. This may be a no-op for command mode panels.
+		 */
+		trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
+		ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
+		if (ret && ret != -EWOULDBLOCK) {
+			DPU_ERROR("wait for commit done returned %d\n", ret);
+			break;
+		}
+	}
+}
+
+static void _dpu_kms_initialize_dsi(struct drm_device *dev,
+				    struct msm_drm_private *priv,
+				    struct dpu_kms *dpu_kms)
+{
+	struct drm_encoder *encoder = NULL;
+	int i, rc;
+
+	/* TODO: Support two independent DSI connectors */
+	encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
+	if (IS_ERR_OR_NULL(encoder)) {
+		DPU_ERROR("encoder init failed for dsi display\n");
+		return;
+	}
+
+	priv->encoders[priv->num_encoders++] = encoder;
+
+	for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+		if (!priv->dsi[i]) {
+			DPU_DEBUG("invalid msm_dsi for ctrl %d\n", i);
+			return;
+		}
+
+		rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
+		if (rc) {
+			DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
+				i, rc);
+			continue;
+		}
+	}
+}
+
+/**
+ * _dpu_kms_setup_displays - create encoders, bridges and connectors
+ *                           for underlying displays
+ * @dev:        Pointer to drm device structure
+ * @priv:       Pointer to private drm device data
+ * @dpu_kms:    Pointer to dpu kms structure
+ */
+static void _dpu_kms_setup_displays(struct drm_device *dev,
+				    struct msm_drm_private *priv,
+				    struct dpu_kms *dpu_kms)
+{
+	_dpu_kms_initialize_dsi(dev, priv, dpu_kms);
+
+	/**
+	 * Extend this function to initialize other
+	 * types of displays
+	 */
+}
+
+static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
+{
+	struct msm_drm_private *priv;
+	int i;
+
+	if (!dpu_kms) {
+		DPU_ERROR("invalid dpu_kms\n");
+		return;
+	} else if (!dpu_kms->dev) {
+		DPU_ERROR("invalid dev\n");
+		return;
+	} else if (!dpu_kms->dev->dev_private) {
+		DPU_ERROR("invalid dev_private\n");
+		return;
+	}
+	priv = dpu_kms->dev->dev_private;
+
+	for (i = 0; i < priv->num_crtcs; i++)
+		priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
+	priv->num_crtcs = 0;
+
+	for (i = 0; i < priv->num_planes; i++)
+		priv->planes[i]->funcs->destroy(priv->planes[i]);
+	priv->num_planes = 0;
+
+	for (i = 0; i < priv->num_connectors; i++)
+		priv->connectors[i]->funcs->destroy(priv->connectors[i]);
+	priv->num_connectors = 0;
+
+	for (i = 0; i < priv->num_encoders; i++)
+		priv->encoders[i]->funcs->destroy(priv->encoders[i]);
+	priv->num_encoders = 0;
+}
+
+static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
+{
+	struct drm_device *dev;
+	struct drm_plane *primary_planes[MAX_PLANES], *plane;
+	struct drm_crtc *crtc;
+
+	struct msm_drm_private *priv;
+	struct dpu_mdss_cfg *catalog;
+
+	int primary_planes_idx = 0, i, ret;
+	int max_crtc_count;
+
+	if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
+		DPU_ERROR("invalid dpu_kms\n");
+		return -EINVAL;
+	}
+
+	dev = dpu_kms->dev;
+	priv = dev->dev_private;
+	catalog = dpu_kms->catalog;
+
+	/*
+	 * Create encoder and query display drivers to create
+	 * bridges and connectors
+	 */
+	_dpu_kms_setup_displays(dev, priv, dpu_kms);
+
+	max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
+
+	/* Create the planes */
+	for (i = 0; i < catalog->sspp_count; i++) {
+		bool primary = true;
+
+		if (catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR)
+			|| primary_planes_idx >= max_crtc_count)
+			primary = false;
+
+		plane = dpu_plane_init(dev, catalog->sspp[i].id, primary,
+				(1UL << max_crtc_count) - 1, 0);
+		if (IS_ERR(plane)) {
+			DPU_ERROR("dpu_plane_init failed\n");
+			ret = PTR_ERR(plane);
+			goto fail;
+		}
+		priv->planes[priv->num_planes++] = plane;
+
+		if (primary)
+			primary_planes[primary_planes_idx++] = plane;
+	}
+
+	max_crtc_count = min(max_crtc_count, primary_planes_idx);
+
+	/* Create one CRTC per encoder */
+	for (i = 0; i < max_crtc_count; i++) {
+		crtc = dpu_crtc_init(dev, primary_planes[i]);
+		if (IS_ERR(crtc)) {
+			ret = PTR_ERR(crtc);
+			goto fail;
+		}
+		priv->crtcs[priv->num_crtcs++] = crtc;
+	}
+
+	/* All CRTCs are compatible with all encoders */
+	for (i = 0; i < priv->num_encoders; i++)
+		priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;
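+	/*
+	 * Illustrative: with num_crtcs == 3 the mask above evaluates to
+	 * (1 << 3) - 1 == 0x7, i.e. bits 0..2 set, so any of the three
+	 * CRTCs may drive any encoder.
+	 */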
+
+	return 0;
+fail:
+	_dpu_kms_drm_obj_destroy(dpu_kms);
+	return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
+{
+	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+	struct drm_device *dev;
+	int rc;
+
+	if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
+		DPU_ERROR("invalid dpu_kms\n");
+		return -EINVAL;
+	}
+
+	dev = dpu_kms->dev;
+
+	rc = _dpu_debugfs_init(dpu_kms);
+	if (rc)
+		DPU_ERROR("dpu_debugfs init failed: %d\n", rc);
+
+	return rc;
+}
+#endif
+
+static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
+		struct drm_encoder *encoder)
+{
+	return rate;
+}
+
+static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
+{
+	struct drm_device *dev;
+	int i;
+
+	dev = dpu_kms->dev;
+	if (!dev)
+		return;
+
+	if (dpu_kms->hw_intr)
+		dpu_hw_intr_destroy(dpu_kms->hw_intr);
+	dpu_kms->hw_intr = NULL;
+
+	if (dpu_kms->power_event)
+		dpu_power_handle_unregister_event(
+				&dpu_kms->phandle, dpu_kms->power_event);
+
+	/* safe to call these more than once during shutdown */
+	_dpu_debugfs_destroy(dpu_kms);
+	_dpu_kms_mmu_destroy(dpu_kms);
+
+	if (dpu_kms->catalog) {
+		for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+			u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
+
+			if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx])
+				dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
+		}
+	}
+
+	if (dpu_kms->rm_init)
+		dpu_rm_destroy(&dpu_kms->rm);
+	dpu_kms->rm_init = false;
+
+	if (dpu_kms->catalog)
+		dpu_hw_catalog_deinit(dpu_kms->catalog);
+	dpu_kms->catalog = NULL;
+
+	if (dpu_kms->core_client)
+		dpu_power_client_destroy(&dpu_kms->phandle,
+			dpu_kms->core_client);
+	dpu_kms->core_client = NULL;
+
+	if (dpu_kms->vbif[VBIF_NRT])
+		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
+	dpu_kms->vbif[VBIF_NRT] = NULL;
+
+	if (dpu_kms->vbif[VBIF_RT])
+		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
+	dpu_kms->vbif[VBIF_RT] = NULL;
+
+	if (dpu_kms->mmio)
+		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
+	dpu_kms->mmio = NULL;
+}
+
+static void dpu_kms_destroy(struct msm_kms *kms)
+{
+	struct dpu_kms *dpu_kms;
+
+	if (!kms) {
+		DPU_ERROR("invalid kms\n");
+		return;
+	}
+
+	dpu_kms = to_dpu_kms(kms);
+
+	dpu_dbg_destroy();
+	_dpu_kms_hw_destroy(dpu_kms);
+}
+
+static int dpu_kms_pm_suspend(struct device *dev)
+{
+	struct drm_device *ddev;
+	struct drm_modeset_acquire_ctx ctx;
+	struct drm_atomic_state *state;
+	struct dpu_kms *dpu_kms;
+	int ret = 0, num_crtcs = 0;
+
+	if (!dev)
+		return -EINVAL;
+
+	ddev = dev_get_drvdata(dev);
+	if (!ddev || !ddev_to_msm_kms(ddev))
+		return -EINVAL;
+
+	dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
+
+	/* disable hot-plug polling */
+	drm_kms_helper_poll_disable(ddev);
+
+	/* acquire modeset lock(s) */
+	drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+	DPU_ATRACE_BEGIN("kms_pm_suspend");
+
+	ret = drm_modeset_lock_all_ctx(ddev, &ctx);
+	if (ret)
+		goto unlock;
+
+	/* save current state for resume */
+	if (dpu_kms->suspend_state)
+		drm_atomic_state_put(dpu_kms->suspend_state);
+	dpu_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
+	if (IS_ERR_OR_NULL(dpu_kms->suspend_state)) {
+		DRM_ERROR("failed to back up suspend state\n");
+		dpu_kms->suspend_state = NULL;
+		goto unlock;
+	}
+
+	/* create atomic state to disable all CRTCs */
+	state = drm_atomic_state_alloc(ddev);
+	if (IS_ERR_OR_NULL(state)) {
+		DRM_ERROR("failed to allocate crtc disable state\n");
+		goto unlock;
+	}
+
+	state->acquire_ctx = &ctx;
+
+	/* check for nothing to do */
+	if (num_crtcs == 0) {
+		DRM_DEBUG("all crtcs are already in the off state\n");
+		drm_atomic_state_put(state);
+		goto suspended;
+	}
+
+	/* commit the "disable all" state */
+	ret = drm_atomic_commit(state);
+	if (ret < 0) {
+		DRM_ERROR("failed to disable crtcs, %d\n", ret);
+		drm_atomic_state_put(state);
+		goto unlock;
+	}
+
+suspended:
+	dpu_kms->suspend_block = true;
+
+unlock:
+	if (ret == -EDEADLK) {
+		drm_modeset_backoff(&ctx);
+		goto retry;
+	}
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+
+	DPU_ATRACE_END("kms_pm_suspend");
+	return 0;
+}
+
+static int dpu_kms_pm_resume(struct device *dev)
+{
+	struct drm_device *ddev;
+	struct dpu_kms *dpu_kms;
+	int ret;
+
+	if (!dev)
+		return -EINVAL;
+
+	ddev = dev_get_drvdata(dev);
+	if (!ddev || !ddev_to_msm_kms(ddev))
+		return -EINVAL;
+
+	dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
+
+	DPU_ATRACE_BEGIN("kms_pm_resume");
+
+	drm_mode_config_reset(ddev);
+
+	drm_modeset_lock_all(ddev);
+
+	dpu_kms->suspend_block = false;
+
+	if (dpu_kms->suspend_state) {
+		dpu_kms->suspend_state->acquire_ctx =
+			ddev->mode_config.acquire_ctx;
+		ret = drm_atomic_commit(dpu_kms->suspend_state);
+		if (ret < 0) {
+			DRM_ERROR("failed to restore state, %d\n", ret);
+			drm_atomic_state_put(dpu_kms->suspend_state);
+		}
+		dpu_kms->suspend_state = NULL;
+	}
+	drm_modeset_unlock_all(ddev);
+
+	/* enable hot-plug polling */
+	drm_kms_helper_poll_enable(ddev);
+
+	DPU_ATRACE_END("kms_pm_resume");
+	return 0;
+}
+
+static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
+				 struct drm_encoder *encoder,
+				 bool cmd_mode)
+{
+	struct msm_display_info info;
+	struct msm_drm_private *priv = encoder->dev->dev_private;
+	int i, rc = 0;
+
+	memset(&info, 0, sizeof(info));
+
+	info.intf_type = encoder->encoder_type;
+	info.capabilities = cmd_mode ? MSM_DISPLAY_CAP_CMD_MODE :
+			MSM_DISPLAY_CAP_VID_MODE;
+
+	/* TODO: No support for DSI swap */
+	for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+		if (priv->dsi[i]) {
+			info.h_tile_instance[info.num_of_h_tiles] = i;
+			info.num_of_h_tiles++;
+		}
+	}
+
+	rc = dpu_encoder_setup(encoder->dev, encoder, &info);
+	if (rc)
+		DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
+			encoder->base.id, rc);
+}
+
+static const struct msm_kms_funcs kms_funcs = {
+	.hw_init         = dpu_kms_hw_init,
+	.irq_preinstall  = dpu_irq_preinstall,
+	.irq_postinstall = dpu_irq_postinstall,
+	.irq_uninstall   = dpu_irq_uninstall,
+	.irq             = dpu_irq,
+	.prepare_commit  = dpu_kms_prepare_commit,
+	.commit          = dpu_kms_commit,
+	.complete_commit = dpu_kms_complete_commit,
+	.wait_for_crtc_commit_done = dpu_kms_wait_for_commit_done,
+	.enable_vblank   = dpu_kms_enable_vblank,
+	.disable_vblank  = dpu_kms_disable_vblank,
+	.check_modified_format = dpu_format_check_modified_format,
+	.get_format      = dpu_get_msm_format,
+	.round_pixclk    = dpu_kms_round_pixclk,
+	.pm_suspend      = dpu_kms_pm_suspend,
+	.pm_resume       = dpu_kms_pm_resume,
+	.destroy         = dpu_kms_destroy,
+	.set_encoder_mode = _dpu_kms_set_encoder_mode,
+#ifdef CONFIG_DEBUG_FS
+	.debugfs_init    = dpu_kms_debugfs_init,
+#endif
+};
+
+/* the caller must ensure the core clock is enabled before calling this */
+static inline void _dpu_kms_core_hw_rev_init(struct dpu_kms *dpu_kms)
+{
+	dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
+}
+
+static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
+{
+	struct msm_mmu *mmu;
+
+	mmu = dpu_kms->base.aspace->mmu;
+
+	mmu->funcs->detach(mmu, (const char **)iommu_ports,
+			ARRAY_SIZE(iommu_ports));
+	msm_gem_address_space_put(dpu_kms->base.aspace);
+
+	return 0;
+}
+
+static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
+{
+	struct iommu_domain *domain;
+	struct msm_gem_address_space *aspace;
+	int ret;
+
+	domain = iommu_domain_alloc(&platform_bus_type);
+	if (!domain)
+		return 0;
+
+	aspace = msm_gem_address_space_create(dpu_kms->dev->dev,
+			domain, "dpu1");
+	if (IS_ERR(aspace)) {
+		ret = PTR_ERR(aspace);
+		goto fail;
+	}
+
+	dpu_kms->base.aspace = aspace;
+
+	ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
+			ARRAY_SIZE(iommu_ports));
+	if (ret) {
+		DPU_ERROR("failed to attach iommu %d\n", ret);
+		msm_gem_address_space_put(aspace);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	_dpu_kms_mmu_destroy(dpu_kms);
+
+	return ret;
+}
+
+static struct dss_clk *_dpu_kms_get_clk(struct dpu_kms *dpu_kms,
+		char *clock_name)
+{
+	struct dss_module_power *mp = &dpu_kms->mp;
+	int i;
+
+	for (i = 0; i < mp->num_clk; i++) {
+		if (!strcmp(mp->clk_config[i].clk_name, clock_name))
+			return &mp->clk_config[i];
+	}
+
+	return NULL;
+}
+
+u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
+{
+	struct dss_clk *clk;
+
+	clk = _dpu_kms_get_clk(dpu_kms, clock_name);
+	if (!clk)
+		return -EINVAL;
+
+	return clk_get_rate(clk->clk);
+}
+
+static void dpu_kms_handle_power_event(u32 event_type, void *usr)
+{
+	struct dpu_kms *dpu_kms = usr;
+
+	if (!dpu_kms)
+		return;
+
+	if (event_type == DPU_POWER_EVENT_POST_ENABLE)
+		dpu_vbif_init_memtypes(dpu_kms);
+}
+
+static int dpu_kms_hw_init(struct msm_kms *kms)
+{
+	struct dpu_kms *dpu_kms;
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
+	int i, rc = -EINVAL;
+
+	if (!kms) {
+		DPU_ERROR("invalid kms\n");
+		goto end;
+	}
+
+	dpu_kms = to_dpu_kms(kms);
+	dev = dpu_kms->dev;
+	if (!dev) {
+		DPU_ERROR("invalid device\n");
+		goto end;
+	}
+
+	rc = dpu_dbg_init(&dpu_kms->pdev->dev);
+	if (rc) {
+		DRM_ERROR("failed to init dpu dbg: %d\n", rc);
+		goto end;
+	}
+
+	priv = dev->dev_private;
+	if (!priv) {
+		DPU_ERROR("invalid private data\n");
+		goto dbg_destroy;
+	}
+
+	dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp", "mdp");
+	if (IS_ERR(dpu_kms->mmio)) {
+		rc = PTR_ERR(dpu_kms->mmio);
+		DPU_ERROR("mdp register memory map failed: %d\n", rc);
+		dpu_kms->mmio = NULL;
+		goto error;
+	}
+	DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
+	dpu_kms->mmio_len = dpu_iomap_size(dpu_kms->pdev, "mdp");
+
+	dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif", "vbif");
+	if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
+		rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
+		DPU_ERROR("vbif register memory map failed: %d\n", rc);
+		dpu_kms->vbif[VBIF_RT] = NULL;
+		goto error;
+	}
+	dpu_kms->vbif_len[VBIF_RT] = dpu_iomap_size(dpu_kms->pdev, "vbif");
+	dpu_kms->vbif[VBIF_NRT] = msm_ioremap(dpu_kms->pdev, "vbif_nrt", "vbif_nrt");
+	if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
+		dpu_kms->vbif[VBIF_NRT] = NULL;
+		DPU_DEBUG("VBIF NRT is not defined");
+	} else {
+		dpu_kms->vbif_len[VBIF_NRT] = dpu_iomap_size(dpu_kms->pdev,
+							     "vbif_nrt");
+	}
+
+	dpu_kms->reg_dma = msm_ioremap(dpu_kms->pdev, "regdma", "regdma");
+	if (IS_ERR(dpu_kms->reg_dma)) {
+		dpu_kms->reg_dma = NULL;
+		DPU_DEBUG("REG_DMA is not defined");
+	} else {
+		dpu_kms->reg_dma_len = dpu_iomap_size(dpu_kms->pdev, "regdma");
+	}
+
+	dpu_kms->core_client = dpu_power_client_create(&dpu_kms->phandle,
+					"core");
+	if (IS_ERR_OR_NULL(dpu_kms->core_client)) {
+		rc = PTR_ERR(dpu_kms->core_client);
+		if (!dpu_kms->core_client)
+			rc = -EINVAL;
+		DPU_ERROR("dpu power client create failed: %d\n", rc);
+		dpu_kms->core_client = NULL;
+		goto error;
+	}
+
+	pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+	_dpu_kms_core_hw_rev_init(dpu_kms);
+
+	pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev);
+
+	dpu_kms->catalog = dpu_hw_catalog_init(dpu_kms->core_rev);
+	if (IS_ERR_OR_NULL(dpu_kms->catalog)) {
+		rc = PTR_ERR(dpu_kms->catalog);
+		if (!dpu_kms->catalog)
+			rc = -EINVAL;
+		DPU_ERROR("catalog init failed: %d\n", rc);
+		dpu_kms->catalog = NULL;
+		goto power_error;
+	}
+
+	dpu_dbg_init_dbg_buses(dpu_kms->core_rev);
+
+	/*
+	 * Now we need to read the HW catalog and initialize resources such as
+	 * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
+	 */
+	rc = _dpu_kms_mmu_init(dpu_kms);
+	if (rc) {
+		DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
+		goto power_error;
+	}
+
+	rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio,
+			dpu_kms->dev);
+	if (rc) {
+		DPU_ERROR("rm init failed: %d\n", rc);
+		goto power_error;
+	}
+
+	dpu_kms->rm_init = true;
+
+	dpu_kms->hw_mdp = dpu_rm_get_mdp(&dpu_kms->rm);
+	if (IS_ERR_OR_NULL(dpu_kms->hw_mdp)) {
+		rc = PTR_ERR(dpu_kms->hw_mdp);
+		if (!dpu_kms->hw_mdp)
+			rc = -EINVAL;
+		DPU_ERROR("failed to get hw_mdp: %d\n", rc);
+		dpu_kms->hw_mdp = NULL;
+		goto power_error;
+	}
+
+	for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+		u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
+
+		dpu_kms->hw_vbif[vbif_idx] = dpu_hw_vbif_init(vbif_idx,
+				dpu_kms->vbif[vbif_idx], dpu_kms->catalog);
+		if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) {
+			rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]);
+			if (!dpu_kms->hw_vbif[vbif_idx])
+				rc = -EINVAL;
+			DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
+			dpu_kms->hw_vbif[vbif_idx] = NULL;
+			goto power_error;
+		}
+	}
+
+	rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
+			&dpu_kms->phandle,
+			_dpu_kms_get_clk(dpu_kms, "core"));
+	if (rc) {
+		DPU_ERROR("failed to init perf %d\n", rc);
+		goto perf_err;
+	}
+
+	dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog);
+	if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) {
+		rc = PTR_ERR(dpu_kms->hw_intr);
+		DPU_ERROR("hw_intr init failed: %d\n", rc);
+		dpu_kms->hw_intr = NULL;
+		goto hw_intr_init_err;
+	}
+
+	/*
+	 * _dpu_kms_drm_obj_init should create the DRM related objects
+	 * i.e. CRTCs, planes, encoders, connectors and so forth
+	 */
+	rc = _dpu_kms_drm_obj_init(dpu_kms);
+	if (rc) {
+		DPU_ERROR("modeset init failed: %d\n", rc);
+		goto drm_obj_init_err;
+	}
+
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+
+	/*
+	 * max crtc width is equal to the max mixer width * 2 and max height
+	 * is 4K
+	 */
+	dev->mode_config.max_width =
+			dpu_kms->catalog->caps->max_mixer_width * 2;
+	dev->mode_config.max_height = 4096;
+
+	/*
+	 * Support format modifiers for compression etc.
+	 */
+	dev->mode_config.allow_fb_modifiers = true;
+
+	/*
+	 * Handle (re)initializations during power enable
+	 */
+	dpu_kms_handle_power_event(DPU_POWER_EVENT_POST_ENABLE, dpu_kms);
+	dpu_kms->power_event = dpu_power_handle_register_event(
+			&dpu_kms->phandle,
+			DPU_POWER_EVENT_POST_ENABLE,
+			dpu_kms_handle_power_event, dpu_kms, "kms");
+
+	pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+	return 0;
+
+drm_obj_init_err:
+	dpu_core_perf_destroy(&dpu_kms->perf);
+hw_intr_init_err:
+perf_err:
+power_error:
+	pm_runtime_put_sync(&dpu_kms->pdev->dev);
+error:
+	_dpu_kms_hw_destroy(dpu_kms);
+dbg_destroy:
+	dpu_dbg_destroy();
+end:
+	return rc;
+}
+
+struct msm_kms *dpu_kms_init(struct drm_device *dev)
+{
+	struct msm_drm_private *priv;
+	struct dpu_kms *dpu_kms;
+	int irq;
+
+	if (!dev || !dev->dev_private) {
+		DPU_ERROR("drm device node invalid\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	priv = dev->dev_private;
+	dpu_kms = to_dpu_kms(priv->kms);
+
+	irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
+	if (!irq) {
+		/* irq_of_parse_and_map() returns 0 on failure, never negative */
+		DPU_ERROR("failed to get irq\n");
+		return ERR_PTR(-EINVAL);
+	}
+	dpu_kms->base.irq = irq;
+
+	return &dpu_kms->base;
+}
+
+static int dpu_bind(struct device *dev, struct device *master, void *data)
+{
+	struct drm_device *ddev = dev_get_drvdata(master);
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_drm_private *priv = ddev->dev_private;
+	struct dpu_kms *dpu_kms;
+	struct dss_module_power *mp;
+	int ret = 0;
+
+	dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
+	if (!dpu_kms)
+		return -ENOMEM;
+
+	mp = &dpu_kms->mp;
+	ret = msm_dss_parse_clock(pdev, mp);
+	if (ret) {
+		DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
+		return ret;
+	}
+
+	dpu_power_resource_init(pdev, &dpu_kms->phandle);
+
+	platform_set_drvdata(pdev, dpu_kms);
+
+	msm_kms_init(&dpu_kms->base, &kms_funcs);
+	dpu_kms->dev = ddev;
+	dpu_kms->pdev = pdev;
+
+	pm_runtime_enable(&pdev->dev);
+	dpu_kms->rpm_enabled = true;
+
+	priv->kms = &dpu_kms->base;
+	return ret;
+}
+
+static void dpu_unbind(struct device *dev, struct device *master, void *data)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+	struct dss_module_power *mp = &dpu_kms->mp;
+
+	dpu_power_resource_deinit(pdev, &dpu_kms->phandle);
+	msm_dss_put_clk(mp->clk_config, mp->num_clk);
+	devm_kfree(&pdev->dev, mp->clk_config);
+	mp->num_clk = 0;
+
+	if (dpu_kms->rpm_enabled)
+		pm_runtime_disable(&pdev->dev);
+}
+
+static const struct component_ops dpu_ops = {
+	.bind   = dpu_bind,
+	.unbind = dpu_unbind,
+};
+
+static int dpu_dev_probe(struct platform_device *pdev)
+{
+	return component_add(&pdev->dev, &dpu_ops);
+}
+
+static int dpu_dev_remove(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &dpu_ops);
+	return 0;
+}
+
+static int __maybe_unused dpu_runtime_suspend(struct device *dev)
+{
+	int rc = -EINVAL;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+	struct drm_device *ddev;
+	struct dss_module_power *mp = &dpu_kms->mp;
+
+	ddev = dpu_kms->dev;
+	if (!ddev) {
+		DPU_ERROR("invalid drm_device\n");
+		goto exit;
+	}
+
+	rc = dpu_power_resource_enable(&dpu_kms->phandle,
+			dpu_kms->core_client, false);
+	if (rc)
+		DPU_ERROR("resource disable failed: %d\n", rc);
+
+	rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
+	if (rc)
+		DPU_ERROR("clock disable failed rc:%d\n", rc);
+
+exit:
+	return rc;
+}
+
+static int __maybe_unused dpu_runtime_resume(struct device *dev)
+{
+	int rc = -EINVAL;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+	struct drm_device *ddev;
+	struct dss_module_power *mp = &dpu_kms->mp;
+
+	ddev = dpu_kms->dev;
+	if (!ddev) {
+		DPU_ERROR("invalid drm_device\n");
+		goto exit;
+	}
+
+	rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
+	if (rc) {
+		DPU_ERROR("clock enable failed rc:%d\n", rc);
+		goto exit;
+	}
+
+	rc = dpu_power_resource_enable(&dpu_kms->phandle,
+			dpu_kms->core_client, true);
+	if (rc)
+		DPU_ERROR("resource enable failed: %d\n", rc);
+
+exit:
+	return rc;
+}
+
+static const struct dev_pm_ops dpu_pm_ops = {
+	SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
+};
+
+static const struct of_device_id dpu_dt_match[] = {
+	{ .compatible = "qcom,sdm845-dpu", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, dpu_dt_match);
+
+static struct platform_driver dpu_driver = {
+	.probe = dpu_dev_probe,
+	.remove = dpu_dev_remove,
+	.driver = {
+		.name = "msm_dpu",
+		.of_match_table = dpu_dt_match,
+		.pm = &dpu_pm_ops,
+	},
+};
+
+void __init msm_dpu_register(void)
+{
+	platform_driver_register(&dpu_driver);
+}
+
+void __exit msm_dpu_unregister(void)
+{
+	platform_driver_unregister(&dpu_driver);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
new file mode 100644
index 0000000..66d4666
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DPU_KMS_H__
+#define __DPU_KMS_H__
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "msm_mmu.h"
+#include "msm_gem.h"
+#include "dpu_dbg.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_top.h"
+#include "dpu_rm.h"
+#include "dpu_power_handle.h"
+#include "dpu_irq.h"
+#include "dpu_core_perf.h"
+
+#define DRMID(x) ((x) ? (x)->base.id : -1)
+
+/**
+ * DPU_DEBUG - macro for kms/plane/crtc/encoder/connector logs
+ * @fmt: Pointer to format string
+ */
+#define DPU_DEBUG(fmt, ...)                                                \
+	do {                                                               \
+		if (unlikely(drm_debug & DRM_UT_KMS))                      \
+			DRM_DEBUG(fmt, ##__VA_ARGS__); \
+		else                                                       \
+			pr_debug(fmt, ##__VA_ARGS__);                      \
+	} while (0)
+
+/**
+ * DPU_DEBUG_DRIVER - macro for hardware driver logging
+ * @fmt: Pointer to format string
+ */
+#define DPU_DEBUG_DRIVER(fmt, ...)                                         \
+	do {                                                               \
+		if (unlikely(drm_debug & DRM_UT_DRIVER))                   \
+			DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__); \
+		else                                                       \
+			pr_debug(fmt, ##__VA_ARGS__);                      \
+	} while (0)
+
+#define DPU_ERROR(fmt, ...) pr_err("[dpu error]" fmt, ##__VA_ARGS__)
+
+/**
+ * ktime_compare_safe - compare two ktime structures
+ *	This macro is similar to the standard ktime_compare() function, but
+ *	attempts to also handle ktime overflows.
+ * @A: First ktime value
+ * @B: Second ktime value
+ * Returns: -1 if A < B, 0 if A == B, 1 if A > B
+ */
+#define ktime_compare_safe(A, B) \
+	ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0))
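+
+/*
+ * Illustrative overflow case: with s64 wraparound, if B == KTIME_MAX - 100
+ * and A has wrapped to KTIME_MIN + 50, a direct ktime_compare(A, B) reports
+ * A < B, but ktime_sub(A, B) wraps back to +151, so ktime_compare_safe()
+ * still reports A > B as intended.
+ */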
+
+#define DPU_NAME_SIZE  12
+
+/* timeout in frames waiting for frame done */
+#define DPU_FRAME_DONE_TIMEOUT	60
+
+/**
+ * struct dpu_irq_callback - IRQ callback handlers
+ * @list: list entry for this callback
+ * @func: interrupt handler
+ * @arg: argument for the handler
+ */
+struct dpu_irq_callback {
+	struct list_head list;
+	void (*func)(void *arg, int irq_idx);
+	void *arg;
+};
+
+/**
+ * struct dpu_irq: IRQ structure contains callback registration info
+ * @total_irqs:   total number of irq_idx obtained from HW interrupts mapping
+ * @irq_cb_tbl:   array of IRQ callbacks setting
+ * @enable_counts: array of IRQ enable counts
+ * @irq_counts:   array of IRQ interrupt counts
+ * @cb_lock:      callback lock
+ * @debugfs_file: debugfs file for irq statistics
+ */
+struct dpu_irq {
+	u32 total_irqs;
+	struct list_head *irq_cb_tbl;
+	atomic_t *enable_counts;
+	atomic_t *irq_counts;
+	spinlock_t cb_lock;
+	struct dentry *debugfs_file;
+};
+
+struct dpu_kms {
+	struct msm_kms base;
+	struct drm_device *dev;
+	int core_rev;
+	struct dpu_mdss_cfg *catalog;
+
+	struct dpu_power_handle phandle;
+	struct dpu_power_client *core_client;
+	struct dpu_power_event *power_event;
+
+	/* directory entry for debugfs */
+	struct dentry *debugfs_root;
+	struct dentry *debugfs_danger;
+	struct dentry *debugfs_vbif;
+
+	/* io/register spaces: */
+	void __iomem *mmio, *vbif[VBIF_MAX], *reg_dma;
+	unsigned long mmio_len, vbif_len[VBIF_MAX], reg_dma_len;
+
+	struct regulator *vdd;
+	struct regulator *mmagic;
+	struct regulator *venus;
+
+	struct dpu_hw_intr *hw_intr;
+	struct dpu_irq irq_obj;
+
+	struct dpu_core_perf perf;
+
+	/* saved atomic state during system suspend */
+	struct drm_atomic_state *suspend_state;
+	bool suspend_block;
+
+	struct dpu_rm rm;
+	bool rm_init;
+
+	struct dpu_hw_vbif *hw_vbif[VBIF_MAX];
+	struct dpu_hw_mdp *hw_mdp;
+
+	bool has_danger_ctrl;
+
+	struct platform_device *pdev;
+	bool rpm_enabled;
+	struct dss_module_power mp;
+};
+
+struct vsync_info {
+	u32 frame_count;
+	u32 line_count;
+};
+
+#define to_dpu_kms(x) container_of(x, struct dpu_kms, base)
+
+/* get struct msm_kms * from drm_device * */
+#define ddev_to_msm_kms(D) ((D) && (D)->dev_private ? \
+		((struct msm_drm_private *)((D)->dev_private))->kms : NULL)
+
+/**
+ * dpu_kms_is_suspend_state - whether or not the system is pm suspended
+ * @dev: Pointer to drm device
+ * Return: Suspend status
+ */
+static inline bool dpu_kms_is_suspend_state(struct drm_device *dev)
+{
+	if (!ddev_to_msm_kms(dev))
+		return false;
+
+	return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_state != NULL;
+}
+
+/**
+ * dpu_kms_is_suspend_blocked - whether or not commits are blocked due to pm
+ *				suspend status
+ * @dev: Pointer to drm device
+ * Return: True if commits should be rejected due to pm suspend
+ */
+static inline bool dpu_kms_is_suspend_blocked(struct drm_device *dev)
+{
+	if (!dpu_kms_is_suspend_state(dev))
+		return false;
+
+	return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_block;
+}
+
+/**
+ * Debugfs functions - extra helper functions for debugfs support
+ *
+ * Main debugfs documentation is located at:
+ *
+ * Documentation/filesystems/debugfs.txt
+ *
+ * @dpu_debugfs_setup_regset32: Initialize data for dpu_debugfs_create_regset32
+ * @dpu_debugfs_create_regset32: Create 32-bit register dump file
+ * @dpu_debugfs_get_root: Get root dentry for DPU_KMS's debugfs node
+ */
+
+/**
+ * Companion structure for dpu_debugfs_create_regset32. Do not initialize the
+ * members of this structure explicitly; use dpu_debugfs_setup_regset32 instead.
+ */
+struct dpu_debugfs_regset32 {
+	uint32_t offset;
+	uint32_t blk_len;
+	struct dpu_kms *dpu_kms;
+};
+
+/**
+ * dpu_debugfs_setup_regset32 - Initialize register block definition for debugfs
+ * This function is meant to initialize dpu_debugfs_regset32 structures for use
+ * with dpu_debugfs_create_regset32.
+ * @regset: opaque register definition structure
+ * @offset: sub-block offset
+ * @length: sub-block length, in bytes
+ * @dpu_kms: pointer to dpu kms structure
+ */
+void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
+		uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_debugfs_create_regset32 - Create register read back file for debugfs
+ *
+ * This function is almost identical to the standard debugfs_create_regset32()
+ * function, with the main difference being that a list of register
+ * names/offsets does not need to be provided. The 'read' function simply
+ * outputs
+ * sequential register values over a specified range.
+ *
+ * Similar to the related debugfs_create_regset32 API, the structure pointed to
+ * by regset needs to persist for the lifetime of the created file. The calling
+ * code is responsible for initialization/management of this structure.
+ *
+ * The structure pointed to by regset is meant to be opaque. Please use
+ * dpu_debugfs_setup_regset32 to initialize it.
+ *
+ * @name:   File name within debugfs
+ * @mode:   File mode within debugfs
+ * @parent: Parent directory entry within debugfs, can be NULL
+ * @regset: Pointer to persistent register block definition
+ *
+ * Return: dentry pointer for newly created file, use either debugfs_remove()
+ *         or debugfs_remove_recursive() (on a parent directory) to remove the
+ *         file
+ */
+void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
+		void *parent, struct dpu_debugfs_regset32 *regset);
+
+/**
+ * dpu_debugfs_get_root - Return root directory entry for KMS's debugfs
+ *
+ * The return value should be passed as the 'parent' argument to subsequent
+ * debugfs create calls.
+ *
+ * @dpu_kms: Pointer to DPU's KMS structure
+ *
+ * Return: dentry pointer for DPU's debugfs location
+ */
+void *dpu_debugfs_get_root(struct dpu_kms *dpu_kms);
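+
+/*
+ * Usage sketch for the regset32 helpers above (offset, length and file
+ * name are hypothetical; note the regset must have persistent storage):
+ *
+ *	static struct dpu_debugfs_regset32 blk_regset;
+ *
+ *	dpu_debugfs_setup_regset32(&blk_regset, 0x1000, 0x100, dpu_kms);
+ *	dpu_debugfs_create_regset32("blk_regs", 0400,
+ *			dpu_debugfs_get_root(dpu_kms), &blk_regset);
+ */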
+
+/**
+ * DPU info management functions
+ * These functions/definitions allow for building up a 'dpu_info' structure
+ * containing one or more "key=value\n" entries.
+ */
+#define DPU_KMS_INFO_MAX_SIZE	4096
+
+/**
+ * Vblank enable/disable functions
+ */
+int dpu_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+void dpu_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+
+void dpu_kms_encoder_enable(struct drm_encoder *encoder);
+
+/**
+ * dpu_kms_get_clk_rate() - get the clock rate
+ * @dpu_kms:  pointer to dpu_kms structure
+ * @clock_name: clock name to get the rate
+ *
+ * Return: current clock rate
+ */
+u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name);
+
+#endif /* __DPU_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
new file mode 100644
index 0000000..9e533b8
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include "dpu_kms.h"
+
+#define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base)
+
+#define HW_INTR_STATUS			0x0010
+
+struct dpu_mdss {
+	struct msm_mdss base;
+	void __iomem *mmio;
+	unsigned long mmio_len;
+	u32 hwversion;
+	struct dss_module_power mp;
+	struct dpu_irq_controller irq_controller;
+};
+
+static irqreturn_t dpu_mdss_irq(int irq, void *arg)
+{
+	struct dpu_mdss *dpu_mdss = arg;
+	u32 interrupts;
+
+	interrupts = readl_relaxed(dpu_mdss->mmio + HW_INTR_STATUS);
+
+	while (interrupts) {
+		irq_hw_number_t hwirq = fls(interrupts) - 1;
+		unsigned int mapping;
+		int rc;
+
+		mapping = irq_find_mapping(dpu_mdss->irq_controller.domain,
+					   hwirq);
+		if (mapping == 0) {
+			DRM_ERROR("couldn't find irq mapping for %lu\n", hwirq);
+			return IRQ_NONE;
+		}
+
+		rc = generic_handle_irq(mapping);
+		if (rc < 0) {
+			DRM_ERROR("handle irq fail: irq=%lu mapping=%u rc=%d\n",
+				  hwirq, mapping, rc);
+			return IRQ_NONE;
+		}
+
+		interrupts &= ~(1 << hwirq);
+	}
+
+	return IRQ_HANDLED;
+}
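+
+/*
+ * Illustrative walk of the decode loop above: for interrupts == 0x21
+ * (bits 0 and 5 set), fls(0x21) - 1 == 5, so hwirq 5 is dispatched and
+ * cleared first, leaving 0x1; the next pass dispatches hwirq 0.
+ */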
+
+static void dpu_mdss_irq_mask(struct irq_data *irqd)
+{
+	struct dpu_mdss *dpu_mdss = irq_data_get_irq_chip_data(irqd);
+
+	/* memory barrier */
+	smp_mb__before_atomic();
+	clear_bit(irqd->hwirq, &dpu_mdss->irq_controller.enabled_mask);
+	/* memory barrier */
+	smp_mb__after_atomic();
+}
+
+static void dpu_mdss_irq_unmask(struct irq_data *irqd)
+{
+	struct dpu_mdss *dpu_mdss = irq_data_get_irq_chip_data(irqd);
+
+	/* memory barrier */
+	smp_mb__before_atomic();
+	set_bit(irqd->hwirq, &dpu_mdss->irq_controller.enabled_mask);
+	/* memory barrier */
+	smp_mb__after_atomic();
+}
+
+static struct irq_chip dpu_mdss_irq_chip = {
+	.name = "dpu_mdss",
+	.irq_mask = dpu_mdss_irq_mask,
+	.irq_unmask = dpu_mdss_irq_unmask,
+};
+
+static int dpu_mdss_irqdomain_map(struct irq_domain *domain,
+		unsigned int irq, irq_hw_number_t hwirq)
+{
+	struct dpu_mdss *dpu_mdss = domain->host_data;
+	int ret;
+
+	irq_set_chip_and_handler(irq, &dpu_mdss_irq_chip, handle_level_irq);
+	ret = irq_set_chip_data(irq, dpu_mdss);
+
+	return ret;
+}
+
+static const struct irq_domain_ops dpu_mdss_irqdomain_ops = {
+	.map = dpu_mdss_irqdomain_map,
+	.xlate = irq_domain_xlate_onecell,
+};
+
+static int _dpu_mdss_irq_domain_add(struct dpu_mdss *dpu_mdss)
+{
+	struct device *dev;
+	struct irq_domain *domain;
+
+	dev = dpu_mdss->base.dev->dev;
+
+	domain = irq_domain_add_linear(dev->of_node, 32,
+			&dpu_mdss_irqdomain_ops, dpu_mdss);
+	if (!domain) {
+		DPU_ERROR("failed to add irq_domain\n");
+		return -EINVAL;
+	}
+
+	dpu_mdss->irq_controller.enabled_mask = 0;
+	dpu_mdss->irq_controller.domain = domain;
+
+	return 0;
+}
+
+static int _dpu_mdss_irq_domain_fini(struct dpu_mdss *dpu_mdss)
+{
+	if (dpu_mdss->irq_controller.domain) {
+		irq_domain_remove(dpu_mdss->irq_controller.domain);
+		dpu_mdss->irq_controller.domain = NULL;
+	}
+	return 0;
+}
+
+static int dpu_mdss_enable(struct msm_mdss *mdss)
+{
+	struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
+	struct dss_module_power *mp = &dpu_mdss->mp;
+	int ret;
+
+	ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
+	if (ret)
+		DPU_ERROR("clock enable failed, ret:%d\n", ret);
+
+	return ret;
+}
+
+static int dpu_mdss_disable(struct msm_mdss *mdss)
+{
+	struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
+	struct dss_module_power *mp = &dpu_mdss->mp;
+	int ret;
+
+	ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
+	if (ret)
+		DPU_ERROR("clock disable failed, ret:%d\n", ret);
+
+	return ret;
+}
+
+static void dpu_mdss_destroy(struct drm_device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev->dev);
+	struct msm_drm_private *priv = dev->dev_private;
+	struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
+	struct dss_module_power *mp = &dpu_mdss->mp;
+
+	_dpu_mdss_irq_domain_fini(dpu_mdss);
+
+	msm_dss_put_clk(mp->clk_config, mp->num_clk);
+	devm_kfree(&pdev->dev, mp->clk_config);
+
+	if (dpu_mdss->mmio)
+		devm_iounmap(&pdev->dev, dpu_mdss->mmio);
+	dpu_mdss->mmio = NULL;
+
+	pm_runtime_disable(dev->dev);
+	priv->mdss = NULL;
+}
+
+static const struct msm_mdss_funcs mdss_funcs = {
+	.enable	= dpu_mdss_enable,
+	.disable = dpu_mdss_disable,
+	.destroy = dpu_mdss_destroy,
+};
+
+int dpu_mdss_init(struct drm_device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev->dev);
+	struct msm_drm_private *priv = dev->dev_private;
+	struct resource *res;
+	struct dpu_mdss *dpu_mdss;
+	struct dss_module_power *mp;
+	int ret = 0;
+
+	dpu_mdss = devm_kzalloc(dev->dev, sizeof(*dpu_mdss), GFP_KERNEL);
+	if (!dpu_mdss)
+		return -ENOMEM;
+
+	dpu_mdss->mmio = msm_ioremap(pdev, "mdss", "mdss");
+	if (IS_ERR(dpu_mdss->mmio))
+		return PTR_ERR(dpu_mdss->mmio);
+
+	DRM_DEBUG("mapped mdss address space @%pK\n", dpu_mdss->mmio);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mdss");
+	if (!res) {
+		DRM_ERROR("failed to get memory resource for mdss\n");
+		return -ENOMEM;
+	}
+	dpu_mdss->mmio_len = resource_size(res);
+
+	mp = &dpu_mdss->mp;
+	ret = msm_dss_parse_clock(pdev, mp);
+	if (ret) {
+		DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
+		goto clk_parse_err;
+	}
+
+	dpu_mdss->base.dev = dev;
+	dpu_mdss->base.funcs = &mdss_funcs;
+
+	ret = _dpu_mdss_irq_domain_add(dpu_mdss);
+	if (ret)
+		goto irq_domain_error;
+
+	ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
+			dpu_mdss_irq, 0, "dpu_mdss_isr", dpu_mdss);
+	if (ret) {
+		DPU_ERROR("failed to init irq: %d\n", ret);
+		goto irq_error;
+	}
+
+	pm_runtime_enable(dev->dev);
+
+	pm_runtime_get_sync(dev->dev);
+	dpu_mdss->hwversion = readl_relaxed(dpu_mdss->mmio);
+	pm_runtime_put_sync(dev->dev);
+
+	priv->mdss = &dpu_mdss->base;
+
+	return ret;
+
+irq_error:
+	_dpu_mdss_irq_domain_fini(dpu_mdss);
+irq_domain_error:
+	msm_dss_put_clk(mp->clk_config, mp->num_clk);
+clk_parse_err:
+	devm_kfree(&pdev->dev, mp->clk_config);
+	if (dpu_mdss->mmio)
+		devm_iounmap(&pdev->dev, dpu_mdss->mmio);
+	dpu_mdss->mmio = NULL;
+	return ret;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
new file mode 100644
index 0000000..4ac2b0c
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -0,0 +1,1971 @@
+/*
+ * Copyright (C) 2014-2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include "dpu_formats.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_hw_catalog_format.h"
+#include "dpu_trace.h"
+#include "dpu_crtc.h"
+#include "dpu_vbif.h"
+#include "dpu_plane.h"
+
+#define DPU_DEBUG_PLANE(pl, fmt, ...) DPU_DEBUG("plane%d " fmt,\
+		(pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_PLANE(pl, fmt, ...) DPU_ERROR("plane%d " fmt,\
+		(pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DECIMATED_DIMENSION(dim, deci) (((dim) + ((1 << (deci)) - 1)) >> (deci))
+#define PHASE_STEP_SHIFT	21
+#define PHASE_STEP_UNIT_SCALE   ((int) (1 << PHASE_STEP_SHIFT))
+#define PHASE_RESIDUAL		15
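+
+/*
+ * Illustrative: DECIMATED_DIMENSION() is a ceiling divide by 2^deci,
+ * e.g. DECIMATED_DIMENSION(1080, 1) == (1080 + 1) >> 1 == 540, and
+ * DECIMATED_DIMENSION(1081, 1) == 541.
+ */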
+
+#define SHARP_STRENGTH_DEFAULT	32
+#define SHARP_EDGE_THR_DEFAULT	112
+#define SHARP_SMOOTH_THR_DEFAULT	8
+#define SHARP_NOISE_THR_DEFAULT	2
+
+#define DPU_NAME_SIZE  12
+
+#define DPU_PLANE_COLOR_FILL_FLAG	BIT(31)
+#define DPU_ZPOS_MAX 255
+
+/* multirect rect index */
+enum {
+	R0,
+	R1,
+	R_MAX
+};
+
+#define DPU_QSEED3_DEFAULT_PRELOAD_H 0x4
+#define DPU_QSEED3_DEFAULT_PRELOAD_V 0x3
+
+#define DEFAULT_REFRESH_RATE	60
+
+/**
+ * enum dpu_plane_qos - Different qos configurations for each pipe
+ *
+ * @DPU_PLANE_QOS_VBLANK_CTRL: Setup VBLANK qos for the pipe.
+ * @DPU_PLANE_QOS_VBLANK_AMORTIZE: Enables amortization within the pipe.
+ *	This configuration is mutually exclusive with VBLANK_CTRL.
+ * @DPU_PLANE_QOS_PANIC_CTRL: Setup panic for the pipe.
+ */
+enum dpu_plane_qos {
+	DPU_PLANE_QOS_VBLANK_CTRL = BIT(0),
+	DPU_PLANE_QOS_VBLANK_AMORTIZE = BIT(1),
+	DPU_PLANE_QOS_PANIC_CTRL = BIT(2),
+};
+
+/*
+ * struct dpu_plane - local dpu plane structure
+ * @aspace: address space pointer
+ * @csc_ptr: Points to the dpu_csc_cfg structure to use for the current frame
+ * @mplane_list: List of multirect planes of the same pipe
+ * @catalog: Points to dpu catalog structure
+ * @revalidate: force revalidation of all the plane properties
+ */
+struct dpu_plane {
+	struct drm_plane base;
+
+	struct mutex lock;
+
+	enum dpu_sspp pipe;
+	uint32_t features;      /* capabilities from catalog */
+	uint32_t nformats;
+	uint32_t formats[64];
+
+	struct dpu_hw_pipe *pipe_hw;
+	struct dpu_hw_pipe_cfg pipe_cfg;
+	struct dpu_hw_pipe_qos_cfg pipe_qos_cfg;
+	uint32_t color_fill;
+	bool is_error;
+	bool is_rt_pipe;
+	bool is_virtual;
+	struct list_head mplane_list;
+	struct dpu_mdss_cfg *catalog;
+
+	struct dpu_csc_cfg *csc_ptr;
+
+	const struct dpu_sspp_sub_blks *pipe_sblk;
+	char pipe_name[DPU_NAME_SIZE];
+
+	/* debugfs related stuff */
+	struct dentry *debugfs_root;
+	struct dpu_debugfs_regset32 debugfs_src;
+	struct dpu_debugfs_regset32 debugfs_scaler;
+	struct dpu_debugfs_regset32 debugfs_csc;
+	bool debugfs_default_scale;
+};
+
+#define to_dpu_plane(x) container_of(x, struct dpu_plane, base)
+
+static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
+{
+	struct msm_drm_private *priv;
+
+	if (!plane || !plane->dev)
+		return NULL;
+	priv = plane->dev->dev_private;
+	if (!priv)
+		return NULL;
+	return to_dpu_kms(priv->kms);
+}
+
+static bool dpu_plane_enabled(struct drm_plane_state *state)
+{
+	return state && state->fb && state->crtc;
+}
+
+static bool dpu_plane_sspp_enabled(struct drm_plane_state *state)
+{
+	return state && state->crtc;
+}
+
+/**
+ * _dpu_plane_calc_fill_level - calculate fill level of the given source format
+ * @plane:		Pointer to drm plane
+ * @fmt:		Pointer to source buffer format
+ * @src_width:		width of source buffer
+ * Return: fill level corresponding to the source buffer/format or 0 if error
+ */
+static inline int _dpu_plane_calc_fill_level(struct drm_plane *plane,
+		const struct dpu_format *fmt, u32 src_width)
+{
+	struct dpu_plane *pdpu, *tmp;
+	struct dpu_plane_state *pstate;
+	u32 fixed_buff_size;
+	u32 total_fl;
+
+	if (!plane || !fmt || !plane->state || !src_width || !fmt->bpp) {
+		DPU_ERROR("invalid arguments\n");
+		return 0;
+	}
+
+	pdpu = to_dpu_plane(plane);
+	pstate = to_dpu_plane_state(plane->state);
+	fixed_buff_size = pdpu->pipe_sblk->common->pixel_ram_size;
+
+	list_for_each_entry(tmp, &pdpu->mplane_list, mplane_list) {
+		if (!dpu_plane_enabled(tmp->base.state))
+			continue;
+		DPU_DEBUG("plane%d/%d src_width:%d/%d\n",
+				pdpu->base.base.id, tmp->base.base.id,
+				src_width,
+				drm_rect_width(&tmp->pipe_cfg.src_rect));
+		src_width = max_t(u32, src_width,
+				  drm_rect_width(&tmp->pipe_cfg.src_rect));
+	}
+
+	if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
+		if (fmt->chroma_sample == DPU_CHROMA_420) {
+			/* NV12 */
+			total_fl = (fixed_buff_size / 2) /
+				((src_width + 32) * fmt->bpp);
+		} else {
+			/* non NV12 */
+			total_fl = (fixed_buff_size / 2) * 2 /
+				((src_width + 32) * fmt->bpp);
+		}
+	} else {
+		if (pstate->multirect_mode == DPU_SSPP_MULTIRECT_PARALLEL) {
+			total_fl = (fixed_buff_size / 2) * 2 /
+				((src_width + 32) * fmt->bpp);
+		} else {
+			total_fl = (fixed_buff_size) * 2 /
+				((src_width + 32) * fmt->bpp);
+		}
+	}
+
+	DPU_DEBUG("plane%u: pnum:%d fmt: %4.4s w:%u fl:%u\n",
+			plane->base.id, pdpu->pipe - SSPP_VIG0,
+			(char *)&fmt->base.pixel_format,
+			src_width, total_fl);
+
+	return total_fl;
+}
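+
+/*
+ * Worked example for the NV12 branch above, with hypothetical values
+ * (pixel_ram_size = 32768 bytes, src_width = 480, fmt->bpp = 1):
+ * total_fl = (32768 / 2) / ((480 + 32) * 1) = 16384 / 512 = 32.
+ */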
+
+/**
+ * _dpu_plane_get_qos_lut - get LUT mapping based on fill level
+ * @tbl:		Pointer to LUT table
+ * @total_fl:		fill level
+ * Return: LUT setting corresponding to the fill level
+ */
+static u64 _dpu_plane_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
+		u32 total_fl)
+{
+	int i;
+
+	if (!tbl || !tbl->nentry || !tbl->entries)
+		return 0;
+
+	for (i = 0; i < tbl->nentry; i++)
+		if (total_fl <= tbl->entries[i].fl)
+			return tbl->entries[i].lut;
+
+	/* if last fl is zero, use as default */
+	if (!tbl->entries[i-1].fl)
+		return tbl->entries[i-1].lut;
+
+	return 0;
+}
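+
+/*
+ * Illustrative lookup, with hypothetical entries {fl, lut} =
+ * {10, A}, {20, B}, {0, C}: total_fl = 15 selects B (the first entry
+ * whose fl >= 15); total_fl = 25 matches no entry, and since the last
+ * fl is 0 the fallback returns C.
+ */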
+
+/**
+ * _dpu_plane_set_qos_lut - set QoS LUT of the given plane
+ * @plane:		Pointer to drm plane
+ * @fb:			Pointer to framebuffer associated with the given plane
+ */
+static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
+		struct drm_framebuffer *fb)
+{
+	struct dpu_plane *pdpu;
+	const struct dpu_format *fmt = NULL;
+	u64 qos_lut;
+	u32 total_fl = 0, lut_usage;
+
+	if (!plane || !fb) {
+		DPU_ERROR("invalid arguments plane %d fb %d\n",
+				plane != 0, fb != 0);
+		return;
+	}
+
+	pdpu = to_dpu_plane(plane);
+
+	if (!pdpu->pipe_hw || !pdpu->pipe_sblk || !pdpu->catalog) {
+		DPU_ERROR("invalid arguments\n");
+		return;
+	} else if (!pdpu->pipe_hw->ops.setup_creq_lut) {
+		return;
+	}
+
+	if (!pdpu->is_rt_pipe) {
+		lut_usage = DPU_QOS_LUT_USAGE_NRT;
+	} else {
+		fmt = dpu_get_dpu_format_ext(
+				fb->format->format,
+				fb->modifier);
+		total_fl = _dpu_plane_calc_fill_level(plane, fmt,
+				drm_rect_width(&pdpu->pipe_cfg.src_rect));
+
+		if (fmt && DPU_FORMAT_IS_LINEAR(fmt))
+			lut_usage = DPU_QOS_LUT_USAGE_LINEAR;
+		else
+			lut_usage = DPU_QOS_LUT_USAGE_MACROTILE;
+	}
+
+	qos_lut = _dpu_plane_get_qos_lut(
+			&pdpu->catalog->perf.qos_lut_tbl[lut_usage], total_fl);
+
+	pdpu->pipe_qos_cfg.creq_lut = qos_lut;
+
+	trace_dpu_perf_set_qos_luts(pdpu->pipe - SSPP_VIG0,
+			(fmt) ? fmt->base.pixel_format : 0,
+			pdpu->is_rt_pipe, total_fl, qos_lut, lut_usage);
+
+	DPU_DEBUG("plane%u: pnum:%d fmt: %4.4s rt:%d fl:%u lut:0x%llx\n",
+			plane->base.id,
+			pdpu->pipe - SSPP_VIG0,
+			fmt ? (char *)&fmt->base.pixel_format : NULL,
+			pdpu->is_rt_pipe, total_fl, qos_lut);
+
+	pdpu->pipe_hw->ops.setup_creq_lut(pdpu->pipe_hw, &pdpu->pipe_qos_cfg);
+}
+
+/**
+ * _dpu_plane_set_danger_lut - set danger/safe LUT of the given plane
+ * @plane:		Pointer to drm plane
+ * @fb:			Pointer to framebuffer associated with the given plane
+ */
+static void _dpu_plane_set_danger_lut(struct drm_plane *plane,
+		struct drm_framebuffer *fb)
+{
+	struct dpu_plane *pdpu;
+	const struct dpu_format *fmt = NULL;
+	u32 danger_lut, safe_lut;
+
+	if (!plane || !fb) {
+		DPU_ERROR("invalid arguments\n");
+		return;
+	}
+
+	pdpu = to_dpu_plane(plane);
+
+	if (!pdpu->pipe_hw || !pdpu->pipe_sblk || !pdpu->catalog) {
+		DPU_ERROR("invalid arguments\n");
+		return;
+	} else if (!pdpu->pipe_hw->ops.setup_danger_safe_lut) {
+		return;
+	}
+
+	if (!pdpu->is_rt_pipe) {
+		danger_lut = pdpu->catalog->perf.danger_lut_tbl
+				[DPU_QOS_LUT_USAGE_NRT];
+		safe_lut = pdpu->catalog->perf.safe_lut_tbl
+				[DPU_QOS_LUT_USAGE_NRT];
+	} else {
+		fmt = dpu_get_dpu_format_ext(
+				fb->format->format,
+				fb->modifier);
+
+		if (fmt && DPU_FORMAT_IS_LINEAR(fmt)) {
+			danger_lut = pdpu->catalog->perf.danger_lut_tbl
+					[DPU_QOS_LUT_USAGE_LINEAR];
+			safe_lut = pdpu->catalog->perf.safe_lut_tbl
+					[DPU_QOS_LUT_USAGE_LINEAR];
+		} else {
+			danger_lut = pdpu->catalog->perf.danger_lut_tbl
+					[DPU_QOS_LUT_USAGE_MACROTILE];
+			safe_lut = pdpu->catalog->perf.safe_lut_tbl
+					[DPU_QOS_LUT_USAGE_MACROTILE];
+		}
+	}
+
+	pdpu->pipe_qos_cfg.danger_lut = danger_lut;
+	pdpu->pipe_qos_cfg.safe_lut = safe_lut;
+
+	trace_dpu_perf_set_danger_luts(pdpu->pipe - SSPP_VIG0,
+			(fmt) ? fmt->base.pixel_format : 0,
+			(fmt) ? fmt->fetch_mode : 0,
+			pdpu->pipe_qos_cfg.danger_lut,
+			pdpu->pipe_qos_cfg.safe_lut);
+
+	DPU_DEBUG("plane%u: pnum:%d fmt: %4.4s mode:%d luts[0x%x, 0x%x]\n",
+		plane->base.id,
+		pdpu->pipe - SSPP_VIG0,
+		fmt ? (char *)&fmt->base.pixel_format : NULL,
+		fmt ? fmt->fetch_mode : -1,
+		pdpu->pipe_qos_cfg.danger_lut,
+		pdpu->pipe_qos_cfg.safe_lut);
+
+	pdpu->pipe_hw->ops.setup_danger_safe_lut(pdpu->pipe_hw,
+			&pdpu->pipe_qos_cfg);
+}
+
+/**
+ * _dpu_plane_set_qos_ctrl - set QoS control of the given plane
+ * @plane:		Pointer to drm plane
+ * @enable:		true to enable QoS control
+ * @flags:		QoS control mode (enum dpu_plane_qos)
+ */
+static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
+	bool enable, u32 flags)
+{
+	struct dpu_plane *pdpu;
+
+	if (!plane) {
+		DPU_ERROR("invalid arguments\n");
+		return;
+	}
+
+	pdpu = to_dpu_plane(plane);
+
+	if (!pdpu->pipe_hw || !pdpu->pipe_sblk) {
+		DPU_ERROR("invalid arguments\n");
+		return;
+	} else if (!pdpu->pipe_hw->ops.setup_qos_ctrl) {
+		return;
+	}
+
+	if (flags & DPU_PLANE_QOS_VBLANK_CTRL) {
+		pdpu->pipe_qos_cfg.creq_vblank = pdpu->pipe_sblk->creq_vblank;
+		pdpu->pipe_qos_cfg.danger_vblank =
+				pdpu->pipe_sblk->danger_vblank;
+		pdpu->pipe_qos_cfg.vblank_en = enable;
+	}
+
+	if (flags & DPU_PLANE_QOS_VBLANK_AMORTIZE) {
+		/* this feature overrules previous VBLANK_CTRL */
+		pdpu->pipe_qos_cfg.vblank_en = false;
+		pdpu->pipe_qos_cfg.creq_vblank = 0; /* clear vblank bits */
+	}
+
+	if (flags & DPU_PLANE_QOS_PANIC_CTRL)
+		pdpu->pipe_qos_cfg.danger_safe_en = enable;
+
+	if (!pdpu->is_rt_pipe) {
+		pdpu->pipe_qos_cfg.vblank_en = false;
+		pdpu->pipe_qos_cfg.danger_safe_en = false;
+	}
+
+	DPU_DEBUG("plane%u: pnum:%d ds:%d vb:%d pri[0x%x, 0x%x] is_rt:%d\n",
+		plane->base.id,
+		pdpu->pipe - SSPP_VIG0,
+		pdpu->pipe_qos_cfg.danger_safe_en,
+		pdpu->pipe_qos_cfg.vblank_en,
+		pdpu->pipe_qos_cfg.creq_vblank,
+		pdpu->pipe_qos_cfg.danger_vblank,
+		pdpu->is_rt_pipe);
+
+	pdpu->pipe_hw->ops.setup_qos_ctrl(pdpu->pipe_hw,
+			&pdpu->pipe_qos_cfg);
+}
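+
+/*
+ * Usage sketch: flags may be OR'd to apply several QoS modes at once,
+ * e.g. _dpu_plane_set_qos_ctrl(plane, true,
+ *		DPU_PLANE_QOS_PANIC_CTRL | DPU_PLANE_QOS_VBLANK_CTRL);
+ * dpu_plane_danger_signal_ctrl() below uses DPU_PLANE_QOS_PANIC_CTRL alone.
+ */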
+
+int dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
+{
+	struct dpu_plane *pdpu;
+	struct msm_drm_private *priv;
+	struct dpu_kms *dpu_kms;
+
+	if (!plane || !plane->dev) {
+		DPU_ERROR("invalid arguments\n");
+		return -EINVAL;
+	}
+
+	priv = plane->dev->dev_private;
+	if (!priv || !priv->kms) {
+		DPU_ERROR("invalid KMS reference\n");
+		return -EINVAL;
+	}
+
+	dpu_kms = to_dpu_kms(priv->kms);
+	pdpu = to_dpu_plane(plane);
+
+	if (!pdpu->is_rt_pipe)
+		goto end;
+
+	pm_runtime_get_sync(&dpu_kms->pdev->dev);
+	_dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL);
+	pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+end:
+	return 0;
+}
+
+/**
+ * _dpu_plane_set_ot_limit - set OT limit for the given plane
+ * @plane:		Pointer to drm plane
+ * @crtc:		Pointer to drm crtc
+ */
+static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
+		struct drm_crtc *crtc)
+{
+	struct dpu_plane *pdpu;
+	struct dpu_vbif_set_ot_params ot_params;
+	struct msm_drm_private *priv;
+	struct dpu_kms *dpu_kms;
+
+	if (!plane || !plane->dev || !crtc) {
+		DPU_ERROR("invalid arguments plane %d crtc %d\n",
+				plane != 0, crtc != 0);
+		return;
+	}
+
+	priv = plane->dev->dev_private;
+	if (!priv || !priv->kms) {
+		DPU_ERROR("invalid KMS reference\n");
+		return;
+	}
+
+	dpu_kms = to_dpu_kms(priv->kms);
+	pdpu = to_dpu_plane(plane);
+	if (!pdpu->pipe_hw) {
+		DPU_ERROR("invalid pipe reference\n");
+		return;
+	}
+
+	memset(&ot_params, 0, sizeof(ot_params));
+	ot_params.xin_id = pdpu->pipe_hw->cap->xin_id;
+	ot_params.num = pdpu->pipe_hw->idx - SSPP_NONE;
+	ot_params.width = drm_rect_width(&pdpu->pipe_cfg.src_rect);
+	ot_params.height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
+	ot_params.is_wfd = !pdpu->is_rt_pipe;
+	ot_params.frame_rate = crtc->mode.vrefresh;
+	ot_params.vbif_idx = VBIF_RT;
+	ot_params.clk_ctrl = pdpu->pipe_hw->cap->clk_ctrl;
+	ot_params.rd = true;
+
+	dpu_vbif_set_ot_limit(dpu_kms, &ot_params);
+}
+
+/**
+ * _dpu_plane_set_qos_remap - set vbif QoS for the given plane
+ * @plane:		Pointer to drm plane
+ */
+static void _dpu_plane_set_qos_remap(struct drm_plane *plane)
+{
+	struct dpu_plane *pdpu;
+	struct dpu_vbif_set_qos_params qos_params;
+	struct msm_drm_private *priv;
+	struct dpu_kms *dpu_kms;
+
+	if (!plane || !plane->dev) {
+		DPU_ERROR("invalid arguments\n");
+		return;
+	}
+
+	priv = plane->dev->dev_private;
+	if (!priv || !priv->kms) {
+		DPU_ERROR("invalid KMS reference\n");
+		return;
+	}
+
+	dpu_kms = to_dpu_kms(priv->kms);
+	pdpu = to_dpu_plane(plane);
+	if (!pdpu->pipe_hw) {
+		DPU_ERROR("invalid pipe reference\n");
+		return;
+	}
+
+	memset(&qos_params, 0, sizeof(qos_params));
+	qos_params.vbif_idx = VBIF_RT;
+	qos_params.clk_ctrl = pdpu->pipe_hw->cap->clk_ctrl;
+	qos_params.xin_id = pdpu->pipe_hw->cap->xin_id;
+	qos_params.num = pdpu->pipe_hw->idx - SSPP_VIG0;
+	qos_params.is_rt = pdpu->is_rt_pipe;
+
+	DPU_DEBUG("plane%d pipe:%d vbif:%d xin:%d rt:%d, clk_ctrl:%d\n",
+			plane->base.id, qos_params.num,
+			qos_params.vbif_idx,
+			qos_params.xin_id, qos_params.is_rt,
+			qos_params.clk_ctrl);
+
+	dpu_vbif_set_qos_remap(dpu_kms, &qos_params);
+}
+
+/**
+ * _dpu_plane_get_aspace - get the address space for the given plane
+ * @pdpu: Pointer to DPU plane object
+ * @pstate: Pointer to DPU plane state
+ * @aspace: Output pointer for the address space
+ */
+static int _dpu_plane_get_aspace(
+		struct dpu_plane *pdpu,
+		struct dpu_plane_state *pstate,
+		struct msm_gem_address_space **aspace)
+{
+	struct dpu_kms *kms;
+
+	if (!pdpu || !pstate || !aspace) {
+		DPU_ERROR("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	kms = _dpu_plane_get_kms(&pdpu->base);
+	if (!kms) {
+		DPU_ERROR("invalid kms\n");
+		return -EINVAL;
+	}
+
+	*aspace = kms->base.aspace;
+
+	return 0;
+}
+
+static inline void _dpu_plane_set_scanout(struct drm_plane *plane,
+		struct dpu_plane_state *pstate,
+		struct dpu_hw_pipe_cfg *pipe_cfg,
+		struct drm_framebuffer *fb)
+{
+	struct dpu_plane *pdpu;
+	struct msm_gem_address_space *aspace = NULL;
+	int ret;
+
+	if (!plane || !pstate || !pipe_cfg || !fb) {
+		DPU_ERROR(
+			"invalid arg(s), plane %d state %d cfg %d fb %d\n",
+			plane != 0, pstate != 0, pipe_cfg != 0, fb != 0);
+		return;
+	}
+
+	pdpu = to_dpu_plane(plane);
+	if (!pdpu->pipe_hw) {
+		DPU_ERROR_PLANE(pdpu, "invalid pipe_hw\n");
+		return;
+	}
+
+	ret = _dpu_plane_get_aspace(pdpu, pstate, &aspace);
+	if (ret) {
+		DPU_ERROR_PLANE(pdpu, "Failed to get aspace %d\n", ret);
+		return;
+	}
+
+	ret = dpu_format_populate_layout(aspace, fb, &pipe_cfg->layout);
+	if (ret == -EAGAIN)
+		DPU_DEBUG_PLANE(pdpu, "not updating same src addrs\n");
+	else if (ret)
+		DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
+	else if (pdpu->pipe_hw->ops.setup_sourceaddress) {
+		trace_dpu_plane_set_scanout(pdpu->pipe_hw->idx,
+					    &pipe_cfg->layout,
+					    pstate->multirect_index);
+		pdpu->pipe_hw->ops.setup_sourceaddress(pdpu->pipe_hw, pipe_cfg,
+						pstate->multirect_index);
+	}
+}
+
+static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
+		struct dpu_plane_state *pstate,
+		uint32_t src_w, uint32_t src_h, uint32_t dst_w, uint32_t dst_h,
+		struct dpu_hw_scaler3_cfg *scale_cfg,
+		const struct dpu_format *fmt,
+		uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v)
+{
+	uint32_t i;
+
+	if (!pdpu || !pstate || !scale_cfg || !fmt || !chroma_subsmpl_h ||
+			!chroma_subsmpl_v) {
+		DPU_ERROR(
+			"pdpu %d pstate %d scale_cfg %d fmt %d smp_h %d smp_v %d\n",
+			!!pdpu, !!pstate, !!scale_cfg, !!fmt, chroma_subsmpl_h,
+			chroma_subsmpl_v);
+		return;
+	}
+
+	memset(scale_cfg, 0, sizeof(*scale_cfg));
+	memset(&pstate->pixel_ext, 0, sizeof(struct dpu_hw_pixel_ext));
+
+	scale_cfg->phase_step_x[DPU_SSPP_COMP_0] =
+		mult_frac((1 << PHASE_STEP_SHIFT), src_w, dst_w);
+	scale_cfg->phase_step_y[DPU_SSPP_COMP_0] =
+		mult_frac((1 << PHASE_STEP_SHIFT), src_h, dst_h);
+
+
+	scale_cfg->phase_step_y[DPU_SSPP_COMP_1_2] =
+		scale_cfg->phase_step_y[DPU_SSPP_COMP_0] / chroma_subsmpl_v;
+	scale_cfg->phase_step_x[DPU_SSPP_COMP_1_2] =
+		scale_cfg->phase_step_x[DPU_SSPP_COMP_0] / chroma_subsmpl_h;
+
+	scale_cfg->phase_step_x[DPU_SSPP_COMP_2] =
+		scale_cfg->phase_step_x[DPU_SSPP_COMP_1_2];
+	scale_cfg->phase_step_y[DPU_SSPP_COMP_2] =
+		scale_cfg->phase_step_y[DPU_SSPP_COMP_1_2];
+
+	scale_cfg->phase_step_x[DPU_SSPP_COMP_3] =
+		scale_cfg->phase_step_x[DPU_SSPP_COMP_0];
+	scale_cfg->phase_step_y[DPU_SSPP_COMP_3] =
+		scale_cfg->phase_step_y[DPU_SSPP_COMP_0];
+
+	for (i = 0; i < DPU_MAX_PLANES; i++) {
+		scale_cfg->src_width[i] = src_w;
+		scale_cfg->src_height[i] = src_h;
+		if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) {
+			scale_cfg->src_width[i] /= chroma_subsmpl_h;
+			scale_cfg->src_height[i] /= chroma_subsmpl_v;
+		}
+		scale_cfg->preload_x[i] = DPU_QSEED3_DEFAULT_PRELOAD_H;
+		scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V;
+		pstate->pixel_ext.num_ext_pxls_top[i] =
+			scale_cfg->src_height[i];
+		pstate->pixel_ext.num_ext_pxls_left[i] =
+			scale_cfg->src_width[i];
+	}
+	if (!(DPU_FORMAT_IS_YUV(fmt)) && (src_h == dst_h)
+		&& (src_w == dst_w))
+		return;
+
+	scale_cfg->dst_width = dst_w;
+	scale_cfg->dst_height = dst_h;
+	scale_cfg->y_rgb_filter_cfg = DPU_SCALE_BIL;
+	scale_cfg->uv_filter_cfg = DPU_SCALE_BIL;
+	scale_cfg->alpha_filter_cfg = DPU_SCALE_ALPHA_BIL;
+	scale_cfg->lut_flag = 0;
+	scale_cfg->blend_cfg = 1;
+	scale_cfg->enable = 1;
+}
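+
+/*
+ * Illustrative phase-step values for the computation above: a 2:1
+ * horizontal downscale (src_w = 1920, dst_w = 960) gives
+ * mult_frac(1 << 21, 1920, 960) == 2 << 21 == 0x400000, while a 1:2
+ * upscale (960 -> 1920) gives 1 << 20 == 0x100000.
+ */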
+
+static inline void _dpu_plane_setup_csc(struct dpu_plane *pdpu)
+{
+	static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
+		{
+			/* S15.16 format */
+			0x00012A00, 0x00000000, 0x00019880,
+			0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+			0x00012A00, 0x00020480, 0x00000000,
+		},
+		/* signed bias */
+		{ 0xfff0, 0xff80, 0xff80,},
+		{ 0x0, 0x0, 0x0,},
+		/* unsigned clamp */
+		{ 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
+		{ 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,},
+	};
+	static const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L = {
+		{
+			/* S15.16 format */
+			0x00012A00, 0x00000000, 0x00019880,
+			0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+			0x00012A00, 0x00020480, 0x00000000,
+			},
+		/* signed bias */
+		{ 0xffc0, 0xfe00, 0xfe00,},
+		{ 0x0, 0x0, 0x0,},
+		/* unsigned clamp */
+		{ 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
+		{ 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,},
+	};
+
+	if (!pdpu) {
+		DPU_ERROR("invalid plane\n");
+		return;
+	}
+
+	if (BIT(DPU_SSPP_CSC_10BIT) & pdpu->features)
+		pdpu->csc_ptr = (struct dpu_csc_cfg *)&dpu_csc10_YUV2RGB_601L;
+	else
+		pdpu->csc_ptr = (struct dpu_csc_cfg *)&dpu_csc_YUV2RGB_601L;
+
+	DPU_DEBUG_PLANE(pdpu, "using 0x%X 0x%X 0x%X...\n",
+			pdpu->csc_ptr->csc_mv[0],
+			pdpu->csc_ptr->csc_mv[1],
+			pdpu->csc_ptr->csc_mv[2]);
+}
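+
+/*
+ * Illustrative reading of the S15.16 entries above: 0x00012A00 / 65536
+ * == 1.1640625, approximately the BT.601 limited-range luma expansion
+ * factor 255/219.
+ */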
+
+static void _dpu_plane_setup_scaler(struct dpu_plane *pdpu,
+		struct dpu_plane_state *pstate,
+		const struct dpu_format *fmt, bool color_fill)
+{
+	struct dpu_hw_pixel_ext *pe;
+	uint32_t chroma_subsmpl_h, chroma_subsmpl_v;
+
+	if (!pdpu || !fmt || !pstate) {
+		DPU_ERROR("invalid arg(s), plane %d fmt %d state %d\n",
+				pdpu != 0, fmt != 0, pstate != 0);
+		return;
+	}
+
+	pe = &pstate->pixel_ext;
+
+	/* don't chroma subsample if decimating */
+	chroma_subsmpl_h =
+		drm_format_horz_chroma_subsampling(fmt->base.pixel_format);
+	chroma_subsmpl_v =
+		drm_format_vert_chroma_subsampling(fmt->base.pixel_format);
+
+	/* update scaler. calculate default config for QSEED3 */
+	_dpu_plane_setup_scaler3(pdpu, pstate,
+			drm_rect_width(&pdpu->pipe_cfg.src_rect),
+			drm_rect_height(&pdpu->pipe_cfg.src_rect),
+			drm_rect_width(&pdpu->pipe_cfg.dst_rect),
+			drm_rect_height(&pdpu->pipe_cfg.dst_rect),
+			&pstate->scaler3_cfg, fmt,
+			chroma_subsmpl_h, chroma_subsmpl_v);
+}
+
+/**
+ * _dpu_plane_color_fill - enables color fill on plane
+ * @pdpu:   Pointer to DPU plane object
+ * @color:  RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
+ * @alpha:  8-bit fill alpha value, 255 selects 100% alpha
+ * Returns: 0 on success
+ */
+static int _dpu_plane_color_fill(struct dpu_plane *pdpu,
+		uint32_t color, uint32_t alpha)
+{
+	const struct dpu_format *fmt;
+	const struct drm_plane *plane;
+	struct dpu_plane_state *pstate;
+
+	if (!pdpu || !pdpu->base.state) {
+		DPU_ERROR("invalid plane\n");
+		return -EINVAL;
+	}
+
+	if (!pdpu->pipe_hw) {
+		DPU_ERROR_PLANE(pdpu, "invalid plane h/w pointer\n");
+		return -EINVAL;
+	}
+
+	plane = &pdpu->base;
+	pstate = to_dpu_plane_state(plane->state);
+
+	DPU_DEBUG_PLANE(pdpu, "\n");
+
+	/*
+	 * select fill format to match user property expectation,
+	 * h/w only supports RGB variants
+	 */
+	fmt = dpu_get_dpu_format(DRM_FORMAT_ABGR8888);
+
+	/* update sspp */
+	if (fmt && pdpu->pipe_hw->ops.setup_solidfill) {
+		pdpu->pipe_hw->ops.setup_solidfill(pdpu->pipe_hw,
+				(color & 0xFFFFFF) | ((alpha & 0xFF) << 24),
+				pstate->multirect_index);
+
+		/* override scaler/decimation if solid fill */
+		pdpu->pipe_cfg.src_rect.x1 = 0;
+		pdpu->pipe_cfg.src_rect.y1 = 0;
+		pdpu->pipe_cfg.src_rect.x2 =
+			drm_rect_width(&pdpu->pipe_cfg.dst_rect);
+		pdpu->pipe_cfg.src_rect.y2 =
+			drm_rect_height(&pdpu->pipe_cfg.dst_rect);
+		_dpu_plane_setup_scaler(pdpu, pstate, fmt, true);
+
+		if (pdpu->pipe_hw->ops.setup_format)
+			pdpu->pipe_hw->ops.setup_format(pdpu->pipe_hw,
+					fmt, DPU_SSPP_SOLID_FILL,
+					pstate->multirect_index);
+
+		if (pdpu->pipe_hw->ops.setup_rects)
+			pdpu->pipe_hw->ops.setup_rects(pdpu->pipe_hw,
+					&pdpu->pipe_cfg,
+					pstate->multirect_index);
+
+		if (pdpu->pipe_hw->ops.setup_pe)
+			pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw,
+					&pstate->pixel_ext);
+
+		if (pdpu->pipe_hw->ops.setup_scaler &&
+				pstate->multirect_index != DPU_SSPP_RECT_1)
+			pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw,
+					&pdpu->pipe_cfg, &pstate->pixel_ext,
+					&pstate->scaler3_cfg);
+	}
+
+	return 0;
+}
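+
+/*
+ * Example usage (illustrative) - dpu_plane_flush() uses this to force a
+ * solid white frame at full opacity when a plane is in an error state:
+ *
+ *	_dpu_plane_color_fill(pdpu, 0xFFFFFF, 0xFF);
+ *
+ * The fill word programmed into the SSPP packs B[23:16] G[15:8] R[7:0],
+ * with the 8-bit alpha placed in bits [31:24].
+ */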
+
+void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state)
+{
+	struct dpu_plane_state *pstate;
+
+	if (!drm_state)
+		return;
+
+	pstate = to_dpu_plane_state(drm_state);
+
+	pstate->multirect_index = DPU_SSPP_RECT_SOLO;
+	pstate->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+}
+
+int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane)
+{
+	struct dpu_plane_state *pstate[R_MAX];
+	const struct drm_plane_state *drm_state[R_MAX];
+	struct drm_rect src[R_MAX], dst[R_MAX];
+	struct dpu_plane *dpu_plane[R_MAX];
+	const struct dpu_format *fmt[R_MAX];
+	int i, buffer_lines;
+	unsigned int max_tile_height = 1;
+	bool parallel_fetch_qualified = true;
+	bool has_tiled_rect = false;
+
+	for (i = 0; i < R_MAX; i++) {
+		const struct msm_format *msm_fmt;
+
+		drm_state[i] = i ? plane->r1 : plane->r0;
+		msm_fmt = msm_framebuffer_format(drm_state[i]->fb);
+		fmt[i] = to_dpu_format(msm_fmt);
+
+		if (DPU_FORMAT_IS_UBWC(fmt[i])) {
+			has_tiled_rect = true;
+			if (fmt[i]->tile_height > max_tile_height)
+				max_tile_height = fmt[i]->tile_height;
+		}
+	}
+
+	for (i = 0; i < R_MAX; i++) {
+		int width_threshold;
+
+		pstate[i] = to_dpu_plane_state(drm_state[i]);
+		dpu_plane[i] = to_dpu_plane(drm_state[i]->plane);
+
+		if (pstate[i] == NULL) {
+			DPU_ERROR("DPU plane state of plane id %d is NULL\n",
+				drm_state[i]->plane->base.id);
+			return -EINVAL;
+		}
+
+		src[i].x1 = drm_state[i]->src_x >> 16;
+		src[i].y1 = drm_state[i]->src_y >> 16;
+		src[i].x2 = src[i].x1 + (drm_state[i]->src_w >> 16);
+		src[i].y2 = src[i].y1 + (drm_state[i]->src_h >> 16);
+
+		dst[i] = drm_plane_state_dest(drm_state[i]);
+
+		if (drm_rect_calc_hscale(&src[i], &dst[i], 1, 1) != 1 ||
+		    drm_rect_calc_vscale(&src[i], &dst[i], 1, 1) != 1) {
+			DPU_ERROR_PLANE(dpu_plane[i],
+				"scaling is not supported in multirect mode\n");
+			return -EINVAL;
+		}
+
+		if (DPU_FORMAT_IS_YUV(fmt[i])) {
+			DPU_ERROR_PLANE(dpu_plane[i],
+				"Unsupported format for multirect mode\n");
+			return -EINVAL;
+		}
+
+		/*
+		 * SSPP PD_MEM is split in half - one for each RECT.
+		 * Tiled formats need 5 lines of buffering while fetching,
+		 * whereas linear formats need only 2 lines.
+		 * So we cannot support more than half of the supported SSPP
+		 * width for tiled formats.
+		 */
+		width_threshold = dpu_plane[i]->pipe_sblk->common->maxlinewidth;
+		if (has_tiled_rect)
+			width_threshold /= 2;
+
+		if (parallel_fetch_qualified &&
+		    drm_rect_width(&src[i]) > width_threshold)
+			parallel_fetch_qualified = false;
+
+	}
+
+	/* Validate RECTs and set the mode */
+
+	/* Prefer PARALLEL FETCH Mode over TIME_MX Mode */
+	if (parallel_fetch_qualified) {
+		pstate[R0]->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+		pstate[R1]->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+
+		goto done;
+	}
+
+	/* TIME_MX Mode */
+	buffer_lines = 2 * max_tile_height;
+
+	if (dst[R1].y1 >= dst[R0].y2 + buffer_lines ||
+	    dst[R0].y1 >= dst[R1].y2 + buffer_lines) {
+		pstate[R0]->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX;
+		pstate[R1]->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX;
+	} else {
+		DPU_ERROR(
+			"No multirect mode possible for the planes (%d - %d)\n",
+			drm_state[R0]->plane->base.id,
+			drm_state[R1]->plane->base.id);
+		return -EINVAL;
+	}
+
+done:
+	if (dpu_plane[R0]->is_virtual) {
+		pstate[R0]->multirect_index = DPU_SSPP_RECT_1;
+		pstate[R1]->multirect_index = DPU_SSPP_RECT_0;
+	} else {
+		pstate[R0]->multirect_index = DPU_SSPP_RECT_0;
+		pstate[R1]->multirect_index = DPU_SSPP_RECT_1;
+	}
+
+	DPU_DEBUG_PLANE(dpu_plane[R0], "R0: %d - %d\n",
+		pstate[R0]->multirect_mode, pstate[R0]->multirect_index);
+	DPU_DEBUG_PLANE(dpu_plane[R1], "R1: %d - %d\n",
+		pstate[R1]->multirect_mode, pstate[R1]->multirect_index);
+	return 0;
+}
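+
+/*
+ * Illustrative walk-through of the mode selection above (numbers are
+ * hypothetical): with an SSPP maxlinewidth of 2048 and one UBWC (tiled)
+ * rect in the pair, width_threshold becomes 1024. If both source rects fit
+ * under that threshold, the pair is programmed in PARALLEL fetch mode;
+ * otherwise the destination rects must be separated vertically by at least
+ * 2 * max_tile_height lines to qualify for TIME_MX mode.
+ */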
+
+/**
+ * dpu_plane_get_ctl_flush - get control flush for the given plane
+ * @plane: Pointer to drm plane structure
+ * @ctl: Pointer to hardware control driver
+ * @flush_sspp: Pointer to sspp flush control word
+ */
+void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
+		u32 *flush_sspp)
+{
+	struct dpu_plane_state *pstate;
+
+	if (!plane || !flush_sspp) {
+		DPU_ERROR("invalid parameters\n");
+		return;
+	}
+
+	pstate = to_dpu_plane_state(plane->state);
+
+	*flush_sspp = ctl->ops.get_bitmask_sspp(ctl, dpu_plane_pipe(plane));
+}
+
+static int dpu_plane_prepare_fb(struct drm_plane *plane,
+		struct drm_plane_state *new_state)
+{
+	struct drm_framebuffer *fb = new_state->fb;
+	struct dpu_plane *pdpu = to_dpu_plane(plane);
+	struct dpu_plane_state *pstate = to_dpu_plane_state(new_state);
+	struct dpu_hw_fmt_layout layout;
+	struct drm_gem_object *obj;
+	struct msm_gem_object *msm_obj;
+	struct dma_fence *fence;
+	struct msm_gem_address_space *aspace;
+	int ret;
+
+	if (!new_state->fb)
+		return 0;
+
+	DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", fb->base.id);
+
+	ret = _dpu_plane_get_aspace(pdpu, pstate, &aspace);
+	if (ret) {
+		DPU_ERROR_PLANE(pdpu, "Failed to get aspace\n");
+		return ret;
+	}
+
+	/* cache aspace */
+	pstate->aspace = aspace;
+
+	/*
+	 * TODO: Need to sort out the msm_framebuffer_prepare() call below so
+	 *       we can use msm_atomic_prepare_fb() instead of doing the
+	 *       implicit fence and fb prepare by hand here.
+	 */
+	obj = msm_framebuffer_bo(new_state->fb, 0);
+	msm_obj = to_msm_bo(obj);
+	fence = reservation_object_get_excl_rcu(msm_obj->resv);
+	if (fence)
+		drm_atomic_set_fence_for_plane(new_state, fence);
+
+	if (pstate->aspace) {
+		ret = msm_framebuffer_prepare(new_state->fb,
+				pstate->aspace);
+		if (ret) {
+			DPU_ERROR("failed to prepare framebuffer\n");
+			return ret;
+		}
+	}
+
+	/* validate framebuffer layout before commit */
+	ret = dpu_format_populate_layout(pstate->aspace,
+			new_state->fb, &layout);
+	if (ret) {
+		DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void dpu_plane_cleanup_fb(struct drm_plane *plane,
+		struct drm_plane_state *old_state)
+{
+	struct dpu_plane *pdpu = to_dpu_plane(plane);
+	struct dpu_plane_state *old_pstate;
+
+	if (!old_state || !old_state->fb)
+		return;
+
+	old_pstate = to_dpu_plane_state(old_state);
+
+	DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", old_state->fb->base.id);
+
+	msm_framebuffer_cleanup(old_state->fb, old_pstate->aspace);
+}
+
+static bool dpu_plane_validate_src(struct drm_rect *src,
+				   struct drm_rect *fb_rect,
+				   uint32_t min_src_size)
+{
+	/* Ensure fb size is supported */
+	if (drm_rect_width(fb_rect) > MAX_IMG_WIDTH ||
+	    drm_rect_height(fb_rect) > MAX_IMG_HEIGHT)
+		return false;
+
+	/* Ensure src rect is above the minimum size */
+	if (drm_rect_width(src) < min_src_size ||
+	    drm_rect_height(src) < min_src_size)
+		return false;
+
+	/* Ensure src is fully encapsulated in fb */
+	return drm_rect_intersect(fb_rect, src) &&
+		drm_rect_equals(fb_rect, src);
+}
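+
+/*
+ * For example (hypothetical sizes): with a 1920x1080 framebuffer, a source
+ * rect of (0,0)-(1920,1080) passes, while (1900,0)-(1940,100) fails the
+ * final check: intersecting it with the fb rect clips it to
+ * (1900,0)-(1920,100), which no longer equals the original src.
+ */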
+
+static int dpu_plane_sspp_atomic_check(struct drm_plane *plane,
+		struct drm_plane_state *state)
+{
+	int ret = 0;
+	struct dpu_plane *pdpu;
+	struct dpu_plane_state *pstate;
+	const struct dpu_format *fmt;
+	struct drm_rect src, dst, fb_rect = { 0 };
+	uint32_t max_upscale = 1, max_downscale = 1;
+	uint32_t min_src_size, max_linewidth;
+	int hscale = 1, vscale = 1;
+
+	if (!plane || !state) {
+		DPU_ERROR("invalid arg(s), plane %d state %d\n",
+				plane != NULL, state != NULL);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	pdpu = to_dpu_plane(plane);
+	pstate = to_dpu_plane_state(state);
+
+	if (!pdpu->pipe_sblk) {
+		DPU_ERROR_PLANE(pdpu, "invalid catalog\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	src.x1 = state->src_x >> 16;
+	src.y1 = state->src_y >> 16;
+	src.x2 = src.x1 + (state->src_w >> 16);
+	src.y2 = src.y1 + (state->src_h >> 16);
+
+	dst = drm_plane_state_dest(state);
+
+	fb_rect.x2 = state->fb->width;
+	fb_rect.y2 = state->fb->height;
+
+	max_linewidth = pdpu->pipe_sblk->common->maxlinewidth;
+
+	if (pdpu->features & DPU_SSPP_SCALER) {
+		max_downscale = pdpu->pipe_sblk->maxdwnscale;
+		max_upscale = pdpu->pipe_sblk->maxupscale;
+	}
+	if (drm_rect_width(&src) < drm_rect_width(&dst))
+		hscale = drm_rect_calc_hscale(&src, &dst, 1, max_upscale);
+	else
+		hscale = drm_rect_calc_hscale(&dst, &src, 1, max_downscale);
+	if (drm_rect_height(&src) < drm_rect_height(&dst))
+		vscale = drm_rect_calc_vscale(&src, &dst, 1, max_upscale);
+	else
+		vscale = drm_rect_calc_vscale(&dst, &src, 1, max_downscale);
+
+	DPU_DEBUG_PLANE(pdpu, "check %d -> %d\n",
+		dpu_plane_enabled(plane->state), dpu_plane_enabled(state));
+
+	if (!dpu_plane_enabled(state))
+		goto exit;
+
+	fmt = to_dpu_format(msm_framebuffer_format(state->fb));
+
+	min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
+
+	if (DPU_FORMAT_IS_YUV(fmt) &&
+		(!(pdpu->features & DPU_SSPP_SCALER) ||
+		 !(pdpu->features & (BIT(DPU_SSPP_CSC)
+		 | BIT(DPU_SSPP_CSC_10BIT))))) {
+		DPU_ERROR_PLANE(pdpu,
+				"plane doesn't have scaler/csc for yuv\n");
+		ret = -EINVAL;
+
+	/* check src bounds */
+	} else if (!dpu_plane_validate_src(&src, &fb_rect, min_src_size)) {
+		DPU_ERROR_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n",
+				DRM_RECT_ARG(&src));
+		ret = -E2BIG;
+
+	/* valid yuv image */
+	} else if (DPU_FORMAT_IS_YUV(fmt) &&
+		   (src.x1 & 0x1 || src.y1 & 0x1 ||
+		    drm_rect_width(&src) & 0x1 ||
+		    drm_rect_height(&src) & 0x1)) {
+		DPU_ERROR_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n",
+				DRM_RECT_ARG(&src));
+		ret = -EINVAL;
+
+	/* min dst support */
+	} else if (drm_rect_width(&dst) < 0x1 || drm_rect_height(&dst) < 0x1) {
+		DPU_ERROR_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n",
+				DRM_RECT_ARG(&dst));
+		ret = -EINVAL;
+
+	/* check decimated source width */
+	} else if (drm_rect_width(&src) > max_linewidth) {
+		DPU_ERROR_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
+				DRM_RECT_ARG(&src), max_linewidth);
+		ret = -E2BIG;
+
+	/* check scaler capability */
+	} else if (hscale < 0 || vscale < 0) {
+		DPU_ERROR_PLANE(pdpu, "invalid scaling requested src="
+				DRM_RECT_FMT " dst=" DRM_RECT_FMT "\n",
+				DRM_RECT_ARG(&src), DRM_RECT_ARG(&dst));
+		ret = -E2BIG;
+	}
+
+exit:
+	return ret;
+}
+
+static int dpu_plane_atomic_check(struct drm_plane *plane,
+		struct drm_plane_state *state)
+{
+	if (!state->fb)
+		return 0;
+
+	DPU_DEBUG_PLANE(to_dpu_plane(plane), "\n");
+
+	return dpu_plane_sspp_atomic_check(plane, state);
+}
+
+void dpu_plane_flush(struct drm_plane *plane)
+{
+	struct dpu_plane *pdpu;
+	struct dpu_plane_state *pstate;
+
+	if (!plane || !plane->state) {
+		DPU_ERROR("invalid plane\n");
+		return;
+	}
+
+	pdpu = to_dpu_plane(plane);
+	pstate = to_dpu_plane_state(plane->state);
+
+	/*
+	 * These updates have to be done immediately before the plane flush
+	 * timing, and may not be moved to the atomic_update/mode_set functions.
+	 */
+	if (pdpu->is_error)
+		/* force white frame with 100% alpha pipe output on error */
+		_dpu_plane_color_fill(pdpu, 0xFFFFFF, 0xFF);
+	else if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG)
+		/* force 100% alpha */
+		_dpu_plane_color_fill(pdpu, pdpu->color_fill, 0xFF);
+	else if (pdpu->pipe_hw && pdpu->csc_ptr && pdpu->pipe_hw->ops.setup_csc)
+		pdpu->pipe_hw->ops.setup_csc(pdpu->pipe_hw, pdpu->csc_ptr);
+
+	/* flag h/w flush complete */
+	if (plane->state)
+		pstate->pending = false;
+}
+
+/**
+ * dpu_plane_set_error: enable/disable error condition
+ * @plane: pointer to drm_plane structure
+ */
+void dpu_plane_set_error(struct drm_plane *plane, bool error)
+{
+	struct dpu_plane *pdpu;
+
+	if (!plane)
+		return;
+
+	pdpu = to_dpu_plane(plane);
+	pdpu->is_error = error;
+}
+
+static int dpu_plane_sspp_atomic_update(struct drm_plane *plane,
+				struct drm_plane_state *old_state)
+{
+	uint32_t nplanes, src_flags;
+	struct dpu_plane *pdpu;
+	struct drm_plane_state *state;
+	struct dpu_plane_state *pstate;
+	struct dpu_plane_state *old_pstate;
+	const struct dpu_format *fmt;
+	struct drm_crtc *crtc;
+	struct drm_framebuffer *fb;
+	int ret, min_scale;
+
+	if (!plane) {
+		DPU_ERROR("invalid plane\n");
+		return -EINVAL;
+	} else if (!plane->state) {
+		DPU_ERROR("invalid plane state\n");
+		return -EINVAL;
+	} else if (!old_state) {
+		DPU_ERROR("invalid old state\n");
+		return -EINVAL;
+	}
+
+	pdpu = to_dpu_plane(plane);
+	state = plane->state;
+
+	pstate = to_dpu_plane_state(state);
+
+	old_pstate = to_dpu_plane_state(old_state);
+
+	crtc = state->crtc;
+	fb = state->fb;
+	if (!crtc || !fb) {
+		DPU_ERROR_PLANE(pdpu, "invalid crtc %d or fb %d\n",
+				crtc != NULL, fb != NULL);
+		return -EINVAL;
+	}
+	fmt = to_dpu_format(msm_framebuffer_format(fb));
+	nplanes = fmt->num_planes;
+
+	memset(&(pdpu->pipe_cfg), 0, sizeof(struct dpu_hw_pipe_cfg));
+
+	_dpu_plane_set_scanout(plane, pstate, &pdpu->pipe_cfg, fb);
+
+	pstate->pending = true;
+
+	pdpu->is_rt_pipe = (dpu_crtc_get_client_type(crtc) != NRT_CLIENT);
+	_dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL);
+
+	min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxdwnscale);
+	ret = drm_atomic_helper_check_plane_state(state, crtc->state, min_scale,
+					  pdpu->pipe_sblk->maxupscale << 16,
+					  true, false);
+	if (ret) {
+		DPU_ERROR_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
+		return ret;
+	}
+
+	DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FP_FMT "->crtc%u " DRM_RECT_FMT
+			", %4.4s ubwc %d\n", fb->base.id, DRM_RECT_FP_ARG(&state->src),
+			crtc->base.id, DRM_RECT_ARG(&state->dst),
+			(char *)&fmt->base.pixel_format, DPU_FORMAT_IS_UBWC(fmt));
+
+	pdpu->pipe_cfg.src_rect = state->src;
+
+	/* state->src is 16.16, src_rect is not */
+	pdpu->pipe_cfg.src_rect.x1 >>= 16;
+	pdpu->pipe_cfg.src_rect.x2 >>= 16;
+	pdpu->pipe_cfg.src_rect.y1 >>= 16;
+	pdpu->pipe_cfg.src_rect.y2 >>= 16;
+
+	pdpu->pipe_cfg.dst_rect = state->dst;
+
+	_dpu_plane_setup_scaler(pdpu, pstate, fmt, false);
+
+	/* override for color fill */
+	if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG) {
+		/* skip remaining processing on color fill */
+		return 0;
+	}
+
+	if (pdpu->pipe_hw->ops.setup_rects) {
+		pdpu->pipe_hw->ops.setup_rects(pdpu->pipe_hw,
+				&pdpu->pipe_cfg,
+				pstate->multirect_index);
+	}
+
+	if (pdpu->pipe_hw->ops.setup_pe &&
+			(pstate->multirect_index != DPU_SSPP_RECT_1))
+		pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw,
+				&pstate->pixel_ext);
+
+	/*
+	 * When programmed in multirect mode, the scaler block will be
+	 * bypassed. We still need to update alpha and bitwidth,
+	 * but ONLY for RECT0.
+	 */
+	if (pdpu->pipe_hw->ops.setup_scaler &&
+			pstate->multirect_index != DPU_SSPP_RECT_1)
+		pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw,
+				&pdpu->pipe_cfg, &pstate->pixel_ext,
+				&pstate->scaler3_cfg);
+
+	if (pdpu->pipe_hw->ops.setup_multirect)
+		pdpu->pipe_hw->ops.setup_multirect(
+				pdpu->pipe_hw,
+				pstate->multirect_index,
+				pstate->multirect_mode);
+
+	if (pdpu->pipe_hw->ops.setup_format) {
+		src_flags = 0x0;
+
+		/* update format */
+		pdpu->pipe_hw->ops.setup_format(pdpu->pipe_hw, fmt, src_flags,
+				pstate->multirect_index);
+
+		if (pdpu->pipe_hw->ops.setup_cdp) {
+			struct dpu_hw_pipe_cdp_cfg *cdp_cfg = &pstate->cdp_cfg;
+
+			memset(cdp_cfg, 0, sizeof(struct dpu_hw_pipe_cdp_cfg));
+
+			cdp_cfg->enable = pdpu->catalog->perf.cdp_cfg
+					[DPU_PERF_CDP_USAGE_RT].rd_enable;
+			cdp_cfg->ubwc_meta_enable =
+					DPU_FORMAT_IS_UBWC(fmt);
+			cdp_cfg->tile_amortize_enable =
+					DPU_FORMAT_IS_UBWC(fmt) ||
+					DPU_FORMAT_IS_TILE(fmt);
+			cdp_cfg->preload_ahead = DPU_SSPP_CDP_PRELOAD_AHEAD_64;
+
+			pdpu->pipe_hw->ops.setup_cdp(pdpu->pipe_hw, cdp_cfg);
+		}
+
+		/* update csc */
+		if (DPU_FORMAT_IS_YUV(fmt))
+			_dpu_plane_setup_csc(pdpu);
+		else
+			pdpu->csc_ptr = NULL;
+	}
+
+	_dpu_plane_set_qos_lut(plane, fb);
+	_dpu_plane_set_danger_lut(plane, fb);
+
+	if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+		_dpu_plane_set_qos_ctrl(plane, true, DPU_PLANE_QOS_PANIC_CTRL);
+		_dpu_plane_set_ot_limit(plane, crtc);
+	}
+
+	_dpu_plane_set_qos_remap(plane);
+	return 0;
+}
+
+static void _dpu_plane_atomic_disable(struct drm_plane *plane,
+				struct drm_plane_state *old_state)
+{
+	struct dpu_plane *pdpu;
+	struct drm_plane_state *state;
+	struct dpu_plane_state *pstate;
+
+	if (!plane) {
+		DPU_ERROR("invalid plane\n");
+		return;
+	} else if (!plane->state) {
+		DPU_ERROR("invalid plane state\n");
+		return;
+	} else if (!old_state) {
+		DPU_ERROR("invalid old state\n");
+		return;
+	}
+
+	pdpu = to_dpu_plane(plane);
+	state = plane->state;
+	pstate = to_dpu_plane_state(state);
+
+	trace_dpu_plane_disable(DRMID(plane), is_dpu_plane_virtual(plane),
+				pstate->multirect_mode);
+
+	pstate->pending = true;
+
+	if (is_dpu_plane_virtual(plane) &&
+			pdpu->pipe_hw && pdpu->pipe_hw->ops.setup_multirect)
+		pdpu->pipe_hw->ops.setup_multirect(pdpu->pipe_hw,
+				DPU_SSPP_RECT_SOLO, DPU_SSPP_MULTIRECT_NONE);
+}
+
+static void dpu_plane_atomic_update(struct drm_plane *plane,
+				struct drm_plane_state *old_state)
+{
+	struct dpu_plane *pdpu;
+	struct drm_plane_state *state;
+
+	if (!plane) {
+		DPU_ERROR("invalid plane\n");
+		return;
+	} else if (!plane->state) {
+		DPU_ERROR("invalid plane state\n");
+		return;
+	}
+
+	pdpu = to_dpu_plane(plane);
+	pdpu->is_error = false;
+	state = plane->state;
+
+	DPU_DEBUG_PLANE(pdpu, "\n");
+
+	if (!dpu_plane_sspp_enabled(state)) {
+		_dpu_plane_atomic_disable(plane, old_state);
+	} else {
+		int ret;
+
+		ret = dpu_plane_sspp_atomic_update(plane, old_state);
+		/* atomic_check should have ensured that this doesn't fail */
+		WARN_ON(ret < 0);
+	}
+}
+
+void dpu_plane_restore(struct drm_plane *plane)
+{
+	struct dpu_plane *pdpu;
+
+	if (!plane || !plane->state) {
+		DPU_ERROR("invalid plane\n");
+		return;
+	}
+
+	pdpu = to_dpu_plane(plane);
+
+	DPU_DEBUG_PLANE(pdpu, "\n");
+
+	/* last plane state is same as current state */
+	dpu_plane_atomic_update(plane, plane->state);
+}
+
+static void dpu_plane_destroy(struct drm_plane *plane)
+{
+	struct dpu_plane *pdpu = plane ? to_dpu_plane(plane) : NULL;
+
+	DPU_DEBUG_PLANE(pdpu, "\n");
+
+	if (pdpu) {
+		_dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL);
+
+		mutex_destroy(&pdpu->lock);
+
+		drm_plane_helper_disable(plane, NULL);
+
+		/* this will destroy the states as well */
+		drm_plane_cleanup(plane);
+
+		if (pdpu->pipe_hw)
+			dpu_hw_sspp_destroy(pdpu->pipe_hw);
+
+		kfree(pdpu);
+	}
+}
+
+static void dpu_plane_destroy_state(struct drm_plane *plane,
+		struct drm_plane_state *state)
+{
+	struct dpu_plane_state *pstate;
+
+	if (!plane || !state) {
+		DPU_ERROR("invalid arg(s), plane %d state %d\n",
+				plane != NULL, state != NULL);
+		return;
+	}
+
+	pstate = to_dpu_plane_state(state);
+
+	/* drop the reference held on the framebuffer */
+	if (state->fb)
+		drm_framebuffer_put(state->fb);
+
+	kfree(pstate);
+}
+
+static struct drm_plane_state *
+dpu_plane_duplicate_state(struct drm_plane *plane)
+{
+	struct dpu_plane *pdpu;
+	struct dpu_plane_state *pstate;
+	struct dpu_plane_state *old_state;
+
+	if (!plane) {
+		DPU_ERROR("invalid plane\n");
+		return NULL;
+	} else if (!plane->state) {
+		DPU_ERROR("invalid plane state\n");
+		return NULL;
+	}
+
+	old_state = to_dpu_plane_state(plane->state);
+	pdpu = to_dpu_plane(plane);
+	pstate = kmemdup(old_state, sizeof(*old_state), GFP_KERNEL);
+	if (!pstate) {
+		DPU_ERROR_PLANE(pdpu, "failed to allocate state\n");
+		return NULL;
+	}
+
+	DPU_DEBUG_PLANE(pdpu, "\n");
+
+	pstate->pending = false;
+
+	__drm_atomic_helper_plane_duplicate_state(plane, &pstate->base);
+
+	return &pstate->base;
+}
+
+static void dpu_plane_reset(struct drm_plane *plane)
+{
+	struct dpu_plane *pdpu;
+	struct dpu_plane_state *pstate;
+
+	if (!plane) {
+		DPU_ERROR("invalid plane\n");
+		return;
+	}
+
+	pdpu = to_dpu_plane(plane);
+	DPU_DEBUG_PLANE(pdpu, "\n");
+
+	/* remove previous state, if present */
+	if (plane->state) {
+		dpu_plane_destroy_state(plane, plane->state);
+		plane->state = NULL;
+	}
+
+	pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
+	if (!pstate) {
+		DPU_ERROR_PLANE(pdpu, "failed to allocate state\n");
+		return;
+	}
+
+	pstate->base.plane = plane;
+
+	plane->state = &pstate->base;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static ssize_t _dpu_plane_danger_read(struct file *file,
+			char __user *buff, size_t count, loff_t *ppos)
+{
+	struct dpu_kms *kms = file->private_data;
+	struct dpu_mdss_cfg *cfg = kms->catalog;
+	int len = 0;
+	char buf[40] = {'\0'};
+
+	if (!cfg)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0; /* the end */
+
+	len = snprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
+	if (len < 0 || len >= sizeof(buf))
+		return 0;
+
+	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;   /* increase offset */
+
+	return len;
+}
+
+static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
+{
+	struct drm_plane *plane;
+
+	drm_for_each_plane(plane, kms->dev) {
+		if (plane->fb && plane->state) {
+			dpu_plane_danger_signal_ctrl(plane, enable);
+			DPU_DEBUG("plane:%d img:%dx%d ",
+				plane->base.id, plane->fb->width,
+				plane->fb->height);
+			DPU_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
+				plane->state->src_x >> 16,
+				plane->state->src_y >> 16,
+				plane->state->src_w >> 16,
+				plane->state->src_h >> 16,
+				plane->state->crtc_x, plane->state->crtc_y,
+				plane->state->crtc_w, plane->state->crtc_h);
+		} else {
+			DPU_DEBUG("Inactive plane:%d\n", plane->base.id);
+		}
+	}
+}
+
+static ssize_t _dpu_plane_danger_write(struct file *file,
+		    const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct dpu_kms *kms = file->private_data;
+	struct dpu_mdss_cfg *cfg = kms->catalog;
+	int disable_panic;
+	char buf[10];
+
+	if (!cfg)
+		return -EFAULT;
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0;	/* end of string */
+
+	if (kstrtoint(buf, 0, &disable_panic))
+		return -EFAULT;
+
+	if (disable_panic) {
+		/* Disable panic signal for all active pipes */
+		DPU_DEBUG("Disabling danger:\n");
+		_dpu_plane_set_danger_state(kms, false);
+		kms->has_danger_ctrl = false;
+	} else {
+		/* Enable panic signal for all active pipes */
+		DPU_DEBUG("Enabling danger:\n");
+		kms->has_danger_ctrl = true;
+		_dpu_plane_set_danger_state(kms, true);
+	}
+
+	return count;
+}
+
+static const struct file_operations dpu_plane_danger_enable = {
+	.open = simple_open,
+	.read = _dpu_plane_danger_read,
+	.write = _dpu_plane_danger_write,
+};
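+
+/*
+ * Illustrative usage from userspace, assuming debugfs is mounted at
+ * /sys/kernel/debug and <n>/<pipe> name the DRM device and pipe directory:
+ *
+ *	echo 1 > /sys/kernel/debug/dri/<n>/<pipe>/disable_danger
+ *	cat /sys/kernel/debug/dri/<n>/<pipe>/disable_danger
+ *
+ * Writing a non-zero value disables the danger/panic signals on all active
+ * pipes; reading returns 1 while danger control is disabled.
+ */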
+
+static int _dpu_plane_init_debugfs(struct drm_plane *plane)
+{
+	struct dpu_plane *pdpu;
+	struct dpu_kms *kms;
+	struct msm_drm_private *priv;
+	const struct dpu_sspp_sub_blks *sblk = NULL;
+	const struct dpu_sspp_cfg *cfg = NULL;
+
+	if (!plane || !plane->dev) {
+		DPU_ERROR("invalid arguments\n");
+		return -EINVAL;
+	}
+
+	priv = plane->dev->dev_private;
+	if (!priv || !priv->kms) {
+		DPU_ERROR("invalid KMS reference\n");
+		return -EINVAL;
+	}
+
+	kms = to_dpu_kms(priv->kms);
+	pdpu = to_dpu_plane(plane);
+
+	if (pdpu && pdpu->pipe_hw)
+		cfg = pdpu->pipe_hw->cap;
+	if (cfg)
+		sblk = cfg->sblk;
+
+	if (!sblk)
+		return 0;
+
+	/* create overall sub-directory for the pipe */
+	pdpu->debugfs_root =
+		debugfs_create_dir(pdpu->pipe_name,
+				plane->dev->primary->debugfs_root);
+
+	if (!pdpu->debugfs_root)
+		return -ENOMEM;
+
+	/* don't error check these */
+	debugfs_create_x32("features", 0600,
+			pdpu->debugfs_root, &pdpu->features);
+
+	/* add register dump support */
+	dpu_debugfs_setup_regset32(&pdpu->debugfs_src,
+			sblk->src_blk.base + cfg->base,
+			sblk->src_blk.len,
+			kms);
+	dpu_debugfs_create_regset32("src_blk", 0400,
+			pdpu->debugfs_root, &pdpu->debugfs_src);
+
+	if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) ||
+			cfg->features & BIT(DPU_SSPP_SCALER_QSEED2)) {
+		dpu_debugfs_setup_regset32(&pdpu->debugfs_scaler,
+				sblk->scaler_blk.base + cfg->base,
+				sblk->scaler_blk.len,
+				kms);
+		dpu_debugfs_create_regset32("scaler_blk", 0400,
+				pdpu->debugfs_root,
+				&pdpu->debugfs_scaler);
+		debugfs_create_bool("default_scaling",
+				0600,
+				pdpu->debugfs_root,
+				&pdpu->debugfs_default_scale);
+	}
+
+	if (cfg->features & BIT(DPU_SSPP_CSC) ||
+			cfg->features & BIT(DPU_SSPP_CSC_10BIT)) {
+		dpu_debugfs_setup_regset32(&pdpu->debugfs_csc,
+				sblk->csc_blk.base + cfg->base,
+				sblk->csc_blk.len,
+				kms);
+		dpu_debugfs_create_regset32("csc_blk", 0400,
+				pdpu->debugfs_root, &pdpu->debugfs_csc);
+	}
+
+	debugfs_create_u32("xin_id",
+			0400,
+			pdpu->debugfs_root,
+			(u32 *) &cfg->xin_id);
+	debugfs_create_u32("clk_ctrl",
+			0400,
+			pdpu->debugfs_root,
+			(u32 *) &cfg->clk_ctrl);
+	debugfs_create_x32("creq_vblank",
+			0600,
+			pdpu->debugfs_root,
+			(u32 *) &sblk->creq_vblank);
+	debugfs_create_x32("danger_vblank",
+			0600,
+			pdpu->debugfs_root,
+			(u32 *) &sblk->danger_vblank);
+
+	debugfs_create_file("disable_danger",
+			0600,
+			pdpu->debugfs_root,
+			kms, &dpu_plane_danger_enable);
+
+	return 0;
+}
+
+static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
+{
+	struct dpu_plane *pdpu;
+
+	if (!plane)
+		return;
+	pdpu = to_dpu_plane(plane);
+
+	debugfs_remove_recursive(pdpu->debugfs_root);
+}
+#else
+static int _dpu_plane_init_debugfs(struct drm_plane *plane)
+{
+	return 0;
+}
+static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
+{
+}
+#endif
+
+static int dpu_plane_late_register(struct drm_plane *plane)
+{
+	return _dpu_plane_init_debugfs(plane);
+}
+
+static void dpu_plane_early_unregister(struct drm_plane *plane)
+{
+	_dpu_plane_destroy_debugfs(plane);
+}
+
+static const struct drm_plane_funcs dpu_plane_funcs = {
+		.update_plane = drm_atomic_helper_update_plane,
+		.disable_plane = drm_atomic_helper_disable_plane,
+		.destroy = dpu_plane_destroy,
+		.reset = dpu_plane_reset,
+		.atomic_duplicate_state = dpu_plane_duplicate_state,
+		.atomic_destroy_state = dpu_plane_destroy_state,
+		.late_register = dpu_plane_late_register,
+		.early_unregister = dpu_plane_early_unregister,
+};
+
+static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = {
+		.prepare_fb = dpu_plane_prepare_fb,
+		.cleanup_fb = dpu_plane_cleanup_fb,
+		.atomic_check = dpu_plane_atomic_check,
+		.atomic_update = dpu_plane_atomic_update,
+};
+
+enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane)
+{
+	return plane ? to_dpu_plane(plane)->pipe : SSPP_NONE;
+}
+
+bool is_dpu_plane_virtual(struct drm_plane *plane)
+{
+	return plane ? to_dpu_plane(plane)->is_virtual : false;
+}
+
+/* initialize plane */
+struct drm_plane *dpu_plane_init(struct drm_device *dev,
+		uint32_t pipe, bool primary_plane,
+		unsigned long possible_crtcs, u32 master_plane_id)
+{
+	struct drm_plane *plane = NULL, *master_plane = NULL;
+	const struct dpu_format_extended *format_list;
+	struct dpu_plane *pdpu;
+	struct msm_drm_private *priv;
+	struct dpu_kms *kms;
+	enum drm_plane_type type;
+	int zpos_max = DPU_ZPOS_MAX;
+	int ret = -EINVAL;
+
+	if (!dev) {
+		DPU_ERROR("[%u]device is NULL\n", pipe);
+		goto exit;
+	}
+
+	priv = dev->dev_private;
+	if (!priv) {
+		DPU_ERROR("[%u]private data is NULL\n", pipe);
+		goto exit;
+	}
+
+	if (!priv->kms) {
+		DPU_ERROR("[%u]invalid KMS reference\n", pipe);
+		goto exit;
+	}
+	kms = to_dpu_kms(priv->kms);
+
+	if (!kms->catalog) {
+		DPU_ERROR("[%u]invalid catalog reference\n", pipe);
+		goto exit;
+	}
+
+	/* create and zero local structure */
+	pdpu = kzalloc(sizeof(*pdpu), GFP_KERNEL);
+	if (!pdpu) {
+		DPU_ERROR("[%u]failed to allocate local plane struct\n", pipe);
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	/* cache local stuff for later */
+	plane = &pdpu->base;
+	pdpu->pipe = pipe;
+	pdpu->is_virtual = (master_plane_id != 0);
+	INIT_LIST_HEAD(&pdpu->mplane_list);
+	master_plane = drm_plane_find(dev, NULL, master_plane_id);
+	if (master_plane) {
+		struct dpu_plane *mpdpu = to_dpu_plane(master_plane);
+
+		list_add_tail(&pdpu->mplane_list, &mpdpu->mplane_list);
+	}
+
+	/* initialize underlying h/w driver */
+	pdpu->pipe_hw = dpu_hw_sspp_init(pipe, kms->mmio, kms->catalog,
+							master_plane_id != 0);
+	if (IS_ERR(pdpu->pipe_hw)) {
+		DPU_ERROR("[%u]SSPP init failed\n", pipe);
+		ret = PTR_ERR(pdpu->pipe_hw);
+		goto clean_plane;
+	} else if (!pdpu->pipe_hw->cap || !pdpu->pipe_hw->cap->sblk) {
+		DPU_ERROR("[%u]SSPP init returned invalid cfg\n", pipe);
+		goto clean_sspp;
+	}
+
+	/* cache features mask for later */
+	pdpu->features = pdpu->pipe_hw->cap->features;
+	pdpu->pipe_sblk = pdpu->pipe_hw->cap->sblk;
+	if (!pdpu->pipe_sblk) {
+		DPU_ERROR("[%u]invalid sblk\n", pipe);
+		goto clean_sspp;
+	}
+
+	if (!master_plane_id)
+		format_list = pdpu->pipe_sblk->format_list;
+	else
+		format_list = pdpu->pipe_sblk->virt_format_list;
+
+	pdpu->nformats = dpu_populate_formats(format_list,
+				pdpu->formats,
+				0,
+				ARRAY_SIZE(pdpu->formats));
+
+	if (!pdpu->nformats) {
+		DPU_ERROR("[%u]no valid formats for plane\n", pipe);
+		goto clean_sspp;
+	}
+
+	if (pdpu->features & BIT(DPU_SSPP_CURSOR))
+		type = DRM_PLANE_TYPE_CURSOR;
+	else if (primary_plane)
+		type = DRM_PLANE_TYPE_PRIMARY;
+	else
+		type = DRM_PLANE_TYPE_OVERLAY;
+	ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs,
+				pdpu->formats, pdpu->nformats,
+				NULL, type, NULL);
+	if (ret)
+		goto clean_sspp;
+
+	pdpu->catalog = kms->catalog;
+
+	if (kms->catalog->mixer_count &&
+		kms->catalog->mixer[0].sblk->maxblendstages) {
+		zpos_max = kms->catalog->mixer[0].sblk->maxblendstages - 1;
+		if (zpos_max > DPU_STAGE_MAX - DPU_STAGE_0 - 1)
+			zpos_max = DPU_STAGE_MAX - DPU_STAGE_0 - 1;
+	}
+
+	ret = drm_plane_create_zpos_property(plane, 0, 0, zpos_max);
+	if (ret)
+		DPU_ERROR("failed to install zpos property, rc = %d\n", ret);
+
+	/* success! finalize initialization */
+	drm_plane_helper_add(plane, &dpu_plane_helper_funcs);
+
+	/* save user-friendly pipe name for later */
+	snprintf(pdpu->pipe_name, DPU_NAME_SIZE, "plane%u", plane->base.id);
+
+	mutex_init(&pdpu->lock);
+
+	DPU_DEBUG("%s created for pipe:%u id:%u virtual:%u\n", pdpu->pipe_name,
+					pipe, plane->base.id, master_plane_id);
+	return plane;
+
+clean_sspp:
+	if (pdpu && pdpu->pipe_hw)
+		dpu_hw_sspp_destroy(pdpu->pipe_hw);
+clean_plane:
+	kfree(pdpu);
+exit:
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
new file mode 100644
index 0000000..f6fe6dd
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DPU_PLANE_H_
+#define _DPU_PLANE_H_
+
+#include <drm/drm_crtc.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_sspp.h"
+
+/**
+ * struct dpu_plane_state: Define dpu extension of drm plane state object
+ * @base:	base drm plane state object
+ * @aspace:	pointer to address space for input/output buffers
+ * @input_fence:	dereferenced input fence pointer
+ * @stage:	assigned by crtc blender
+ * @multirect_index: index of the rectangle of SSPP
+ * @multirect_mode: parallel or time multiplex multirect mode
+ * @pending:	whether the current update is still pending
+ * @scaler3_cfg: configuration data for scaler3
+ * @pixel_ext: configuration data for pixel extensions
+ * @cdp_cfg:	CDP configuration
+ */
+struct dpu_plane_state {
+	struct drm_plane_state base;
+	struct msm_gem_address_space *aspace;
+	void *input_fence;
+	enum dpu_stage stage;
+	uint32_t multirect_index;
+	uint32_t multirect_mode;
+	bool pending;
+
+	/* scaler configuration */
+	struct dpu_hw_scaler3_cfg scaler3_cfg;
+	struct dpu_hw_pixel_ext pixel_ext;
+
+	struct dpu_hw_pipe_cdp_cfg cdp_cfg;
+};
+
+/**
+ * struct dpu_multirect_plane_states: Defines multirect pair of drm plane states
+ * @r0: drm plane configured on rect 0
+ * @r1: drm plane configured on rect 1
+ */
+struct dpu_multirect_plane_states {
+	const struct drm_plane_state *r0;
+	const struct drm_plane_state *r1;
+};
+
+#define to_dpu_plane_state(x) \
+	container_of(x, struct dpu_plane_state, base)
+
+/**
+ * dpu_plane_pipe - return sspp identifier for the given plane
+ * @plane:   Pointer to DRM plane object
+ * Returns: sspp identifier of the given plane
+ */
+enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane);
+
+/**
+ * is_dpu_plane_virtual - check for virtual plane
+ * @plane: Pointer to DRM plane object
+ * returns: true - if the plane is virtual
+ *          false - if the plane is primary
+ */
+bool is_dpu_plane_virtual(struct drm_plane *plane);
+
+/**
+ * dpu_plane_get_ctl_flush - get control flush mask
+ * @plane:   Pointer to DRM plane object
+ * @ctl: Pointer to control hardware
+ * @flush_sspp: Pointer to sspp flush control word
+ */
+void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
+		u32 *flush_sspp);
+
+/**
+ * dpu_plane_restore - restore hw state if previously power collapsed
+ * @plane: Pointer to drm plane structure
+ */
+void dpu_plane_restore(struct drm_plane *plane);
+
+/**
+ * dpu_plane_flush - final plane operations before commit flush
+ * @plane: Pointer to drm plane structure
+ */
+void dpu_plane_flush(struct drm_plane *plane);
+
+/**
+ * dpu_plane_kickoff - final plane operations before commit kickoff
+ * @plane: Pointer to drm plane structure
+ */
+void dpu_plane_kickoff(struct drm_plane *plane);
+
+/**
+ * dpu_plane_set_error: enable/disable error condition
+ * @plane: pointer to drm_plane structure
+ */
+void dpu_plane_set_error(struct drm_plane *plane, bool error);
+
+/**
+ * dpu_plane_init - create new dpu plane for the given pipe
+ * @dev:   Pointer to DRM device
+ * @pipe:  dpu hardware pipe identifier
+ * @primary_plane: true if this pipe is primary plane for crtc
+ * @possible_crtcs: bitmask of crtc that can be attached to the given pipe
+ * @master_plane_id: primary plane id of a multirect pipe. 0 value passed for
+ *                   a regular plane initialization. A non-zero primary plane
+ *                   id will be passed for a virtual pipe initialization.
+ *
+ */
+struct drm_plane *dpu_plane_init(struct drm_device *dev,
+		uint32_t pipe, bool primary_plane,
+		unsigned long possible_crtcs, u32 master_plane_id);
+
+/**
+ * dpu_plane_validate_multirect_v2 - validate the multirect planes
+ *				      against hw limitations
+ * @plane: drm plane states of the multirect pair
+ */
+int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane);
+
+/**
+ * dpu_plane_clear_multirect - clear multirect bits for the given pipe
+ * @drm_state: Pointer to DRM plane state
+ */
+void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state);
+
+/**
+ * dpu_plane_wait_input_fence - wait for input fence object
+ * @plane:   Pointer to DRM plane object
+ * @wait_ms: Wait timeout value
+ * Returns: Zero on success
+ */
+int dpu_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms);
+
+/**
+ * dpu_plane_color_fill - enables color fill on plane
+ * @plane:  Pointer to DRM plane object
+ * @color:  RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
+ * @alpha:  8-bit fill alpha value, 255 selects 100% alpha
+ * Returns: 0 on success
+ */
+int dpu_plane_color_fill(struct drm_plane *plane,
+		uint32_t color, uint32_t alpha);
+
+/**
+ * dpu_plane_set_revalidate - sets revalidate flag which forces a full
+ *	validation of the plane properties in the next atomic check
+ * @plane: Pointer to DRM plane object
+ * @enable: Boolean to set/unset the flag
+ */
+void dpu_plane_set_revalidate(struct drm_plane *plane, bool enable);
+
+#endif /* _DPU_PLANE_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
new file mode 100644
index 0000000..a75eebc
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
@@ -0,0 +1,249 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d]: " fmt, __func__, __LINE__
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/string.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+
+#include "dpu_power_handle.h"
+#include "dpu_trace.h"
+
+static const char *data_bus_name[DPU_POWER_HANDLE_DBUS_ID_MAX] = {
+	[DPU_POWER_HANDLE_DBUS_ID_MNOC] = "qcom,dpu-data-bus",
+	[DPU_POWER_HANDLE_DBUS_ID_LLCC] = "qcom,dpu-llcc-bus",
+	[DPU_POWER_HANDLE_DBUS_ID_EBI] = "qcom,dpu-ebi-bus",
+};
+
+const char *dpu_power_handle_get_dbus_name(u32 bus_id)
+{
+	if (bus_id < DPU_POWER_HANDLE_DBUS_ID_MAX)
+		return data_bus_name[bus_id];
+
+	return NULL;
+}
+
+static void dpu_power_event_trigger_locked(struct dpu_power_handle *phandle,
+		u32 event_type)
+{
+	struct dpu_power_event *event;
+
+	list_for_each_entry(event, &phandle->event_list, list) {
+		if (event->event_type & event_type)
+			event->cb_fnc(event_type, event->usr);
+	}
+}
+
+struct dpu_power_client *dpu_power_client_create(
+	struct dpu_power_handle *phandle, char *client_name)
+{
+	struct dpu_power_client *client;
+	static u32 id;
+
+	if (!client_name || !phandle) {
+		pr_err("client name is null or invalid power data\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	client = kzalloc(sizeof(struct dpu_power_client), GFP_KERNEL);
+	if (!client)
+		return ERR_PTR(-ENOMEM);
+
+	mutex_lock(&phandle->phandle_lock);
+	strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
+	client->usecase_ndx = VOTE_INDEX_DISABLE;
+	client->id = id;
+	client->active = true;
+	pr_debug("client %s created:%pK id :%d\n", client_name,
+		client, id);
+	id++;
+	list_add(&client->list, &phandle->power_client_clist);
+	mutex_unlock(&phandle->phandle_lock);
+
+	return client;
+}
+
+void dpu_power_client_destroy(struct dpu_power_handle *phandle,
+	struct dpu_power_client *client)
+{
+	if (!client  || !phandle) {
+		pr_err("reg bus vote: invalid client handle\n");
+	} else if (!client->active) {
+		pr_err("dpu power deinit already done\n");
+		kfree(client);
+	} else {
+		pr_debug("bus vote client %s destroyed:%pK id:%u\n",
+			client->name, client, client->id);
+		mutex_lock(&phandle->phandle_lock);
+		list_del_init(&client->list);
+		mutex_unlock(&phandle->phandle_lock);
+		kfree(client);
+	}
+}
+
+void dpu_power_resource_init(struct platform_device *pdev,
+	struct dpu_power_handle *phandle)
+{
+	phandle->dev = &pdev->dev;
+
+	INIT_LIST_HEAD(&phandle->power_client_clist);
+	INIT_LIST_HEAD(&phandle->event_list);
+
+	mutex_init(&phandle->phandle_lock);
+}
+
+void dpu_power_resource_deinit(struct platform_device *pdev,
+	struct dpu_power_handle *phandle)
+{
+	struct dpu_power_client *curr_client, *next_client;
+	struct dpu_power_event *curr_event, *next_event;
+
+	if (!phandle || !pdev) {
+		pr_err("invalid input param\n");
+		return;
+	}
+
+	mutex_lock(&phandle->phandle_lock);
+	list_for_each_entry_safe(curr_client, next_client,
+			&phandle->power_client_clist, list) {
+		pr_err("client:%s-%d still registered with refcount:%d\n",
+				curr_client->name, curr_client->id,
+				curr_client->refcount);
+		curr_client->active = false;
+		list_del(&curr_client->list);
+	}
+
+	list_for_each_entry_safe(curr_event, next_event,
+			&phandle->event_list, list) {
+		pr_err("event:%d, client:%s still registered\n",
+				curr_event->event_type,
+				curr_event->client_name);
+		curr_event->active = false;
+		list_del(&curr_event->list);
+	}
+	mutex_unlock(&phandle->phandle_lock);
+}
+
+int dpu_power_resource_enable(struct dpu_power_handle *phandle,
+	struct dpu_power_client *pclient, bool enable)
+{
+	bool changed = false;
+	u32 max_usecase_ndx = VOTE_INDEX_DISABLE, prev_usecase_ndx;
+	struct dpu_power_client *client;
+
+	if (!phandle || !pclient) {
+		pr_err("invalid input argument\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&phandle->phandle_lock);
+	if (enable)
+		pclient->refcount++;
+	else if (pclient->refcount)
+		pclient->refcount--;
+
+	if (pclient->refcount)
+		pclient->usecase_ndx = VOTE_INDEX_LOW;
+	else
+		pclient->usecase_ndx = VOTE_INDEX_DISABLE;
+
+	list_for_each_entry(client, &phandle->power_client_clist, list) {
+		if (client->usecase_ndx < VOTE_INDEX_MAX &&
+		    client->usecase_ndx > max_usecase_ndx)
+			max_usecase_ndx = client->usecase_ndx;
+	}
+
+	if (phandle->current_usecase_ndx != max_usecase_ndx) {
+		changed = true;
+		prev_usecase_ndx = phandle->current_usecase_ndx;
+		phandle->current_usecase_ndx = max_usecase_ndx;
+	}
+
+	pr_debug("%pS: changed=%d current idx=%d request client %s id:%u enable:%d refcount:%d\n",
+		__builtin_return_address(0), changed, max_usecase_ndx,
+		pclient->name, pclient->id, enable, pclient->refcount);
+
+	if (!changed)
+		goto end;
+
+	if (enable) {
+		dpu_power_event_trigger_locked(phandle,
+				DPU_POWER_EVENT_PRE_ENABLE);
+		dpu_power_event_trigger_locked(phandle,
+				DPU_POWER_EVENT_POST_ENABLE);
+
+	} else {
+		dpu_power_event_trigger_locked(phandle,
+				DPU_POWER_EVENT_PRE_DISABLE);
+		dpu_power_event_trigger_locked(phandle,
+				DPU_POWER_EVENT_POST_DISABLE);
+	}
+
+end:
+	mutex_unlock(&phandle->phandle_lock);
+	return 0;
+}
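+
+/*
+ * Example usage (illustrative): votes are refcounted per client and
+ * aggregated across all clients, so enable/disable calls must be paired:
+ *
+ *	struct dpu_power_client *pclient =
+ *		dpu_power_client_create(phandle, "dpu_core");
+ *
+ *	dpu_power_resource_enable(phandle, pclient, true);
+ *	... access the hardware ...
+ *	dpu_power_resource_enable(phandle, pclient, false);
+ *
+ * The "dpu_core" client name is a placeholder.
+ */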
+
+struct dpu_power_event *dpu_power_handle_register_event(
+		struct dpu_power_handle *phandle,
+		u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
+		void *usr, char *client_name)
+{
+	struct dpu_power_event *event;
+
+	if (!phandle) {
+		pr_err("invalid power handle\n");
+		return ERR_PTR(-EINVAL);
+	} else if (!cb_fnc || !event_type) {
+		pr_err("no callback fnc or event type\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	event = kzalloc(sizeof(struct dpu_power_event), GFP_KERNEL);
+	if (!event)
+		return ERR_PTR(-ENOMEM);
+
+	event->event_type = event_type;
+	event->cb_fnc = cb_fnc;
+	event->usr = usr;
+	strlcpy(event->client_name, client_name, MAX_CLIENT_NAME_LEN);
+	event->active = true;
+
+	mutex_lock(&phandle->phandle_lock);
+	list_add(&event->list, &phandle->event_list);
+	mutex_unlock(&phandle->phandle_lock);
+
+	return event;
+}
+
+void dpu_power_handle_unregister_event(
+		struct dpu_power_handle *phandle,
+		struct dpu_power_event *event)
+{
+	if (!phandle || !event) {
+		pr_err("invalid phandle or event\n");
+	} else if (!event->active) {
+		pr_err("power handle deinit already done\n");
+		kfree(event);
+	} else {
+		mutex_lock(&phandle->phandle_lock);
+		list_del_init(&event->list);
+		mutex_unlock(&phandle->phandle_lock);
+		kfree(event);
+	}
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
new file mode 100644
index 0000000..344f744
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
@@ -0,0 +1,225 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DPU_POWER_HANDLE_H_
+#define _DPU_POWER_HANDLE_H_
+
+#define MAX_CLIENT_NAME_LEN 128
+
+#define DPU_POWER_HANDLE_ENABLE_BUS_AB_QUOTA	0
+#define DPU_POWER_HANDLE_DISABLE_BUS_AB_QUOTA	0
+#define DPU_POWER_HANDLE_ENABLE_BUS_IB_QUOTA	1600000000
+#define DPU_POWER_HANDLE_DISABLE_BUS_IB_QUOTA	0
+
+#include "dpu_io_util.h"
+
+/* event will be triggered before power handler disable */
+#define DPU_POWER_EVENT_PRE_DISABLE	0x1
+
+/* event will be triggered after power handler disable */
+#define DPU_POWER_EVENT_POST_DISABLE	0x2
+
+/* event will be triggered before power handler enable */
+#define DPU_POWER_EVENT_PRE_ENABLE	0x4
+
+/* event will be triggered after power handler enable */
+#define DPU_POWER_EVENT_POST_ENABLE	0x8
+
+/**
+ * mdss_bus_vote_type: register bus vote type
+ * VOTE_INDEX_DISABLE: removes the client vote
+ * VOTE_INDEX_LOW: keeps the lowest vote for register bus
+ * VOTE_INDEX_MAX: invalid
+ */
+enum mdss_bus_vote_type {
+	VOTE_INDEX_DISABLE,
+	VOTE_INDEX_LOW,
+	VOTE_INDEX_MAX,
+};
+
+/**
+ * enum dpu_power_handle_data_bus_client - type of axi bus clients
+ * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT: core real-time bus client
+ * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT: core non-real-time bus client
+ * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX: maximum number of bus client type
+ */
+enum dpu_power_handle_data_bus_client {
+	DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT,
+	DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
+	DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX
+};
+
+/**
+ * enum DPU_POWER_HANDLE_DBUS_ID - data bus identifier
+ * @DPU_POWER_HANDLE_DBUS_ID_MNOC: DPU/MNOC data bus
+ * @DPU_POWER_HANDLE_DBUS_ID_LLCC: MNOC/LLCC data bus
+ * @DPU_POWER_HANDLE_DBUS_ID_EBI: LLCC/EBI data bus
+ */
+enum DPU_POWER_HANDLE_DBUS_ID {
+	DPU_POWER_HANDLE_DBUS_ID_MNOC,
+	DPU_POWER_HANDLE_DBUS_ID_LLCC,
+	DPU_POWER_HANDLE_DBUS_ID_EBI,
+	DPU_POWER_HANDLE_DBUS_ID_MAX,
+};
+
+/**
+ * struct dpu_power_client: stores the power client for dpu driver
+ * @name:	name of the client
+ * @usecase_ndx: current regs bus vote type
+ * @refcount:	current refcount if multiple modules are using the
+ *              same client for enable/disable. The power module will
+ *              aggregate the refcount and vote accordingly for this
+ *              client.
+ * @id:		assigned during create. helps for debugging.
+ * @list:	list to attach power handle master list
+ * @ab:         arbitrated bandwidth for each bus client
+ * @ib:         instantaneous bandwidth for each bus client
+ * @active:	indicates the state of the dpu power handle
+ */
+struct dpu_power_client {
+	char name[MAX_CLIENT_NAME_LEN];
+	short usecase_ndx;
+	short refcount;
+	u32 id;
+	struct list_head list;
+	u64 ab[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
+	u64 ib[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
+	bool active;
+};
+
+/*
+ * struct dpu_power_event - local event registration structure
+ * @client_name: name of the client registering
+ * @cb_fnc: pointer to desired callback function
+ * @usr: user pointer to pass to callback event trigger
+ * @event_type: refer to DPU_POWER_EVENT_*
+ * @list: list to attach event master list
+ * @active: indicates the state of dpu power handle
+ */
+struct dpu_power_event {
+	char client_name[MAX_CLIENT_NAME_LEN];
+	void (*cb_fnc)(u32 event_type, void *usr);
+	void *usr;
+	u32 event_type;
+	struct list_head list;
+	bool active;
+};
+
+/**
+ * struct dpu_power_handle: power handle main struct
+ * @power_client_clist: master list to store all clients
+ * @phandle_lock: lock to synchronize the enable/disable
+ * @dev: pointer to device structure
+ * @current_usecase_ndx: current usecase index
+ * @event_list: current power handle event list
+ */
+struct dpu_power_handle {
+	struct list_head power_client_clist;
+	struct mutex phandle_lock;
+	struct device *dev;
+	u32 current_usecase_ndx;
+	struct list_head event_list;
+};
+
+/**
+ * dpu_power_resource_init() - initializes the dpu power handle
+ * @pdev:   platform device to search the power resources
+ * @pdata:  power handle to store the power resources
+ */
+void dpu_power_resource_init(struct platform_device *pdev,
+	struct dpu_power_handle *pdata);
+
+/**
+ * dpu_power_resource_deinit() - release the dpu power handle
+ * @pdev:   platform device for power resources
+ * @pdata:  power handle containing the resources
+ *
+ * Return: none.
+ */
+void dpu_power_resource_deinit(struct platform_device *pdev,
+	struct dpu_power_handle *pdata);
+
+/**
+ * dpu_power_client_create() - create the client on power handle
+ * @pdata:  power handle containing the resources
+ * @client_name: new client name for registration
+ *
+ * Return: pointer to the new client on success, or an error pointer on failure.
+ */
+struct dpu_power_client *dpu_power_client_create(struct dpu_power_handle *pdata,
+	char *client_name);
+
+/**
+ * dpu_power_client_destroy() - destroy the client on power handle
+ * @phandle:  power handle containing the resources
+ * @client: client handle returned by dpu_power_client_create()
+ *
+ * Return: none
+ */
+void dpu_power_client_destroy(struct dpu_power_handle *phandle,
+	struct dpu_power_client *client);
+
+/**
+ * dpu_power_resource_enable() - enable/disable the power resources
+ * @pdata:  power handle containing the resources
+ * @client: client information to enable/disable its vote
+ * @enable: boolean request for enable/disable
+ *
+ * Return: error code.
+ */
+int dpu_power_resource_enable(struct dpu_power_handle *pdata,
+	struct dpu_power_client *pclient, bool enable);
+
+/**
+ * dpu_power_data_bus_bandwidth_ctrl() - control data bus bandwidth enable
+ * @phandle:  power handle containing the resources
+ * @client: client information to bandwidth control
+ * @enable: true to enable bandwidth for the data bus
+ *
+ * Return: none
+ */
+void dpu_power_data_bus_bandwidth_ctrl(struct dpu_power_handle *phandle,
+		struct dpu_power_client *pclient, int enable);
+
+/**
+ * dpu_power_handle_register_event - register a callback function for an event.
+ *	Clients can register for multiple events with a single register.
+ *	Any block with access to phandle can register for the event
+ *	notification.
+ * @phandle:	power handle containing the resources
+ * @event_type:	event type to register; refer to DPU_POWER_EVENT_*
+ * @cb_fnc:	pointer to desired callback function
+ * @usr:	user pointer to pass to callback on event trigger
+ *
+ * Return:	event pointer if success, or error code otherwise
+ */
+struct dpu_power_event *dpu_power_handle_register_event(
+		struct dpu_power_handle *phandle,
+		u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
+		void *usr, char *client_name);
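+
+/*
+ * Example (illustrative): register a callback to run after every power
+ * handler enable; my_cb and usr here are caller-provided placeholders:
+ *
+ *	struct dpu_power_event *ev = dpu_power_handle_register_event(
+ *			phandle, DPU_POWER_EVENT_POST_ENABLE,
+ *			my_cb, usr, "my_client");
+ */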
+/**
+ * dpu_power_handle_unregister_event - unregister callback for event(s)
+ * @phandle:	power handle containing the resources
+ * @event:	event pointer returned after power handle register
+ */
+void dpu_power_handle_unregister_event(struct dpu_power_handle *phandle,
+		struct dpu_power_event *event);
+
+/**
+ * dpu_power_handle_get_dbus_name - get name of given data bus identifier
+ * @bus_id:	data bus identifier
+ * Return:	Pointer to name string if success; NULL otherwise
+ */
+const char *dpu_power_handle_get_dbus_name(u32 bus_id);
+
+#endif /* _DPU_POWER_HANDLE_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
new file mode 100644
index 0000000..13c0a36
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -0,0 +1,1079 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm:%s] " fmt, __func__
+#include "dpu_kms.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_cdm.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_hw_intf.h"
+#include "dpu_encoder.h"
+#include "dpu_trace.h"
+
+#define RESERVED_BY_OTHER(h, r) \
+	((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id))
+
+#define RM_RQ_LOCK(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_LOCK))
+#define RM_RQ_CLEAR(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_CLEAR))
+#define RM_RQ_DS(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_DS))
+#define RM_IS_TOPOLOGY_MATCH(t, r) ((t).num_lm == (r).num_lm && \
+				(t).num_comp_enc == (r).num_enc && \
+				(t).num_intf == (r).num_intf)
+
+struct dpu_rm_topology_def {
+	enum dpu_rm_topology_name top_name;
+	int num_lm;
+	int num_comp_enc;
+	int num_intf;
+	int num_ctl;
+	int needs_split_display;
+};
+
+static const struct dpu_rm_topology_def g_top_table[] = {
+	{   DPU_RM_TOPOLOGY_NONE,                 0, 0, 0, 0, false },
+	{   DPU_RM_TOPOLOGY_SINGLEPIPE,           1, 0, 1, 1, false },
+	{   DPU_RM_TOPOLOGY_DUALPIPE,             2, 0, 2, 2, true  },
+	{   DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE,     2, 0, 1, 1, false },
+};
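+
+/*
+ * For example, an encoder reporting a struct msm_display_topology of
+ * { .num_lm = 2, .num_enc = 0, .num_intf = 1 } matches the
+ * DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE row above via RM_IS_TOPOLOGY_MATCH().
+ */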
+
+/**
+ * struct dpu_rm_requirements - Reservation requirements parameter bundle
+ * @top_ctrl:  topology control preference from kernel client
+ * @topology:  selected topology for the display
+ * @hw_res:	   Hardware resources required as reported by the encoders
+ */
+struct dpu_rm_requirements {
+	uint64_t top_ctrl;
+	const struct dpu_rm_topology_def *topology;
+	struct dpu_encoder_hw_resources hw_res;
+};
+
+/**
+ * struct dpu_rm_rsvp - Use Case Reservation tagging structure
+ *	Used to tag HW blocks as reserved by a CRTC->Encoder->Connector chain
+ *	Using a tag, rather than lists of pointers to the HW blocks used,
+ *	avoids some list management, since we don't know in advance how many
+ *	blocks of each type a given use case may require.
+ * @list:	List head for list of all reservations
+ * @seq:	Global RSVP sequence number for debugging, especially for
+ *		differentiating different allocations for the same encoder.
+ * @enc_id:	Reservations are tracked by Encoder DRM object ID.
+ *		CRTCs may be connected to multiple Encoders.
+ *		An encoder or connector id identifies the display path.
+ * @topology:	DRM<->HW topology use case
+ */
+struct dpu_rm_rsvp {
+	struct list_head list;
+	uint32_t seq;
+	uint32_t enc_id;
+	enum dpu_rm_topology_name topology;
+};
+
+/**
+ * struct dpu_rm_hw_blk - hardware block tracking list member
+ * @list:	List head for list of all hardware blocks tracking items
+ * @rsvp:	Pointer to use case reservation if reserved by a client
+ * @rsvp_nxt:	Temporary pointer used during reservation to the incoming
+ *		request. Will be swapped into rsvp if proposal is accepted
+ * @type:	Type of hardware block this structure tracks
+ * @id:		Hardware ID number, within its own space, e.g. LM_X
+ * @catalog:	Pointer to the hardware catalog entry for this block
+ * @hw:		Pointer to the hardware register access object for this block
+ */
+struct dpu_rm_hw_blk {
+	struct list_head list;
+	struct dpu_rm_rsvp *rsvp;
+	struct dpu_rm_rsvp *rsvp_nxt;
+	enum dpu_hw_blk_type type;
+	uint32_t id;
+	struct dpu_hw_blk *hw;
+};
+
+/**
+ * enum dpu_rm_dbg_rsvp_stage - steps in making a reservation, for event logging
+ */
+enum dpu_rm_dbg_rsvp_stage {
+	DPU_RM_STAGE_BEGIN,
+	DPU_RM_STAGE_AFTER_CLEAR,
+	DPU_RM_STAGE_AFTER_RSVPNEXT,
+	DPU_RM_STAGE_FINAL
+};
+
+static void _dpu_rm_print_rsvps(
+		struct dpu_rm *rm,
+		enum dpu_rm_dbg_rsvp_stage stage)
+{
+	struct dpu_rm_rsvp *rsvp;
+	struct dpu_rm_hw_blk *blk;
+	enum dpu_hw_blk_type type;
+
+	DPU_DEBUG("stage: %d\n", stage);
+
+	list_for_each_entry(rsvp, &rm->rsvps, list) {
+		DRM_DEBUG_KMS("%d rsvp[s%ue%u] topology %d\n", stage, rsvp->seq,
+			      rsvp->enc_id, rsvp->topology);
+	}
+
+	for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+		list_for_each_entry(blk, &rm->hw_blks[type], list) {
+			if (!blk->rsvp && !blk->rsvp_nxt)
+				continue;
+
+			DRM_DEBUG_KMS("%d rsvp[s%ue%u->s%ue%u] %d %d\n", stage,
+				(blk->rsvp) ? blk->rsvp->seq : 0,
+				(blk->rsvp) ? blk->rsvp->enc_id : 0,
+				(blk->rsvp_nxt) ? blk->rsvp_nxt->seq : 0,
+				(blk->rsvp_nxt) ? blk->rsvp_nxt->enc_id : 0,
+				blk->type, blk->id);
+		}
+	}
+}
+
+struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm)
+{
+	return rm->hw_mdp;
+}
+
+enum dpu_rm_topology_name
+dpu_rm_get_topology_name(struct msm_display_topology topology)
+{
+	int i;
+
+	for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++)
+		if (RM_IS_TOPOLOGY_MATCH(g_top_table[i], topology))
+			return g_top_table[i].top_name;
+
+	return DPU_RM_TOPOLOGY_NONE;
+}
+
+void dpu_rm_init_hw_iter(
+		struct dpu_rm_hw_iter *iter,
+		uint32_t enc_id,
+		enum dpu_hw_blk_type type)
+{
+	memset(iter, 0, sizeof(*iter));
+	iter->enc_id = enc_id;
+	iter->type = type;
+}
+
+static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
+{
+	struct list_head *blk_list;
+
+	if (!rm || !i || i->type >= DPU_HW_BLK_MAX) {
+		DPU_ERROR("invalid rm\n");
+		return false;
+	}
+
+	i->hw = NULL;
+	blk_list = &rm->hw_blks[i->type];
+
+	if (i->blk && (&i->blk->list == blk_list)) {
+		DPU_DEBUG("attempt to resume iteration past the last item\n");
+		return false;
+	}
+
+	i->blk = list_prepare_entry(i->blk, blk_list, list);
+
+	list_for_each_entry_continue(i->blk, blk_list, list) {
+		struct dpu_rm_rsvp *rsvp = i->blk->rsvp;
+
+		if (i->blk->type != i->type) {
+			DPU_ERROR("found incorrect block type %d on %d list\n",
+					i->blk->type, i->type);
+			return false;
+		}
+
+		if ((i->enc_id == 0) || (rsvp && rsvp->enc_id == i->enc_id)) {
+			i->hw = i->blk->hw;
+			DPU_DEBUG("found type %d id %d for enc %d\n",
+					i->type, i->blk->id, i->enc_id);
+			return true;
+		}
+	}
+
+	DPU_DEBUG("no match, type %d for enc %d\n", i->type, i->enc_id);
+
+	return false;
+}
+
+bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
+{
+	bool ret;
+
+	mutex_lock(&rm->rm_lock);
+	ret = _dpu_rm_get_hw_locked(rm, i);
+	mutex_unlock(&rm->rm_lock);
+
+	return ret;
+}
+
+static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
+{
+	switch (type) {
+	case DPU_HW_BLK_LM:
+		dpu_hw_lm_destroy(hw);
+		break;
+	case DPU_HW_BLK_CTL:
+		dpu_hw_ctl_destroy(hw);
+		break;
+	case DPU_HW_BLK_CDM:
+		dpu_hw_cdm_destroy(hw);
+		break;
+	case DPU_HW_BLK_PINGPONG:
+		dpu_hw_pingpong_destroy(hw);
+		break;
+	case DPU_HW_BLK_INTF:
+		dpu_hw_intf_destroy(hw);
+		break;
+	case DPU_HW_BLK_SSPP:
+		/* SSPPs are not managed by the resource manager */
+	case DPU_HW_BLK_TOP:
+		/* Top is a singleton, not managed in hw_blks list */
+	case DPU_HW_BLK_MAX:
+	default:
+		DPU_ERROR("unsupported block type %d\n", type);
+		break;
+	}
+}
+
+int dpu_rm_destroy(struct dpu_rm *rm)
+{
+	struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
+	struct dpu_rm_hw_blk *hw_cur, *hw_nxt;
+	enum dpu_hw_blk_type type;
+
+	if (!rm) {
+		DPU_ERROR("invalid rm\n");
+		return -EINVAL;
+	}
+
+	list_for_each_entry_safe(rsvp_cur, rsvp_nxt, &rm->rsvps, list) {
+		list_del(&rsvp_cur->list);
+		kfree(rsvp_cur);
+	}
+
+	for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+		list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type],
+				list) {
+			list_del(&hw_cur->list);
+			_dpu_rm_hw_destroy(hw_cur->type, hw_cur->hw);
+			kfree(hw_cur);
+		}
+	}
+
+	dpu_hw_mdp_destroy(rm->hw_mdp);
+	rm->hw_mdp = NULL;
+
+	mutex_destroy(&rm->rm_lock);
+
+	return 0;
+}
+
+static int _dpu_rm_hw_blk_create(
+		struct dpu_rm *rm,
+		struct dpu_mdss_cfg *cat,
+		void __iomem *mmio,
+		enum dpu_hw_blk_type type,
+		uint32_t id,
+		void *hw_catalog_info)
+{
+	struct dpu_rm_hw_blk *blk;
+	struct dpu_hw_mdp *hw_mdp;
+	void *hw;
+
+	hw_mdp = rm->hw_mdp;
+
+	switch (type) {
+	case DPU_HW_BLK_LM:
+		hw = dpu_hw_lm_init(id, mmio, cat);
+		break;
+	case DPU_HW_BLK_CTL:
+		hw = dpu_hw_ctl_init(id, mmio, cat);
+		break;
+	case DPU_HW_BLK_CDM:
+		hw = dpu_hw_cdm_init(id, mmio, cat, hw_mdp);
+		break;
+	case DPU_HW_BLK_PINGPONG:
+		hw = dpu_hw_pingpong_init(id, mmio, cat);
+		break;
+	case DPU_HW_BLK_INTF:
+		hw = dpu_hw_intf_init(id, mmio, cat);
+		break;
+	case DPU_HW_BLK_SSPP:
+		/* SSPPs are not managed by the resource manager */
+	case DPU_HW_BLK_TOP:
+		/* Top is a singleton, not managed in hw_blks list */
+	case DPU_HW_BLK_MAX:
+	default:
+		DPU_ERROR("unsupported block type %d\n", type);
+		return -EINVAL;
+	}
+
+	if (IS_ERR_OR_NULL(hw)) {
+		DPU_ERROR("failed hw object creation: type %d, err %ld\n",
+				type, PTR_ERR(hw));
+		return -EFAULT;
+	}
+
+	blk = kzalloc(sizeof(*blk), GFP_KERNEL);
+	if (!blk) {
+		_dpu_rm_hw_destroy(type, hw);
+		return -ENOMEM;
+	}
+
+	blk->type = type;
+	blk->id = id;
+	blk->hw = hw;
+	list_add_tail(&blk->list, &rm->hw_blks[type]);
+
+	return 0;
+}
+
+int dpu_rm_init(struct dpu_rm *rm,
+		struct dpu_mdss_cfg *cat,
+		void __iomem *mmio,
+		struct drm_device *dev)
+{
+	int rc, i;
+	enum dpu_hw_blk_type type;
+
+	if (!rm || !cat || !mmio || !dev) {
+		DPU_ERROR("invalid arguments\n");
+		return -EINVAL;
+	}
+
+	/* Clear, setup lists */
+	memset(rm, 0, sizeof(*rm));
+
+	mutex_init(&rm->rm_lock);
+
+	INIT_LIST_HEAD(&rm->rsvps);
+	for (type = 0; type < DPU_HW_BLK_MAX; type++)
+		INIT_LIST_HEAD(&rm->hw_blks[type]);
+
+	rm->dev = dev;
+
+	/* Some of the sub-blocks require an mdptop to be created */
+	rm->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, mmio, cat);
+	if (IS_ERR_OR_NULL(rm->hw_mdp)) {
+		rc = PTR_ERR(rm->hw_mdp);
+		rm->hw_mdp = NULL;
+		DPU_ERROR("failed: mdp hw not available\n");
+		goto fail;
+	}
+
+	/* Interrogate HW catalog and create tracking items for hw blocks */
+	for (i = 0; i < cat->mixer_count; i++) {
+		struct dpu_lm_cfg *lm = &cat->mixer[i];
+
+		if (lm->pingpong == PINGPONG_MAX) {
+			DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
+			continue;
+		}
+
+		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_LM,
+				cat->mixer[i].id, &cat->mixer[i]);
+		if (rc) {
+			DPU_ERROR("failed: lm hw not available\n");
+			goto fail;
+		}
+
+		if (!rm->lm_max_width) {
+			rm->lm_max_width = lm->sblk->maxwidth;
+		} else if (rm->lm_max_width != lm->sblk->maxwidth) {
+			/*
+			 * Don't expect to have hw where lm max widths differ.
+			 * If found, take the min.
+			 */
+			DPU_ERROR("unsupported: lm maxwidth differs\n");
+			if (rm->lm_max_width > lm->sblk->maxwidth)
+				rm->lm_max_width = lm->sblk->maxwidth;
+		}
+	}
+
+	for (i = 0; i < cat->pingpong_count; i++) {
+		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_PINGPONG,
+				cat->pingpong[i].id, &cat->pingpong[i]);
+		if (rc) {
+			DPU_ERROR("failed: pp hw not available\n");
+			goto fail;
+		}
+	}
+
+	for (i = 0; i < cat->intf_count; i++) {
+		if (cat->intf[i].type == INTF_NONE) {
+			DPU_DEBUG("skip intf %d with type none\n", i);
+			continue;
+		}
+
+		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_INTF,
+				cat->intf[i].id, &cat->intf[i]);
+		if (rc) {
+			DPU_ERROR("failed: intf hw not available\n");
+			goto fail;
+		}
+	}
+
+	for (i = 0; i < cat->ctl_count; i++) {
+		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CTL,
+				cat->ctl[i].id, &cat->ctl[i]);
+		if (rc) {
+			DPU_ERROR("failed: ctl hw not available\n");
+			goto fail;
+		}
+	}
+
+	for (i = 0; i < cat->cdm_count; i++) {
+		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CDM,
+				cat->cdm[i].id, &cat->cdm[i]);
+		if (rc) {
+			DPU_ERROR("failed: cdm hw not available\n");
+			goto fail;
+		}
+	}
+
+	return 0;
+
+fail:
+	dpu_rm_destroy(rm);
+
+	return rc;
+}
+
+/**
+ * _dpu_rm_check_lm_and_get_connected_blks - check if the proposed layer mixer
+ *	meets the proposed use case requirements, incl. hardwired dependent
+ *	blocks like pingpong
+ * @rm: dpu resource manager handle
+ * @rsvp: reservation currently being created
+ * @reqs: proposed use case requirements
+ * @lm: proposed layer mixer; the function checks whether the lm, and all
+ *      other hardwired blocks connected to it (pp), are available and
+ *      appropriate
+ * @pp: output parameter, pingpong block attached to the layer mixer.
+ *      NULL if no pp was available, or none matched the requirements.
+ * @primary_lm: if non-null, the function also checks that lm is a compatible
+ *              peer of primary_lm, in addition to all other requirements
+ * @Return: true if lm matches all requirements, false otherwise
+ */
+static bool _dpu_rm_check_lm_and_get_connected_blks(
+		struct dpu_rm *rm,
+		struct dpu_rm_rsvp *rsvp,
+		struct dpu_rm_requirements *reqs,
+		struct dpu_rm_hw_blk *lm,
+		struct dpu_rm_hw_blk **pp,
+		struct dpu_rm_hw_blk *primary_lm)
+{
+	const struct dpu_lm_cfg *lm_cfg = to_dpu_hw_mixer(lm->hw)->cap;
+	struct dpu_rm_hw_iter iter;
+
+	*pp = NULL;
+
+	DPU_DEBUG("check lm %d pp %d\n",
+			   lm_cfg->id, lm_cfg->pingpong);
+
+	/* Check if this layer mixer is a peer of the proposed primary LM */
+	if (primary_lm) {
+		const struct dpu_lm_cfg *prim_lm_cfg =
+				to_dpu_hw_mixer(primary_lm->hw)->cap;
+
+		if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
+			DPU_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id,
+					prim_lm_cfg->id);
+			return false;
+		}
+	}
+
+	/* Already reserved? */
+	if (RESERVED_BY_OTHER(lm, rsvp)) {
+		DPU_DEBUG("lm %d already reserved\n", lm_cfg->id);
+		return false;
+	}
+
+	dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_PINGPONG);
+	while (_dpu_rm_get_hw_locked(rm, &iter)) {
+		if (iter.blk->id == lm_cfg->pingpong) {
+			*pp = iter.blk;
+			break;
+		}
+	}
+
+	if (!*pp) {
+		DPU_ERROR("failed to get pp %d on lm %d\n", lm_cfg->pingpong,
+				lm_cfg->id);
+		return false;
+	}
+
+	if (RESERVED_BY_OTHER(*pp, rsvp)) {
+		DPU_DEBUG("lm %d pp %d already reserved\n", lm->id,
+				(*pp)->id);
+		return false;
+	}
+
+	return true;
+}
+
+static int _dpu_rm_reserve_lms(
+		struct dpu_rm *rm,
+		struct dpu_rm_rsvp *rsvp,
+		struct dpu_rm_requirements *reqs)
+{
+	struct dpu_rm_hw_blk *lm[MAX_BLOCKS];
+	struct dpu_rm_hw_blk *pp[MAX_BLOCKS];
+	struct dpu_rm_hw_iter iter_i, iter_j;
+	int lm_count = 0;
+	int i, rc = 0;
+
+	if (!reqs->topology->num_lm) {
+		DPU_ERROR("invalid number of lm: %d\n", reqs->topology->num_lm);
+		return -EINVAL;
+	}
+
+	/* Find a primary mixer */
+	dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM);
+	while (lm_count != reqs->topology->num_lm &&
+			_dpu_rm_get_hw_locked(rm, &iter_i)) {
+		memset(&lm, 0, sizeof(lm));
+		memset(&pp, 0, sizeof(pp));
+
+		lm_count = 0;
+		lm[lm_count] = iter_i.blk;
+
+		if (!_dpu_rm_check_lm_and_get_connected_blks(
+				rm, rsvp, reqs, lm[lm_count],
+				&pp[lm_count], NULL))
+			continue;
+
+		++lm_count;
+
+		/* Valid primary mixer found, find matching peers */
+		dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM);
+
+		while (lm_count != reqs->topology->num_lm &&
+				_dpu_rm_get_hw_locked(rm, &iter_j)) {
+			if (iter_i.blk == iter_j.blk)
+				continue;
+
+			if (!_dpu_rm_check_lm_and_get_connected_blks(
+					rm, rsvp, reqs, iter_j.blk,
+					&pp[lm_count], iter_i.blk))
+				continue;
+
+			lm[lm_count] = iter_j.blk;
+			++lm_count;
+		}
+	}
+
+	if (lm_count != reqs->topology->num_lm) {
+		DPU_DEBUG("unable to find appropriate mixers\n");
+		return -ENAVAIL;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(lm); i++) {
+		if (!lm[i])
+			break;
+
+		lm[i]->rsvp_nxt = rsvp;
+		pp[i]->rsvp_nxt = rsvp;
+
+		trace_dpu_rm_reserve_lms(lm[i]->id, lm[i]->type, rsvp->enc_id,
+					 pp[i]->id);
+	}
+
+	return rc;
+}
+
+static int _dpu_rm_reserve_ctls(
+		struct dpu_rm *rm,
+		struct dpu_rm_rsvp *rsvp,
+		const struct dpu_rm_topology_def *top)
+{
+	struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
+	struct dpu_rm_hw_iter iter;
+	int i = 0;
+
+	memset(&ctls, 0, sizeof(ctls));
+
+	dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
+	while (_dpu_rm_get_hw_locked(rm, &iter)) {
+		const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw);
+		unsigned long features = ctl->caps->features;
+		bool has_split_display;
+
+		if (RESERVED_BY_OTHER(iter.blk, rsvp))
+			continue;
+
+		has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;
+
+		DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features);
+
+		if (top->needs_split_display != has_split_display)
+			continue;
+
+		ctls[i] = iter.blk;
+		DPU_DEBUG("ctl %d match\n", iter.blk->id);
+
+		if (++i == top->num_ctl)
+			break;
+	}
+
+	if (i != top->num_ctl)
+		return -ENAVAIL;
+
+	for (i = 0; i < ARRAY_SIZE(ctls) && i < top->num_ctl; i++) {
+		ctls[i]->rsvp_nxt = rsvp;
+		trace_dpu_rm_reserve_ctls(ctls[i]->id, ctls[i]->type,
+					  rsvp->enc_id);
+	}
+
+	return 0;
+}
+
+static int _dpu_rm_reserve_cdm(
+		struct dpu_rm *rm,
+		struct dpu_rm_rsvp *rsvp,
+		uint32_t id,
+		enum dpu_hw_blk_type type)
+{
+	struct dpu_rm_hw_iter iter;
+
+	DRM_DEBUG_KMS("type %d id %d\n", type, id);
+
+	dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CDM);
+	while (_dpu_rm_get_hw_locked(rm, &iter)) {
+		const struct dpu_hw_cdm *cdm = to_dpu_hw_cdm(iter.blk->hw);
+		const struct dpu_cdm_cfg *caps = cdm->caps;
+		bool match = false;
+
+		if (RESERVED_BY_OTHER(iter.blk, rsvp))
+			continue;
+
+		if (type == DPU_HW_BLK_INTF && id != INTF_MAX)
+			match = test_bit(id, &caps->intf_connect);
+
+		DRM_DEBUG_KMS("iter: type:%d id:%d enc:%d cdm:%lu match:%d\n",
+			      iter.blk->type, iter.blk->id, rsvp->enc_id,
+			      caps->intf_connect, match);
+
+		if (!match)
+			continue;
+
+		trace_dpu_rm_reserve_cdm(iter.blk->id, iter.blk->type,
+					 rsvp->enc_id);
+		iter.blk->rsvp_nxt = rsvp;
+		break;
+	}
+
+	if (!iter.hw) {
+		DPU_ERROR("couldn't reserve cdm for type %d id %d\n", type, id);
+		return -ENAVAIL;
+	}
+
+	return 0;
+}
+
+static int _dpu_rm_reserve_intf(
+		struct dpu_rm *rm,
+		struct dpu_rm_rsvp *rsvp,
+		uint32_t id,
+		enum dpu_hw_blk_type type,
+		bool needs_cdm)
+{
+	struct dpu_rm_hw_iter iter;
+	int ret = 0;
+
+	/* Find the block entry in the rm, and note the reservation */
+	dpu_rm_init_hw_iter(&iter, 0, type);
+	while (_dpu_rm_get_hw_locked(rm, &iter)) {
+		if (iter.blk->id != id)
+			continue;
+
+		if (RESERVED_BY_OTHER(iter.blk, rsvp)) {
+			DPU_ERROR("type %d id %d already reserved\n", type, id);
+			return -ENAVAIL;
+		}
+
+		iter.blk->rsvp_nxt = rsvp;
+		trace_dpu_rm_reserve_intf(iter.blk->id, iter.blk->type,
+					  rsvp->enc_id);
+		break;
+	}
+
+	/* Shouldn't happen since intfs are fixed at probe */
+	if (!iter.hw) {
+		DPU_ERROR("couldn't find type %d id %d\n", type, id);
+		return -EINVAL;
+	}
+
+	if (needs_cdm)
+		ret = _dpu_rm_reserve_cdm(rm, rsvp, id, type);
+
+	return ret;
+}
+
+static int _dpu_rm_reserve_intf_related_hw(
+		struct dpu_rm *rm,
+		struct dpu_rm_rsvp *rsvp,
+		struct dpu_encoder_hw_resources *hw_res)
+{
+	int i, ret = 0;
+	u32 id;
+
+	for (i = 0; i < ARRAY_SIZE(hw_res->intfs); i++) {
+		if (hw_res->intfs[i] == INTF_MODE_NONE)
+			continue;
+		id = i + INTF_0;
+		ret = _dpu_rm_reserve_intf(rm, rsvp, id,
+				DPU_HW_BLK_INTF, hw_res->needs_cdm);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+static int _dpu_rm_make_next_rsvp(
+		struct dpu_rm *rm,
+		struct drm_encoder *enc,
+		struct drm_crtc_state *crtc_state,
+		struct drm_connector_state *conn_state,
+		struct dpu_rm_rsvp *rsvp,
+		struct dpu_rm_requirements *reqs)
+{
+	int ret;
+	struct dpu_rm_topology_def topology;
+
+	/* Create reservation info, tag reserved blocks with it as we go */
+	rsvp->seq = ++rm->rsvp_next_seq;
+	rsvp->enc_id = enc->base.id;
+	rsvp->topology = reqs->topology->top_name;
+	list_add_tail(&rsvp->list, &rm->rsvps);
+
+	ret = _dpu_rm_reserve_lms(rm, rsvp, reqs);
+	if (ret) {
+		DPU_ERROR("unable to find appropriate mixers\n");
+		return ret;
+	}
+
+	/*
+	 * Do assignment preferring to give away low-resource CTLs first:
+	 * - Check mixers without Split Display
+	 * - Only then allow to grab from CTLs with split display capability
+	 */
+	ret = _dpu_rm_reserve_ctls(rm, rsvp, reqs->topology);
+	if (ret && !reqs->topology->needs_split_display) {
+		memcpy(&topology, reqs->topology, sizeof(topology));
+		topology.needs_split_display = true;
+		ret = _dpu_rm_reserve_ctls(rm, rsvp, &topology);
+	}
+	if (ret) {
+		DPU_ERROR("unable to find appropriate CTL\n");
+		return ret;
+	}
+
+	/* Assign INTFs and blks whose usage is tied to them: CTL & CDM */
+	ret = _dpu_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res);
+	if (ret)
+		return ret;
+
+	return ret;
+}
+
+static int _dpu_rm_populate_requirements(
+		struct dpu_rm *rm,
+		struct drm_encoder *enc,
+		struct drm_crtc_state *crtc_state,
+		struct drm_connector_state *conn_state,
+		struct dpu_rm_requirements *reqs,
+		struct msm_display_topology req_topology)
+{
+	int i;
+
+	memset(reqs, 0, sizeof(*reqs));
+
+	dpu_encoder_get_hw_resources(enc, &reqs->hw_res, conn_state);
+
+	for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++) {
+		if (RM_IS_TOPOLOGY_MATCH(g_top_table[i],
+					req_topology)) {
+			reqs->topology = &g_top_table[i];
+			break;
+		}
+	}
+
+	if (!reqs->topology) {
+		DPU_ERROR("invalid topology for the display\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Set the requirement based on caps if not set from user space.
+	 * This ensures an LM tied to the DS blocks is selected.
+	 * Currently, DS blocks are tied to LM 0 and LM 1 (primary display).
+	 */
+	if (!RM_RQ_DS(reqs) && rm->hw_mdp->caps->has_dest_scaler &&
+		conn_state->connector->connector_type == DRM_MODE_CONNECTOR_DSI)
+		reqs->top_ctrl |= BIT(DPU_RM_TOPCTL_DS);
+
+	DRM_DEBUG_KMS("top_ctrl: 0x%llX num_h_tiles: %d\n", reqs->top_ctrl,
+		      reqs->hw_res.display_num_of_h_tiles);
+	DRM_DEBUG_KMS("num_lm: %d num_ctl: %d topology: %d split_display: %d\n",
+		      reqs->topology->num_lm, reqs->topology->num_ctl,
+		      reqs->topology->top_name,
+		      reqs->topology->needs_split_display);
+
+	return 0;
+}
+
+static struct dpu_rm_rsvp *_dpu_rm_get_rsvp(
+		struct dpu_rm *rm,
+		struct drm_encoder *enc)
+{
+	struct dpu_rm_rsvp *i;
+
+	if (!rm || !enc) {
+		DPU_ERROR("invalid params\n");
+		return NULL;
+	}
+
+	if (list_empty(&rm->rsvps))
+		return NULL;
+
+	list_for_each_entry(i, &rm->rsvps, list)
+		if (i->enc_id == enc->base.id)
+			return i;
+
+	return NULL;
+}
+
+static struct drm_connector *_dpu_rm_get_connector(
+		struct drm_encoder *enc)
+{
+	struct drm_connector *conn = NULL;
+	struct list_head *connector_list =
+			&enc->dev->mode_config.connector_list;
+
+	list_for_each_entry(conn, connector_list, head)
+		if (conn->encoder == enc)
+			return conn;
+
+	return NULL;
+}
+
+/**
+ * _dpu_rm_release_rsvp - untag the HW blocks held by a reservation and
+ *	free the reservation itself
+ * @rm:		KMS handle
+ * @rsvp:	RSVP pointer to release resources for, then free
+ * @conn:	DRM connector of the display chain (currently unused here)
+ */
+static void _dpu_rm_release_rsvp(
+		struct dpu_rm *rm,
+		struct dpu_rm_rsvp *rsvp,
+		struct drm_connector *conn)
+{
+	struct dpu_rm_rsvp *rsvp_c, *rsvp_n;
+	struct dpu_rm_hw_blk *blk;
+	enum dpu_hw_blk_type type;
+
+	if (!rsvp)
+		return;
+
+	DPU_DEBUG("rel rsvp %d enc %d\n", rsvp->seq, rsvp->enc_id);
+
+	list_for_each_entry_safe(rsvp_c, rsvp_n, &rm->rsvps, list) {
+		if (rsvp == rsvp_c) {
+			list_del(&rsvp_c->list);
+			break;
+		}
+	}
+
+	for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+		list_for_each_entry(blk, &rm->hw_blks[type], list) {
+			if (blk->rsvp == rsvp) {
+				blk->rsvp = NULL;
+				DPU_DEBUG("rel rsvp %d enc %d %d %d\n",
+						rsvp->seq, rsvp->enc_id,
+						blk->type, blk->id);
+			}
+			if (blk->rsvp_nxt == rsvp) {
+				blk->rsvp_nxt = NULL;
+				DPU_DEBUG("rel rsvp_nxt %d enc %d %d %d\n",
+						rsvp->seq, rsvp->enc_id,
+						blk->type, blk->id);
+			}
+		}
+	}
+
+	kfree(rsvp);
+}
+
+void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
+{
+	struct dpu_rm_rsvp *rsvp;
+	struct drm_connector *conn;
+
+	if (!rm || !enc) {
+		DPU_ERROR("invalid params\n");
+		return;
+	}
+
+	mutex_lock(&rm->rm_lock);
+
+	rsvp = _dpu_rm_get_rsvp(rm, enc);
+	if (!rsvp) {
+		DPU_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
+		goto end;
+	}
+
+	conn = _dpu_rm_get_connector(enc);
+	if (!conn) {
+		DPU_ERROR("failed to get connector for enc %d\n", enc->base.id);
+		goto end;
+	}
+
+	_dpu_rm_release_rsvp(rm, rsvp, conn);
+end:
+	mutex_unlock(&rm->rm_lock);
+}
+
+static int _dpu_rm_commit_rsvp(
+		struct dpu_rm *rm,
+		struct dpu_rm_rsvp *rsvp,
+		struct drm_connector_state *conn_state)
+{
+	struct dpu_rm_hw_blk *blk;
+	enum dpu_hw_blk_type type;
+	int ret = 0;
+
+	/* Swap next rsvp to be the active */
+	for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+		list_for_each_entry(blk, &rm->hw_blks[type], list) {
+			if (blk->rsvp_nxt) {
+				blk->rsvp = blk->rsvp_nxt;
+				blk->rsvp_nxt = NULL;
+			}
+		}
+	}
+
+	if (!ret)
+		DRM_DEBUG_KMS("rsrv enc %d topology %d\n", rsvp->enc_id,
+			      rsvp->topology);
+
+	return ret;
+}
+
+int dpu_rm_reserve(
+		struct dpu_rm *rm,
+		struct drm_encoder *enc,
+		struct drm_crtc_state *crtc_state,
+		struct drm_connector_state *conn_state,
+		struct msm_display_topology topology,
+		bool test_only)
+{
+	struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
+	struct dpu_rm_requirements reqs;
+	int ret;
+
+	if (!rm || !enc || !crtc_state || !conn_state) {
+		DPU_ERROR("invalid arguments\n");
+		return -EINVAL;
+	}
+
+	/* Check if this is just a page-flip */
+	if (!drm_atomic_crtc_needs_modeset(crtc_state))
+		return 0;
+
+	DRM_DEBUG_KMS("reserving hw for conn %d enc %d crtc %d test_only %d\n",
+		      conn_state->connector->base.id, enc->base.id,
+		      crtc_state->crtc->base.id, test_only);
+
+	mutex_lock(&rm->rm_lock);
+
+	_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_BEGIN);
+
+	ret = _dpu_rm_populate_requirements(rm, enc, crtc_state,
+			conn_state, &reqs, topology);
+	if (ret) {
+		DPU_ERROR("failed to populate hw requirements\n");
+		goto end;
+	}
+
+	/*
+	 * We only support one active reservation per-hw-block. But to implement
+	 * transactional semantics for test-only, and for allowing failure while
+	 * modifying your existing reservation, over the course of this
+	 * function we can have two reservations:
+	 * Current: Existing reservation
+	 * Next: Proposed reservation. The proposed reservation may fail, or may
+	 *       be discarded if in test-only mode.
+	 * If reservation is successful, and we're not in test-only, then we
+	 * replace the current with the next.
+	 */
+	rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL);
+	if (!rsvp_nxt) {
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	rsvp_cur = _dpu_rm_get_rsvp(rm, enc);
+
+	/*
+	 * User can request that we clear out any reservation during the
+	 * atomic_check phase by using this CLEAR bit
+	 */
+	if (rsvp_cur && test_only && RM_RQ_CLEAR(&reqs)) {
+		DPU_DEBUG("test_only & CLEAR: clear rsvp[s%de%d]\n",
+				rsvp_cur->seq, rsvp_cur->enc_id);
+		_dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
+		rsvp_cur = NULL;
+		_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_CLEAR);
+	}
+
+	/* Check the proposed reservation, store it in hw's "next" field */
+	ret = _dpu_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
+			rsvp_nxt, &reqs);
+
+	_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_RSVPNEXT);
+
+	if (ret) {
+		DPU_ERROR("failed to reserve hw resources: %d\n", ret);
+		_dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
+	} else if (test_only && !RM_RQ_LOCK(&reqs)) {
+		/*
+		 * Normally, if test_only, test the reservation and then undo
+		 * However, if the user requests LOCK, then keep the reservation
+		 * made during the atomic_check phase.
+		 */
+		DPU_DEBUG("test_only: discard test rsvp[s%de%d]\n",
+				rsvp_nxt->seq, rsvp_nxt->enc_id);
+		_dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
+	} else {
+		if (test_only && RM_RQ_LOCK(&reqs))
+			DPU_DEBUG("test_only & LOCK: lock rsvp[s%de%d]\n",
+					rsvp_nxt->seq, rsvp_nxt->enc_id);
+
+		_dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
+
+		ret = _dpu_rm_commit_rsvp(rm, rsvp_nxt, conn_state);
+	}
+
+	_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_FINAL);
+
+end:
+	mutex_unlock(&rm->rm_lock);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
new file mode 100644
index 0000000..ffd1841
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DPU_RM_H__
+#define __DPU_RM_H__
+
+#include <linux/list.h>
+
+#include "msm_kms.h"
+#include "dpu_hw_top.h"
+
+/**
+ * enum dpu_rm_topology_name - HW resource use case in use by connector
+ * @DPU_RM_TOPOLOGY_NONE:                 No topology in use currently
+ * @DPU_RM_TOPOLOGY_SINGLEPIPE:           1 LM, 1 PP, 1 INTF/WB
+ * @DPU_RM_TOPOLOGY_DUALPIPE:             2 LM, 2 PP, 2 INTF/WB
+ * @DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE:     2 LM, 2 PP, 3DMux, 1 INTF/WB
+ */
+enum dpu_rm_topology_name {
+	DPU_RM_TOPOLOGY_NONE = 0,
+	DPU_RM_TOPOLOGY_SINGLEPIPE,
+	DPU_RM_TOPOLOGY_DUALPIPE,
+	DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE,
+	DPU_RM_TOPOLOGY_MAX,
+};
+
+/**
+ * enum dpu_rm_topology_control - reservation control preferences a client
+ *	may request for its display chain
+ * @DPU_RM_TOPCTL_RESERVE_LOCK: If set, in AtomicTest phase, after a successful
+ *                              test, reserve the resources for this display.
+ *                              Normal behavior would not impact the reservation
+ *                              list during the AtomicTest phase.
+ * @DPU_RM_TOPCTL_RESERVE_CLEAR: If set, in AtomicTest phase, before testing,
+ *                               release any reservation held by this display.
+ *                               Normal behavior would not impact the
+ *                               reservation list during the AtomicTest phase.
+ * @DPU_RM_TOPCTL_DS: Require layer mixers with DS (destination scaler)
+ *                    capabilities
+ */
+enum dpu_rm_topology_control {
+	DPU_RM_TOPCTL_RESERVE_LOCK,
+	DPU_RM_TOPCTL_RESERVE_CLEAR,
+	DPU_RM_TOPCTL_DS,
+};
+
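+/*
+ * Example (illustrative): a client wanting its resources reserved and kept
+ * during atomic test, on destination-scaler-capable mixers, would build a
+ * control bitmask as
+ *
+ *	top_ctrl = BIT(DPU_RM_TOPCTL_RESERVE_LOCK) | BIT(DPU_RM_TOPCTL_DS);
+ *
+ * which the RM_RQ_LOCK()/RM_RQ_DS() helpers in dpu_rm.c then test.
+ */
+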
+/**
+ * struct dpu_rm - DPU dynamic hardware resource manager
+ * @dev: device handle for event logging purposes
+ * @rsvps: list of hardware reservations by each crtc->encoder->connector
+ * @hw_blks: array of lists of hardware resources present in the system, one
+ *	list per type of hardware block
+ * @hw_mdp: hardware object for mdp_top
+ * @lm_max_width: cached layer mixer maximum width
+ * @rsvp_next_seq: sequence number for next reservation for debugging purposes
+ * @rm_lock: resource manager mutex
+ */
+struct dpu_rm {
+	struct drm_device *dev;
+	struct list_head rsvps;
+	struct list_head hw_blks[DPU_HW_BLK_MAX];
+	struct dpu_hw_mdp *hw_mdp;
+	uint32_t lm_max_width;
+	uint32_t rsvp_next_seq;
+	struct mutex rm_lock;
+};
+
+/**
+ * struct dpu_rm_hw_blk - resource manager internal structure
+ *	forward declaration so the iterator can be defined without resorting
+ *	to a void pointer
+ */
+struct dpu_rm_hw_blk;
+
+/**
+ * struct dpu_rm_hw_iter - iterator for use with dpu_rm
+ * @hw: dpu_hw object requested, or NULL on failure
+ * @blk: dpu_rm internal block representation, used as the iteration cursor.
+ *	Clients should ignore it.
+ * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
+ * @type: Hardware Block Type client wishes to search for.
+ */
+struct dpu_rm_hw_iter {
+	void *hw;
+	struct dpu_rm_hw_blk *blk;
+	uint32_t enc_id;
+	enum dpu_hw_blk_type type;
+};
+
+/**
+ * dpu_rm_init - Read hardware catalog and create reservation tracking objects
+ *	for all HW blocks.
+ * @rm: DPU Resource Manager handle
+ * @cat: Pointer to hardware catalog
+ * @mmio: mapped register io address of MDP
+ * @dev: device handle for event logging purposes
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_init(struct dpu_rm *rm,
+		struct dpu_mdss_cfg *cat,
+		void __iomem *mmio,
+		struct drm_device *dev);
+
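+/*
+ * Example call site (illustrative sketch; the "dpu_kms" fields below are
+ * assumptions about the caller, not part of this API):
+ *
+ *	rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio,
+ *			 dpu_kms->dev);
+ *	if (rc)
+ *		return rc;
+ *	...
+ *	dpu_rm_destroy(&dpu_kms->rm);
+ */
+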
+/**
+ * dpu_rm_destroy - Free all memory allocated by dpu_rm_init
+ * @rm: DPU Resource Manager handle
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_destroy(struct dpu_rm *rm);
+
+/**
+ * dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze
+ *	the in-use connections and user requirements, specified through the
+ *	related topology control properties, and reserve hardware blocks for
+ *	that display chain.
+ *	HW blocks can then be accessed through dpu_rm_get_* functions.
+ *	HW reservations should be released via dpu_rm_release().
+ * @rm: DPU Resource Manager handle
+ * @drm_enc: DRM Encoder handle
+ * @crtc_state: Proposed Atomic DRM CRTC State handle
+ * @conn_state: Proposed Atomic DRM Connector State handle
+ * @topology: Topology info for the display (passed by value)
+ * @test_only: Atomic-Test phase, discard results (unless property overrides)
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_reserve(struct dpu_rm *rm,
+		struct drm_encoder *drm_enc,
+		struct drm_crtc_state *crtc_state,
+		struct drm_connector_state *conn_state,
+		struct msm_display_topology topology,
+		bool test_only);
+
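+/*
+ * Example call site (illustrative sketch; where "rm" lives and how
+ * "topology" is derived are assumptions about the caller, typically an
+ * encoder's atomic_check path). Passing test_only = true performs a trial
+ * reservation that is discarded unless RESERVE_LOCK was requested:
+ *
+ *	ret = dpu_rm_reserve(rm, drm_enc, crtc_state, conn_state,
+ *			     topology, true);
+ */
+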
+/**
+ * dpu_rm_release - Given the encoder for the display chain, release any
+ *	HW blocks previously reserved for that use case.
+ * @rm: DPU Resource Manager handle
+ * @enc: DRM Encoder handle
+ */
+void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc);
+
+/**
+ * dpu_rm_get_mdp - Retrieve HW block for MDP TOP.
+ *	This is never reserved, and is usable by any display.
+ * @rm: DPU Resource Manager handle
+ * @Return: Pointer to hw block or NULL
+ */
+struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm);
+
+/**
+ * dpu_rm_init_hw_iter - setup given iterator for new iteration over hw list
+ *	using dpu_rm_get_hw
+ * @iter: iter object to initialize
+ * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
+ * @type: Hardware Block Type client wishes to search for.
+ */
+void dpu_rm_init_hw_iter(
+		struct dpu_rm_hw_iter *iter,
+		uint32_t enc_id,
+		enum dpu_hw_blk_type type);
+
+/**
+ * dpu_rm_get_hw - retrieve reserved hw object given encoder and hw type
+ *	Meant to do a single pass through the hardware list to iteratively
+ *	retrieve hardware blocks of a given type for a given encoder.
+ *	Initialize an iterator object.
+ *	Set hw block type of interest. Set encoder id of interest, 0 for any.
+ *	Function returns first hw of type for that encoder.
+ *	Subsequent calls will return the next reserved hw of that type in-order.
+ *	Iterator HW pointer will be null on failure to find hw.
+ * @rm: DPU Resource Manager handle
+ * @iter: iterator object
+ * @Return: true on match found, false on no match found
+ */
+bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter);
+
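+/*
+ * Example (illustrative; assumes a successful prior dpu_rm_reserve() for
+ * this encoder, and use_ctl() is a placeholder for the caller's own
+ * handling of each block):
+ *
+ *	struct dpu_rm_hw_iter iter;
+ *
+ *	dpu_rm_init_hw_iter(&iter, drm_enc->base.id, DPU_HW_BLK_CTL);
+ *	while (dpu_rm_get_hw(rm, &iter))
+ *		use_ctl(to_dpu_hw_ctl(iter.hw));
+ */
+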
+/**
+ * dpu_rm_check_property_topctl - validate property bitmask before it is set
+ * @val: user's proposed topology control bitmask
+ * @Return: 0 on success or error
+ */
+int dpu_rm_check_property_topctl(uint64_t val);
+
+/**
+ * dpu_rm_get_topology_name - returns the name of the given topology
+ *                            definition
+ * @topology: topology definition
+ * @Return: name of the topology
+ */
+enum dpu_rm_topology_name
+dpu_rm_get_topology_name(struct msm_display_topology topology);
+
+#endif /* __DPU_RM_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
new file mode 100644
index 0000000..ae0ca50
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -0,0 +1,1007 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(_DPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _DPU_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drm_rect.h>
+#include "dpu_crtc.h"
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_plane.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dpu
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE dpu_trace
+
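+/*
+ * As with any tracepoint header, exactly one .c file in the driver must
+ * define CREATE_TRACE_POINTS before including this header so that the
+ * tracepoint bodies are emitted, e.g. (illustrative):
+ *
+ *	#define CREATE_TRACE_POINTS
+ *	#include "dpu_trace.h"
+ */
+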
+TRACE_EVENT(dpu_perf_set_qos_luts,
+	TP_PROTO(u32 pnum, u32 fmt, bool rt, u32 fl,
+		u32 lut, u32 lut_usage),
+	TP_ARGS(pnum, fmt, rt, fl, lut, lut_usage),
+	TP_STRUCT__entry(
+			__field(u32, pnum)
+			__field(u32, fmt)
+			__field(bool, rt)
+			__field(u32, fl)
+			__field(u64, lut)
+			__field(u32, lut_usage)
+	),
+	TP_fast_assign(
+			__entry->pnum = pnum;
+			__entry->fmt = fmt;
+			__entry->rt = rt;
+			__entry->fl = fl;
+			__entry->lut = lut;
+			__entry->lut_usage = lut_usage;
+	),
+	TP_printk("pnum=%d fmt=%x rt=%d fl=%d lut=0x%llx lut_usage=%d",
+			__entry->pnum, __entry->fmt,
+			__entry->rt, __entry->fl,
+			__entry->lut, __entry->lut_usage)
+);
+
+TRACE_EVENT(dpu_perf_set_danger_luts,
+	TP_PROTO(u32 pnum, u32 fmt, u32 mode, u32 danger_lut,
+		u32 safe_lut),
+	TP_ARGS(pnum, fmt, mode, danger_lut, safe_lut),
+	TP_STRUCT__entry(
+			__field(u32, pnum)
+			__field(u32, fmt)
+			__field(u32, mode)
+			__field(u32, danger_lut)
+			__field(u32, safe_lut)
+	),
+	TP_fast_assign(
+			__entry->pnum = pnum;
+			__entry->fmt = fmt;
+			__entry->mode = mode;
+			__entry->danger_lut = danger_lut;
+			__entry->safe_lut = safe_lut;
+	),
+	TP_printk("pnum=%d fmt=%x mode=%d luts[0x%x, 0x%x]",
+			__entry->pnum, __entry->fmt,
+			__entry->mode, __entry->danger_lut,
+			__entry->safe_lut)
+);
+
+TRACE_EVENT(dpu_perf_set_ot,
+	TP_PROTO(u32 pnum, u32 xin_id, u32 rd_lim, u32 vbif_idx),
+	TP_ARGS(pnum, xin_id, rd_lim, vbif_idx),
+	TP_STRUCT__entry(
+			__field(u32, pnum)
+			__field(u32, xin_id)
+			__field(u32, rd_lim)
+			__field(u32, vbif_idx)
+	),
+	TP_fast_assign(
+			__entry->pnum = pnum;
+			__entry->xin_id = xin_id;
+			__entry->rd_lim = rd_lim;
+			__entry->vbif_idx = vbif_idx;
+	),
+	TP_printk("pnum:%d xin_id:%d ot:%d vbif:%d",
+			__entry->pnum, __entry->xin_id, __entry->rd_lim,
+			__entry->vbif_idx)
+)
+
+TRACE_EVENT(dpu_perf_update_bus,
+	TP_PROTO(int client, unsigned long long ab_quota,
+	unsigned long long ib_quota),
+	TP_ARGS(client, ab_quota, ib_quota),
+	TP_STRUCT__entry(
+			__field(int, client)
+			__field(u64, ab_quota)
+			__field(u64, ib_quota)
+	),
+	TP_fast_assign(
+			__entry->client = client;
+			__entry->ab_quota = ab_quota;
+			__entry->ib_quota = ib_quota;
+	),
+	TP_printk("Request client:%d ab=%llu ib=%llu",
+			__entry->client,
+			__entry->ab_quota,
+			__entry->ib_quota)
+)
+
+TRACE_EVENT(dpu_cmd_release_bw,
+	TP_PROTO(u32 crtc_id),
+	TP_ARGS(crtc_id),
+	TP_STRUCT__entry(
+			__field(u32, crtc_id)
+	),
+	TP_fast_assign(
+			__entry->crtc_id = crtc_id;
+	),
+	TP_printk("crtc:%d", __entry->crtc_id)
+);
+
+TRACE_EVENT(tracing_mark_write,
+	TP_PROTO(int pid, const char *name, bool trace_begin),
+	TP_ARGS(pid, name, trace_begin),
+	TP_STRUCT__entry(
+			__field(int, pid)
+			__string(trace_name, name)
+			__field(bool, trace_begin)
+	),
+	TP_fast_assign(
+			__entry->pid = pid;
+			__assign_str(trace_name, name);
+			__entry->trace_begin = trace_begin;
+	),
+	TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
+		__entry->pid, __get_str(trace_name))
+)
+
+TRACE_EVENT(dpu_trace_counter,
+	TP_PROTO(int pid, char *name, int value),
+	TP_ARGS(pid, name, value),
+	TP_STRUCT__entry(
+			__field(int, pid)
+			__string(counter_name, name)
+			__field(int, value)
+	),
+	TP_fast_assign(
+			__entry->pid = current->tgid;
+			__assign_str(counter_name, name);
+			__entry->value = value;
+	),
+	TP_printk("%d|%s|%d", __entry->pid,
+			__get_str(counter_name), __entry->value)
+)
+
+TRACE_EVENT(dpu_perf_crtc_update,
+	TP_PROTO(u32 crtc, u64 bw_ctl_mnoc, u64 bw_ctl_llcc,
+			u64 bw_ctl_ebi, u32 core_clk_rate,
+			bool stop_req, u32 update_bus, u32 update_clk),
+	TP_ARGS(crtc, bw_ctl_mnoc, bw_ctl_llcc, bw_ctl_ebi, core_clk_rate,
+		stop_req, update_bus, update_clk),
+	TP_STRUCT__entry(
+			__field(u32, crtc)
+			__field(u64, bw_ctl_mnoc)
+			__field(u64, bw_ctl_llcc)
+			__field(u64, bw_ctl_ebi)
+			__field(u32, core_clk_rate)
+			__field(bool, stop_req)
+			__field(u32, update_bus)
+			__field(u32, update_clk)
+	),
+	TP_fast_assign(
+			__entry->crtc = crtc;
+			__entry->bw_ctl_mnoc = bw_ctl_mnoc;
+			__entry->bw_ctl_llcc = bw_ctl_llcc;
+			__entry->bw_ctl_ebi = bw_ctl_ebi;
+			__entry->core_clk_rate = core_clk_rate;
+			__entry->stop_req = stop_req;
+			__entry->update_bus = update_bus;
+			__entry->update_clk = update_clk;
+	),
+	TP_printk(
+		"crtc=%d bw_mnoc=%llu bw_llcc=%llu bw_ebi=%llu clk_rate=%u stop_req=%d u_bus=%d u_clk=%d",
+			__entry->crtc,
+			__entry->bw_ctl_mnoc,
+			__entry->bw_ctl_llcc,
+			__entry->bw_ctl_ebi,
+			__entry->core_clk_rate,
+			__entry->stop_req,
+			__entry->update_bus,
+			__entry->update_clk)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_irq_template,
+	TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+		 int irq_idx),
+	TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+		__field(	enum dpu_intr_idx,	intr_idx	)
+		__field(	int,			hw_idx		)
+		__field(	int,			irq_idx		)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->intr_idx = intr_idx;
+		__entry->hw_idx = hw_idx;
+		__entry->irq_idx = irq_idx;
+	),
+	TP_printk("id=%u, intr=%d, hw=%d, irq=%d",
+		  __entry->drm_id, __entry->intr_idx, __entry->hw_idx,
+		  __entry->irq_idx)
+);
+DEFINE_EVENT(dpu_enc_irq_template, dpu_enc_irq_register_success,
+	TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+		 int irq_idx),
+	TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx)
+);
+DEFINE_EVENT(dpu_enc_irq_template, dpu_enc_irq_unregister_success,
+	TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+		 int irq_idx),
+	TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx)
+);
+
+TRACE_EVENT(dpu_enc_irq_wait_success,
+	TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+		 int irq_idx, enum dpu_pingpong pp_idx, int atomic_cnt),
+	TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx, pp_idx, atomic_cnt),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+		__field(	enum dpu_intr_idx,	intr_idx	)
+		__field(	int,			hw_idx		)
+		__field(	int,			irq_idx		)
+		__field(	enum dpu_pingpong,	pp_idx		)
+		__field(	int,			atomic_cnt	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->intr_idx = intr_idx;
+		__entry->hw_idx = hw_idx;
+		__entry->irq_idx = irq_idx;
+		__entry->pp_idx = pp_idx;
+		__entry->atomic_cnt = atomic_cnt;
+	),
+	TP_printk("id=%u, intr=%d, hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
+		  __entry->drm_id, __entry->intr_idx, __entry->hw_idx,
+		  __entry->irq_idx, __entry->pp_idx, __entry->atomic_cnt)
+);
+
+DECLARE_EVENT_CLASS(dpu_drm_obj_template,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+	),
+	TP_printk("id=%u", __entry->drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_atomic_check,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_mode_set,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_disable,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_kickoff,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_prepare_kickoff,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_prepare_kickoff_reset,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_complete_flip,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_vblank_cb,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_complete_commit,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_enc_enable,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_commit,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_wait_for_commit_done,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+
+TRACE_EVENT(dpu_enc_enable,
+	TP_PROTO(uint32_t drm_id, int hdisplay, int vdisplay),
+	TP_ARGS(drm_id, hdisplay, vdisplay),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+		__field(	int,			hdisplay	)
+		__field(	int,			vdisplay	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->hdisplay = hdisplay;
+		__entry->vdisplay = vdisplay;
+	),
+	TP_printk("id=%u, mode=%dx%d",
+		  __entry->drm_id, __entry->hdisplay, __entry->vdisplay)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_keyval_template,
+	TP_PROTO(uint32_t drm_id, int val),
+	TP_ARGS(drm_id, val),
+	TP_STRUCT__entry(
+		__field(	uint32_t,	drm_id	)
+		__field(	int,		val	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->val = val;
+	),
+	TP_printk("id=%u, val=%d", __entry->drm_id, __entry->val)
+);
+DEFINE_EVENT(dpu_enc_keyval_template, dpu_enc_underrun_cb,
+	TP_PROTO(uint32_t drm_id, int count),
+	TP_ARGS(drm_id, count)
+);
+DEFINE_EVENT(dpu_enc_keyval_template, dpu_enc_trigger_start,
+	TP_PROTO(uint32_t drm_id, int ctl_idx),
+	TP_ARGS(drm_id, ctl_idx)
+);
+
+TRACE_EVENT(dpu_enc_atomic_check_flags,
+	TP_PROTO(uint32_t drm_id, unsigned int flags, int private_flags),
+	TP_ARGS(drm_id, flags, private_flags),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+		__field(	unsigned int,		flags		)
+		__field(	int,			private_flags	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->flags = flags;
+		__entry->private_flags = private_flags;
+	),
+	TP_printk("id=%u, flags=%u, private_flags=%d",
+		  __entry->drm_id, __entry->flags, __entry->private_flags)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_id_enable_template,
+	TP_PROTO(uint32_t drm_id, bool enable),
+	TP_ARGS(drm_id, enable),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+		__field(	bool,			enable		)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->enable = enable;
+	),
+	TP_printk("id=%u, enable=%s",
+		  __entry->drm_id, __entry->enable ? "true" : "false")
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_rc_helper,
+	TP_PROTO(uint32_t drm_id, bool enable),
+	TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_vblank_cb,
+	TP_PROTO(uint32_t drm_id, bool enable),
+	TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_frame_event_cb,
+	TP_PROTO(uint32_t drm_id, bool enable),
+	TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_phys_cmd_connect_te,
+	TP_PROTO(uint32_t drm_id, bool enable),
+	TP_ARGS(drm_id, enable)
+);
+
+TRACE_EVENT(dpu_enc_rc,
+	TP_PROTO(uint32_t drm_id, u32 sw_event, bool idle_pc_supported,
+		 int rc_state, const char *stage),
+	TP_ARGS(drm_id, sw_event, idle_pc_supported, rc_state, stage),
+	TP_STRUCT__entry(
+		__field(	uint32_t,	drm_id			)
+		__field(	u32,		sw_event		)
+		__field(	bool,		idle_pc_supported	)
+		__field(	int,		rc_state		)
+		__string(	stage_str,	stage			)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->sw_event = sw_event;
+		__entry->idle_pc_supported = idle_pc_supported;
+		__entry->rc_state = rc_state;
+		__assign_str(stage_str, stage);
+	),
+	TP_printk("%s: id:%u, sw_event:%d, idle_pc_supported:%s, rc_state:%d",
+		  __get_str(stage_str), __entry->drm_id, __entry->sw_event,
+		  __entry->idle_pc_supported ? "true" : "false",
+		  __entry->rc_state)
+);
+
+TRACE_EVENT(dpu_enc_frame_done_cb_not_busy,
+	TP_PROTO(uint32_t drm_id, u32 event, enum dpu_intf intf_idx),
+	TP_ARGS(drm_id, event, intf_idx),
+	TP_STRUCT__entry(
+		__field(	uint32_t,	drm_id		)
+		__field(	u32,		event		)
+		__field(	enum dpu_intf,	intf_idx	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->event = event;
+		__entry->intf_idx = intf_idx;
+	),
+	TP_printk("id=%u, event=%u, intf=%d", __entry->drm_id, __entry->event,
+		  __entry->intf_idx)
+);
+
+TRACE_EVENT(dpu_enc_frame_done_cb,
+	TP_PROTO(uint32_t drm_id, unsigned int idx,
+		 unsigned long frame_busy_mask),
+	TP_ARGS(drm_id, idx, frame_busy_mask),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+		__field(	unsigned int,		idx		)
+		__field(	unsigned long,		frame_busy_mask	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->idx = idx;
+		__entry->frame_busy_mask = frame_busy_mask;
+	),
+	TP_printk("id=%u, idx=%u, frame_busy_mask=%lx", __entry->drm_id,
+		  __entry->idx, __entry->frame_busy_mask)
+);
+
+TRACE_EVENT(dpu_enc_trigger_flush,
+	TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx,
+		 int pending_kickoff_cnt, int ctl_idx, u32 pending_flush_ret),
+	TP_ARGS(drm_id, intf_idx, pending_kickoff_cnt, ctl_idx,
+		pending_flush_ret),
+	TP_STRUCT__entry(
+		__field(	uint32_t,	drm_id			)
+		__field(	enum dpu_intf,	intf_idx		)
+		__field(	int,		pending_kickoff_cnt	)
+		__field(	int,		ctl_idx			)
+		__field(	u32,		pending_flush_ret	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->intf_idx = intf_idx;
+		__entry->pending_kickoff_cnt = pending_kickoff_cnt;
+		__entry->ctl_idx = ctl_idx;
+		__entry->pending_flush_ret = pending_flush_ret;
+	),
+	TP_printk("id=%u, intf_idx=%d, pending_kickoff_cnt=%d ctl_idx=%d "
+		  "pending_flush_ret=%u", __entry->drm_id,
+		  __entry->intf_idx, __entry->pending_kickoff_cnt,
+		  __entry->ctl_idx, __entry->pending_flush_ret)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_ktime_template,
+	TP_PROTO(uint32_t drm_id, ktime_t time),
+	TP_ARGS(drm_id, time),
+	TP_STRUCT__entry(
+		__field(	uint32_t,	drm_id	)
+		__field(	ktime_t,	time	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->time = time;
+	),
+	TP_printk("id=%u, time=%lld", __entry->drm_id,
+		  ktime_to_ms(__entry->time))
+);
+DEFINE_EVENT(dpu_enc_ktime_template, dpu_enc_vsync_event_work,
+	TP_PROTO(uint32_t drm_id, ktime_t time),
+	TP_ARGS(drm_id, time)
+);
+DEFINE_EVENT(dpu_enc_ktime_template, dpu_enc_early_kickoff,
+	TP_PROTO(uint32_t drm_id, ktime_t time),
+	TP_ARGS(drm_id, time)
+);
+
+DECLARE_EVENT_CLASS(dpu_id_event_template,
+	TP_PROTO(uint32_t drm_id, u32 event),
+	TP_ARGS(drm_id, event),
+	TP_STRUCT__entry(
+		__field(	uint32_t,	drm_id	)
+		__field(	u32,		event	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->event = event;
+	),
+	TP_printk("id=%u, event=%u", __entry->drm_id, __entry->event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_enc_frame_done_timeout,
+	TP_PROTO(uint32_t drm_id, u32 event),
+	TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_cb,
+	TP_PROTO(uint32_t drm_id, u32 event),
+	TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_handle_power_event,
+	TP_PROTO(uint32_t drm_id, u32 event),
+	TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_done,
+	TP_PROTO(uint32_t drm_id, u32 event),
+	TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_more_pending,
+	TP_PROTO(uint32_t drm_id, u32 event),
+	TP_ARGS(drm_id, event)
+);
+
+TRACE_EVENT(dpu_enc_wait_event_timeout,
+	TP_PROTO(uint32_t drm_id, int32_t hw_id, int rc, s64 time,
+		 s64 expected_time, int atomic_cnt),
+	TP_ARGS(drm_id, hw_id, rc, time, expected_time, atomic_cnt),
+	TP_STRUCT__entry(
+		__field(	uint32_t,	drm_id		)
+		__field(	int32_t,	hw_id		)
+		__field(	int,		rc		)
+		__field(	s64,		time		)
+		__field(	s64,		expected_time	)
+		__field(	int,		atomic_cnt	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->hw_id = hw_id;
+		__entry->rc = rc;
+		__entry->time = time;
+		__entry->expected_time = expected_time;
+		__entry->atomic_cnt = atomic_cnt;
+	),
+	TP_printk("id=%u, hw_id=%d, rc=%d, time=%lld, expected=%lld cnt=%d",
+		  __entry->drm_id, __entry->hw_id, __entry->rc, __entry->time,
+		  __entry->expected_time, __entry->atomic_cnt)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_irq_ctrl,
+	TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, bool enable,
+		 int refcnt),
+	TP_ARGS(drm_id, pp, enable, refcnt),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id	)
+		__field(	enum dpu_pingpong,	pp	)
+		__field(	bool,			enable	)
+		__field(	int,			refcnt	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->pp = pp;
+		__entry->enable = enable;
+		__entry->refcnt = refcnt;
+	),
+	TP_printk("id=%u, pp=%d, enable=%s, refcnt=%d", __entry->drm_id,
+		  __entry->pp, __entry->enable ? "true" : "false",
+		  __entry->refcnt)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_pp_tx_done,
+	TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, int new_count,
+		 u32 event),
+	TP_ARGS(drm_id, pp, new_count, event),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+		__field(	enum dpu_pingpong,	pp		)
+		__field(	int,			new_count	)
+		__field(	u32,			event		)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->pp = pp;
+		__entry->new_count = new_count;
+		__entry->event = event;
+	),
+	TP_printk("id=%u, pp=%d, new_count=%d, event=%u", __entry->drm_id,
+		  __entry->pp, __entry->new_count, __entry->event)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_pdone_timeout,
+	TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, int timeout_count,
+		 int kickoff_count, u32 event),
+	TP_ARGS(drm_id, pp, timeout_count, kickoff_count, event),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+		__field(	enum dpu_pingpong,	pp		)
+		__field(	int,			timeout_count	)
+		__field(	int,			kickoff_count	)
+		__field(	u32,			event		)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->pp = pp;
+		__entry->timeout_count = timeout_count;
+		__entry->kickoff_count = kickoff_count;
+		__entry->event = event;
+	),
+	TP_printk("id=%u, pp=%d, timeout_count=%d, kickoff_count=%d, event=%u",
+		  __entry->drm_id, __entry->pp, __entry->timeout_count,
+		  __entry->kickoff_count, __entry->event)
+);
+
+TRACE_EVENT(dpu_enc_phys_vid_post_kickoff,
+	TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx),
+	TP_ARGS(drm_id, intf_idx),
+	TP_STRUCT__entry(
+		__field(	uint32_t,	drm_id			)
+		__field(	enum dpu_intf,	intf_idx		)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->intf_idx = intf_idx;
+	),
+	TP_printk("id=%u, intf_idx=%d", __entry->drm_id, __entry->intf_idx)
+);
+
+TRACE_EVENT(dpu_enc_phys_vid_irq_ctrl,
+	TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx, bool enable,
+		 int refcnt),
+	TP_ARGS(drm_id, intf_idx, enable, refcnt),
+	TP_STRUCT__entry(
+		__field(	uint32_t,	drm_id		)
+		__field(	enum dpu_intf,	intf_idx	)
+		__field(	bool,		enable		)
+		__field(	int,		refcnt		)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->intf_idx = intf_idx;
+		__entry->enable = enable;
+		__entry->refcnt = refcnt;
+	),
+	TP_printk("id=%u, intf_idx=%d enable=%s refcnt=%d", __entry->drm_id,
+		  __entry->intf_idx, __entry->enable ? "true" : "false",
+		  __entry->refcnt)
+);
+
+TRACE_EVENT(dpu_crtc_setup_mixer,
+	TP_PROTO(uint32_t crtc_id, uint32_t plane_id,
+		 struct drm_plane_state *state, struct dpu_plane_state *pstate,
+		 uint32_t stage_idx, enum dpu_sspp sspp, uint32_t pixel_format,
+		 uint64_t modifier),
+	TP_ARGS(crtc_id, plane_id, state, pstate, stage_idx, sspp,
+		pixel_format, modifier),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		crtc_id		)
+		__field(	uint32_t,		plane_id	)
+		__field(	struct drm_plane_state*,state		)
+		__field(	struct dpu_plane_state*,pstate		)
+		__field(	uint32_t,		stage_idx	)
+		__field(	enum dpu_sspp,		sspp		)
+		__field(	uint32_t,		pixel_format	)
+		__field(	uint64_t,		modifier	)
+	),
+	TP_fast_assign(
+		__entry->crtc_id = crtc_id;
+		__entry->plane_id = plane_id;
+		__entry->state = state;
+		__entry->pstate = pstate;
+		__entry->stage_idx = stage_idx;
+		__entry->sspp = sspp;
+		__entry->pixel_format = pixel_format;
+		__entry->modifier = modifier;
+	),
+	TP_printk("crtc_id:%u plane_id:%u fb_id:%u src:{%ux%u+%ux%u} "
+		  "dst:{%ux%u+%ux%u} stage_idx:%u stage:%d, sspp:%d "
+		  "multirect_index:%d multirect_mode:%u pix_format:%u "
+		  "modifier:%llu",
+		  __entry->crtc_id, __entry->plane_id,
+		  __entry->state->fb ? __entry->state->fb->base.id : -1,
+		  __entry->state->src_w >> 16,  __entry->state->src_h >> 16,
+		  __entry->state->src_x >> 16,  __entry->state->src_y >> 16,
+		  __entry->state->crtc_w,  __entry->state->crtc_h,
+		  __entry->state->crtc_x,  __entry->state->crtc_y,
+		  __entry->stage_idx, __entry->pstate->stage, __entry->sspp,
+		  __entry->pstate->multirect_index,
+		  __entry->pstate->multirect_mode, __entry->pixel_format,
+		  __entry->modifier)
+);
+
+TRACE_EVENT(dpu_crtc_setup_lm_bounds,
+	TP_PROTO(uint32_t drm_id, int mixer, struct drm_rect *bounds),
+	TP_ARGS(drm_id, mixer, bounds),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id	)
+		__field(	int,			mixer	)
+		__field(	struct drm_rect *,	bounds	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->mixer = mixer;
+		__entry->bounds = bounds;
+	),
+	TP_printk("id:%u mixer:%d bounds:" DRM_RECT_FMT, __entry->drm_id,
+		  __entry->mixer, DRM_RECT_ARG(__entry->bounds))
+);
+
+TRACE_EVENT(dpu_crtc_vblank_enable,
+	TP_PROTO(uint32_t drm_id, uint32_t enc_id, bool enable,
+		 struct dpu_crtc *crtc),
+	TP_ARGS(drm_id, enc_id, enable, crtc),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id	)
+		__field(	uint32_t,		enc_id	)
+		__field(	bool,			enable	)
+		__field(	struct dpu_crtc *,	crtc	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->enc_id = enc_id;
+		__entry->enable = enable;
+		__entry->crtc = crtc;
+	),
+	TP_printk("id:%u encoder:%u enable:%s state{enabled:%s suspend:%s "
+		  "vblank_req:%s}",
+		  __entry->drm_id, __entry->enc_id,
+		  __entry->enable ? "true" : "false",
+		  __entry->crtc->enabled ? "true" : "false",
+		  __entry->crtc->suspend ? "true" : "false",
+		  __entry->crtc->vblank_requested ? "true" : "false")
+);
+
+DECLARE_EVENT_CLASS(dpu_crtc_enable_template,
+	TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+	TP_ARGS(drm_id, enable, crtc),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id	)
+		__field(	bool,			enable	)
+		__field(	struct dpu_crtc *,	crtc	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->enable = enable;
+		__entry->crtc = crtc;
+	),
+	TP_printk("id:%u enable:%s state{enabled:%s suspend:%s vblank_req:%s}",
+		  __entry->drm_id, __entry->enable ? "true" : "false",
+		  __entry->crtc->enabled ? "true" : "false",
+		  __entry->crtc->suspend ? "true" : "false",
+		  __entry->crtc->vblank_requested ? "true" : "false")
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_set_suspend,
+	TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+	TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_enable,
+	TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+	TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_disable,
+	TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+	TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_vblank,
+	TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+	TP_ARGS(drm_id, enable, crtc)
+);
+
+TRACE_EVENT(dpu_crtc_disable_frame_pending,
+	TP_PROTO(uint32_t drm_id, int frame_pending),
+	TP_ARGS(drm_id, frame_pending),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+		__field(	int,			frame_pending	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->frame_pending = frame_pending;
+	),
+	TP_printk("id:%u frame_pending:%d", __entry->drm_id,
+		  __entry->frame_pending)
+);
+
+TRACE_EVENT(dpu_plane_set_scanout,
+	TP_PROTO(enum dpu_sspp index, struct dpu_hw_fmt_layout *layout,
+		 enum dpu_sspp_multirect_index multirect_index),
+	TP_ARGS(index, layout, multirect_index),
+	TP_STRUCT__entry(
+		__field(	enum dpu_sspp,			index	)
+		__field(	struct dpu_hw_fmt_layout*,	layout	)
+		__field(	enum dpu_sspp_multirect_index,	multirect_index)
+	),
+	TP_fast_assign(
+		__entry->index = index;
+		__entry->layout = layout;
+		__entry->multirect_index = multirect_index;
+	),
+	TP_printk("index:%d layout:{%ux%u @ [%u/%u, %u/%u, %u/%u, %u/%u]} "
+		  "multirect_index:%d", __entry->index, __entry->layout->width,
+		  __entry->layout->height, __entry->layout->plane_addr[0],
+		  __entry->layout->plane_size[0],
+		  __entry->layout->plane_addr[1],
+		  __entry->layout->plane_size[1],
+		  __entry->layout->plane_addr[2],
+		  __entry->layout->plane_size[2],
+		  __entry->layout->plane_addr[3],
+		  __entry->layout->plane_size[3], __entry->multirect_index)
+);
+
+TRACE_EVENT(dpu_plane_disable,
+	TP_PROTO(uint32_t drm_id, bool is_virtual, uint32_t multirect_mode),
+	TP_ARGS(drm_id, is_virtual, multirect_mode),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+		__field(	bool,			is_virtual	)
+		__field(	uint32_t,		multirect_mode	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->is_virtual = is_virtual;
+		__entry->multirect_mode = multirect_mode;
+	),
+	TP_printk("id:%u is_virtual:%s multirect_mode:%u", __entry->drm_id,
+		  __entry->is_virtual ? "true" : "false",
+		  __entry->multirect_mode)
+);
+
+DECLARE_EVENT_CLASS(dpu_rm_iter_template,
+	TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+	TP_ARGS(id, type, enc_id),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		id	)
+		__field(	enum dpu_hw_blk_type,	type	)
+		__field(	uint32_t,		enc_id	)
+	),
+	TP_fast_assign(
+		__entry->id = id;
+		__entry->type = type;
+		__entry->enc_id = enc_id;
+	),
+	TP_printk("id:%d type:%d enc_id:%u", __entry->id, __entry->type,
+		  __entry->enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_cdm,
+	TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+	TP_ARGS(id, type, enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_intf,
+	TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+	TP_ARGS(id, type, enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_ctls,
+	TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+	TP_ARGS(id, type, enc_id)
+);
+
+TRACE_EVENT(dpu_rm_reserve_lms,
+	TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id,
+		 uint32_t pp_id),
+	TP_ARGS(id, type, enc_id, pp_id),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		id	)
+		__field(	enum dpu_hw_blk_type,	type	)
+		__field(	uint32_t,		enc_id	)
+		__field(	uint32_t,		pp_id	)
+	),
+	TP_fast_assign(
+		__entry->id = id;
+		__entry->type = type;
+		__entry->enc_id = enc_id;
+		__entry->pp_id = pp_id;
+	),
+	TP_printk("id:%d type:%d enc_id:%u pp_id:%u", __entry->id,
+		  __entry->type, __entry->enc_id, __entry->pp_id)
+);
+
+TRACE_EVENT(dpu_vbif_wait_xin_halt_fail,
+	TP_PROTO(enum dpu_vbif index, u32 xin_id),
+	TP_ARGS(index, xin_id),
+	TP_STRUCT__entry(
+		__field(	enum dpu_vbif,	index	)
+		__field(	u32,		xin_id	)
+	),
+	TP_fast_assign(
+		__entry->index = index;
+		__entry->xin_id = xin_id;
+	),
+	TP_printk("index:%d xin_id:%u", __entry->index, __entry->xin_id)
+);
+
+TRACE_EVENT(dpu_pp_connect_ext_te,
+	TP_PROTO(enum dpu_pingpong pp, u32 cfg),
+	TP_ARGS(pp, cfg),
+	TP_STRUCT__entry(
+		__field(	enum dpu_pingpong,	pp	)
+		__field(	u32,			cfg	)
+	),
+	TP_fast_assign(
+		__entry->pp = pp;
+		__entry->cfg = cfg;
+	),
+	TP_printk("pp:%d cfg:%u", __entry->pp, __entry->cfg)
+);
+
+DECLARE_EVENT_CLASS(dpu_core_irq_idx_cnt_template,
+	TP_PROTO(int irq_idx, int enable_count),
+	TP_ARGS(irq_idx, enable_count),
+	TP_STRUCT__entry(
+		__field(	int,	irq_idx		)
+		__field(	int,	enable_count	)
+	),
+	TP_fast_assign(
+		__entry->irq_idx = irq_idx;
+		__entry->enable_count = enable_count;
+	),
+	TP_printk("irq_idx:%d enable_count:%u", __entry->irq_idx,
+		  __entry->enable_count)
+);
+DEFINE_EVENT(dpu_core_irq_idx_cnt_template, dpu_core_irq_enable_idx,
+	TP_PROTO(int irq_idx, int enable_count),
+	TP_ARGS(irq_idx, enable_count)
+);
+DEFINE_EVENT(dpu_core_irq_idx_cnt_template, dpu_core_irq_disable_idx,
+	TP_PROTO(int irq_idx, int enable_count),
+	TP_ARGS(irq_idx, enable_count)
+);
+
+DECLARE_EVENT_CLASS(dpu_core_irq_callback_template,
+	TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+	TP_ARGS(irq_idx, callback),
+	TP_STRUCT__entry(
+		__field(	int,				irq_idx	)
+		__field(	struct dpu_irq_callback *,	callback)
+	),
+	TP_fast_assign(
+		__entry->irq_idx = irq_idx;
+		__entry->callback = callback;
+	),
+	TP_printk("irq_idx:%d callback:%pK", __entry->irq_idx,
+		  __entry->callback)
+);
+DEFINE_EVENT(dpu_core_irq_callback_template, dpu_core_irq_register_callback,
+	TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+	TP_ARGS(irq_idx, callback)
+);
+DEFINE_EVENT(dpu_core_irq_callback_template, dpu_core_irq_unregister_callback,
+	TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+	TP_ARGS(irq_idx, callback)
+);
+
+TRACE_EVENT(dpu_core_perf_update_clk,
+	TP_PROTO(struct drm_device *dev, bool stop_req, u64 clk_rate),
+	TP_ARGS(dev, stop_req, clk_rate),
+	TP_STRUCT__entry(
+		__field(	struct drm_device *,	dev		)
+		__field(	bool,			stop_req	)
+		__field(	u64,			clk_rate	)
+	),
+	TP_fast_assign(
+		__entry->dev = dev;
+		__entry->stop_req = stop_req;
+		__entry->clk_rate = clk_rate;
+	),
+	TP_printk("dev:%s stop_req:%s clk_rate:%llu", __entry->dev->unique,
+		  __entry->stop_req ? "true" : "false", __entry->clk_rate)
+);
+
+#define DPU_ATRACE_END(name) trace_tracing_mark_write(current->tgid, name, 0)
+#define DPU_ATRACE_BEGIN(name) trace_tracing_mark_write(current->tgid, name, 1)
+#define DPU_ATRACE_FUNC() DPU_ATRACE_BEGIN(__func__)
+
+#define DPU_ATRACE_INT(name, value) \
+	trace_dpu_trace_counter(current->tgid, name, value)
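+
+/*
+ * Typical usage (the marker and counter names here are only illustrative):
+ *
+ *	DPU_ATRACE_BEGIN("frame_commit");
+ *	... work being profiled ...
+ *	DPU_ATRACE_END("frame_commit");
+ *	DPU_ATRACE_INT("frame_pending", frame_pending);
+ */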
+
+#endif /* _DPU_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
new file mode 100644
index 0000000..2955282
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -0,0 +1,384 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+
+#include "dpu_vbif.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_trace.h"
+
+/**
+ * _dpu_vbif_wait_for_xin_halt - wait for the xin to halt
+ * @vbif:	Pointer to hardware vbif driver
+ * @xin_id:	Client interface identifier
+ * @return:	0 if success; error code otherwise
+ */
+static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id)
+{
+	ktime_t timeout;
+	bool status;
+	int rc;
+
+	if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) {
+		DPU_ERROR("invalid arguments vbif %d\n", vbif != 0);
+		return -EINVAL;
+	}
+
+	timeout = ktime_add_us(ktime_get(), vbif->cap->xin_halt_timeout);
+	for (;;) {
+		status = vbif->ops.get_halt_ctrl(vbif, xin_id);
+		if (status)
+			break;
+		if (ktime_compare_safe(ktime_get(), timeout) > 0) {
+			status = vbif->ops.get_halt_ctrl(vbif, xin_id);
+			break;
+		}
+		usleep_range(501, 1000);
+	}
+
+	if (!status) {
+		rc = -ETIMEDOUT;
+		DPU_ERROR("VBIF %d client %d not halting. TIMEDOUT.\n",
+				vbif->idx - VBIF_0, xin_id);
+	} else {
+		rc = 0;
+		DPU_DEBUG("VBIF %d client %d is halted\n",
+				vbif->idx - VBIF_0, xin_id);
+	}
+
+	return rc;
+}
+
+/**
+ * _dpu_vbif_apply_dynamic_ot_limit - determine OT based on usecase parameters
+ * @vbif:	Pointer to hardware vbif driver
+ * @ot_lim:	Pointer to OT limit to be modified
+ * @params:	Pointer to usecase parameters
+ */
+static void _dpu_vbif_apply_dynamic_ot_limit(struct dpu_hw_vbif *vbif,
+		u32 *ot_lim, struct dpu_vbif_set_ot_params *params)
+{
+	u64 pps;
+	const struct dpu_vbif_dynamic_ot_tbl *tbl;
+	u32 i;
+
+	if (!vbif || !(vbif->cap->features & BIT(DPU_VBIF_QOS_OTLIM)))
+		return;
+
+	/* Dynamic OT setting done only for WFD */
+	if (!params->is_wfd)
+		return;
+
+	pps = params->frame_rate;
+	pps *= params->width;
+	pps *= params->height;
+
+	tbl = params->rd ? &vbif->cap->dynamic_ot_rd_tbl :
+			&vbif->cap->dynamic_ot_wr_tbl;
+
+	for (i = 0; i < tbl->count; i++) {
+		if (pps <= tbl->cfg[i].pps) {
+			*ot_lim = tbl->cfg[i].ot_limit;
+			break;
+		}
+	}
+
+	DPU_DEBUG("vbif:%d xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
+			vbif->idx - VBIF_0, params->xin_id,
+			params->width, params->height, params->frame_rate,
+			pps, *ot_lim);
+}
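+
+/*
+ * Worked example with hypothetical numbers: a 1920x1080 WFD layer at
+ * 30 fps yields pps = 30 * 1920 * 1080 = 62208000, and the first table
+ * row with cfg[i].pps >= that value supplies the OT limit.
+ */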
+
+/**
+ * _dpu_vbif_get_ot_limit - get OT based on usecase & configuration parameters
+ * @vbif:	Pointer to hardware vbif driver
+ * @params:	Pointer to usecase parameters
+ * @return:	OT limit
+ */
+static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif,
+	struct dpu_vbif_set_ot_params *params)
+{
+	u32 ot_lim = 0;
+	u32 val;
+
+	if (!vbif || !vbif->cap) {
+		DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
+		/* return type is u32; 0 means "no OT limit to program" */
+		return 0;
+	}
+
+	if (vbif->cap->default_ot_wr_limit && !params->rd)
+		ot_lim = vbif->cap->default_ot_wr_limit;
+	else if (vbif->cap->default_ot_rd_limit && params->rd)
+		ot_lim = vbif->cap->default_ot_rd_limit;
+
+	/*
+	 * If default ot is not set from dt/catalog,
+	 * then do not configure it.
+	 */
+	if (ot_lim == 0)
+		goto exit;
+
+	/* Modify the limits if the target and the use case requires it */
+	_dpu_vbif_apply_dynamic_ot_limit(vbif, &ot_lim, params);
+
+	if (vbif && vbif->ops.get_limit_conf) {
+		val = vbif->ops.get_limit_conf(vbif,
+				params->xin_id, params->rd);
+		if (val == ot_lim)
+			ot_lim = 0;
+	}
+
+exit:
+	DPU_DEBUG("vbif:%d xin:%d ot_lim:%d\n",
+			vbif->idx - VBIF_0, params->xin_id, ot_lim);
+	return ot_lim;
+}
+
+/**
+ * dpu_vbif_set_ot_limit - set OT based on usecase & configuration parameters
+ * @dpu_kms:	DPU handler
+ * @params:	Pointer to usecase parameters
+ *
+ * Note: this function blocks while waiting for the bus to halt.
+ */
+void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
+		struct dpu_vbif_set_ot_params *params)
+{
+	struct dpu_hw_vbif *vbif = NULL;
+	struct dpu_hw_mdp *mdp;
+	bool forced_on = false;
+	u32 ot_lim;
+	int ret, i;
+
+	if (!dpu_kms || !params) {
+		DPU_ERROR("invalid arguments\n");
+		return;
+	}
+	mdp = dpu_kms->hw_mdp;
+
+	for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+		if (dpu_kms->hw_vbif[i] &&
+				dpu_kms->hw_vbif[i]->idx == params->vbif_idx)
+			vbif = dpu_kms->hw_vbif[i];
+	}
+
+	if (!vbif || !mdp) {
+		DPU_DEBUG("invalid arguments vbif %d mdp %d\n",
+				vbif != 0, mdp != 0);
+		return;
+	}
+
+	if (!mdp->ops.setup_clk_force_ctrl ||
+			!vbif->ops.set_limit_conf ||
+			!vbif->ops.set_halt_ctrl)
+		return;
+
+	/* set write_gather_en for all write clients */
+	if (vbif->ops.set_write_gather_en && !params->rd)
+		vbif->ops.set_write_gather_en(vbif, params->xin_id);
+
+	ot_lim = _dpu_vbif_get_ot_limit(vbif, params) & 0xFF;
+
+	if (ot_lim == 0)
+		goto exit;
+
+	trace_dpu_perf_set_ot(params->num, params->xin_id, ot_lim,
+		params->vbif_idx);
+
+	forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
+
+	vbif->ops.set_limit_conf(vbif, params->xin_id, params->rd, ot_lim);
+
+	vbif->ops.set_halt_ctrl(vbif, params->xin_id, true);
+
+	ret = _dpu_vbif_wait_for_xin_halt(vbif, params->xin_id);
+	if (ret)
+		trace_dpu_vbif_wait_xin_halt_fail(vbif->idx, params->xin_id);
+
+	vbif->ops.set_halt_ctrl(vbif, params->xin_id, false);
+
+	if (forced_on)
+		mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
+exit:
+	return;
+}
+
+void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
+		struct dpu_vbif_set_qos_params *params)
+{
+	struct dpu_hw_vbif *vbif = NULL;
+	struct dpu_hw_mdp *mdp;
+	bool forced_on = false;
+	const struct dpu_vbif_qos_tbl *qos_tbl;
+	int i;
+
+	if (!dpu_kms || !params || !dpu_kms->hw_mdp) {
+		DPU_ERROR("invalid arguments\n");
+		return;
+	}
+	mdp = dpu_kms->hw_mdp;
+
+	for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+		if (dpu_kms->hw_vbif[i] &&
+				dpu_kms->hw_vbif[i]->idx == params->vbif_idx) {
+			vbif = dpu_kms->hw_vbif[i];
+			break;
+		}
+	}
+
+	if (!vbif || !vbif->cap) {
+		DPU_ERROR("invalid vbif %d\n", params->vbif_idx);
+		return;
+	}
+
+	if (!vbif->ops.set_qos_remap || !mdp->ops.setup_clk_force_ctrl) {
+		DPU_DEBUG("qos remap not supported\n");
+		return;
+	}
+
+	qos_tbl = params->is_rt ? &vbif->cap->qos_rt_tbl :
+			&vbif->cap->qos_nrt_tbl;
+
+	if (!qos_tbl->npriority_lvl || !qos_tbl->priority_lvl) {
+		DPU_DEBUG("qos tbl not defined\n");
+		return;
+	}
+
+	forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
+
+	for (i = 0; i < qos_tbl->npriority_lvl; i++) {
+		DPU_DEBUG("vbif:%d xin:%d lvl:%d/%d\n",
+				params->vbif_idx, params->xin_id, i,
+				qos_tbl->priority_lvl[i]);
+		vbif->ops.set_qos_remap(vbif, params->xin_id, i,
+				qos_tbl->priority_lvl[i]);
+	}
+
+	if (forced_on)
+		mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
+}
+
+void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms)
+{
+	struct dpu_hw_vbif *vbif;
+	u32 i, pnd, src;
+
+	if (!dpu_kms) {
+		DPU_ERROR("invalid argument\n");
+		return;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+		vbif = dpu_kms->hw_vbif[i];
+		if (vbif && vbif->ops.clear_errors) {
+			vbif->ops.clear_errors(vbif, &pnd, &src);
+			if (pnd || src) {
+				DRM_DEBUG_KMS("VBIF %d: pnd 0x%X, src 0x%X\n",
+					      vbif->idx - VBIF_0, pnd, src);
+			}
+		}
+	}
+}
+
+void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms)
+{
+	struct dpu_hw_vbif *vbif;
+	int i, j;
+
+	if (!dpu_kms) {
+		DPU_ERROR("invalid argument\n");
+		return;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+		vbif = dpu_kms->hw_vbif[i];
+		if (vbif && vbif->cap && vbif->ops.set_mem_type) {
+			for (j = 0; j < vbif->cap->memtype_count; j++)
+				vbif->ops.set_mem_type(
+						vbif, j, vbif->cap->memtype[j]);
+		}
+	}
+}
+
+#ifdef CONFIG_DEBUG_FS
+void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
+{
+	debugfs_remove_recursive(dpu_kms->debugfs_vbif);
+	dpu_kms->debugfs_vbif = NULL;
+}
+
+int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
+{
+	char vbif_name[32];
+	struct dentry *debugfs_vbif;
+	int i, j;
+
+	dpu_kms->debugfs_vbif = debugfs_create_dir("vbif", debugfs_root);
+	if (!dpu_kms->debugfs_vbif) {
+		DPU_ERROR("failed to create vbif debugfs\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+		struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
+
+		snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
+
+		debugfs_vbif = debugfs_create_dir(vbif_name,
+				dpu_kms->debugfs_vbif);
+
+		debugfs_create_u32("features", 0600, debugfs_vbif,
+			(u32 *)&vbif->features);
+
+		debugfs_create_u32("xin_halt_timeout", 0400, debugfs_vbif,
+			(u32 *)&vbif->xin_halt_timeout);
+
+		debugfs_create_u32("default_rd_ot_limit", 0400, debugfs_vbif,
+			(u32 *)&vbif->default_ot_rd_limit);
+
+		debugfs_create_u32("default_wr_ot_limit", 0400, debugfs_vbif,
+			(u32 *)&vbif->default_ot_wr_limit);
+
+		for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
+			struct dpu_vbif_dynamic_ot_cfg *cfg =
+					&vbif->dynamic_ot_rd_tbl.cfg[j];
+
+			snprintf(vbif_name, sizeof(vbif_name),
+					"dynamic_ot_rd_%d_pps", j);
+			debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
+					(u64 *)&cfg->pps);
+			snprintf(vbif_name, sizeof(vbif_name),
+					"dynamic_ot_rd_%d_ot_limit", j);
+			debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
+					(u32 *)&cfg->ot_limit);
+		}
+
+		for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
+			struct dpu_vbif_dynamic_ot_cfg *cfg =
+					&vbif->dynamic_ot_wr_tbl.cfg[j];
+
+			snprintf(vbif_name, sizeof(vbif_name),
+					"dynamic_ot_wr_%d_pps", j);
+			debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
+					(u64 *)&cfg->pps);
+			snprintf(vbif_name, sizeof(vbif_name),
+					"dynamic_ot_wr_%d_ot_limit", j);
+			debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
+					(u32 *)&cfg->ot_limit);
+		}
+	}
+
+	return 0;
+}
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
new file mode 100644
index 0000000..f17af52
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
@@ -0,0 +1,94 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_VBIF_H__
+#define __DPU_VBIF_H__
+
+#include "dpu_kms.h"
+
+struct dpu_vbif_set_ot_params {
+	u32 xin_id;
+	u32 num;
+	u32 width;
+	u32 height;
+	u32 frame_rate;
+	bool rd;
+	bool is_wfd;
+	u32 vbif_idx;
+	u32 clk_ctrl;
+};
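+
+/*
+ * Example (illustrative values) of requesting an OT limit for a read
+ * client; xin_id, vbif_idx and clk_ctrl must come from the catalog:
+ *
+ *	struct dpu_vbif_set_ot_params ot_params = {
+ *		.xin_id = 0,
+ *		.width = 1920,
+ *		.height = 1080,
+ *		.frame_rate = 30,
+ *		.rd = true,
+ *	};
+ *	dpu_vbif_set_ot_limit(dpu_kms, &ot_params);
+ */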
+
+struct dpu_vbif_set_memtype_params {
+	u32 xin_id;
+	u32 vbif_idx;
+	u32 clk_ctrl;
+	bool is_cacheable;
+};
+
+/**
+ * struct dpu_vbif_set_qos_params - QoS remapper parameter
+ * @vbif_idx: vbif identifier
+ * @xin_id: client interface identifier
+ * @clk_ctrl: clock control identifier of the xin
+ * @num: pipe identifier (debug only)
+ * @is_rt: true if pipe is used in real-time use case
+ */
+struct dpu_vbif_set_qos_params {
+	u32 vbif_idx;
+	u32 xin_id;
+	u32 clk_ctrl;
+	u32 num;
+	bool is_rt;
+};
+
+/**
+ * dpu_vbif_set_ot_limit - set OT limit for vbif client
+ * @dpu_kms:	DPU handler
+ * @params:	Pointer to OT configuration parameters
+ */
+void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
+		struct dpu_vbif_set_ot_params *params);
+
+/**
+ * dpu_vbif_set_qos_remap - set QoS priority level remap
+ * @dpu_kms:	DPU handler
+ * @params:	Pointer to QoS configuration parameters
+ */
+void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
+		struct dpu_vbif_set_qos_params *params);
+
+/**
+ * dpu_vbif_clear_errors - clear any vbif errors
+ * @dpu_kms:	DPU handler
+ */
+void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_vbif_init_memtypes - initialize xin memory types for vbif
+ * @dpu_kms:	DPU handler
+ */
+void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms);
+
+#ifdef CONFIG_DEBUG_FS
+int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
+void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms);
+#else
+static inline int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms,
+		struct dentry *debugfs_root)
+{
+	return 0;
+}
+static inline void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
+{
+}
+#endif
+#endif /* __DPU_VBIF_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
new file mode 100644
index 0000000..4f12e5c
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
@@ -0,0 +1,1376 @@
+#ifndef __MEDIA_INFO_H__
+#define __MEDIA_INFO_H__
+
+#ifndef MSM_MEDIA_ALIGN
+#define MSM_MEDIA_ALIGN(__sz, __align) (((__align) & ((__align) - 1)) ?\
+	((((__sz) + (__align) - 1) / (__align)) * (__align)) :\
+	(((__sz) + (__align) - 1) & (~((__align) - 1))))
+#endif
+
+#ifndef MSM_MEDIA_ROUNDUP
+#define MSM_MEDIA_ROUNDUP(__sz, __r) (((__sz) + ((__r) - 1)) / (__r))
+#endif
+
+#ifndef MSM_MEDIA_MAX
+#define MSM_MEDIA_MAX(__a, __b) ((__a) > (__b)?(__a):(__b))
+#endif
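+
+/*
+ * Example values: MSM_MEDIA_ALIGN(1080, 32) == 1088 (power-of-two mask
+ * path), MSM_MEDIA_ALIGN(100, 24) == 120 (divide/multiply path) and
+ * MSM_MEDIA_ROUNDUP(1920, 48) == 40.
+ */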
+
+enum color_fmts {
+	/* Venus NV12:
+	 * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+	 * by an interleaved U/V plane containing 8 bit 2x2 subsampled
+	 * colour difference samples.
+	 *
+	 * <-------- Y/UV_Stride -------->
+	 * <------- Width ------->
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  ^           ^
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  Height      |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |          Y_Scanlines
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              V
+	 * U V U V U V U V U V U V . . . .  ^
+	 * U V U V U V U V U V U V . . . .  |
+	 * U V U V U V U V U V U V . . . .  |
+	 * U V U V U V U V U V U V . . . .  UV_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  --> Buffer size alignment
+	 *
+	 * Y_Stride : Width aligned to 128
+	 * UV_Stride : Width aligned to 128
+	 * Y_Scanlines: Height aligned to 32
+	 * UV_Scanlines: Height/2 aligned to 16
+	 * Extradata: Arbitrary (software-imposed) padding
+	 * Total size = align(Y_Stride * Y_Scanlines
+	 *          + UV_Stride * UV_Scanlines
+	 *          + max(Extradata, Y_Stride * 8), 4096)
+	 */
+	COLOR_FMT_NV12,
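+
+	/*
+	 * Worked example for 1920x1080 NV12, assuming the 16 KiB extradata
+	 * returned by VENUS_EXTRADATA_SIZE():
+	 * Y_Stride = UV_Stride = align(1920, 128) = 1920
+	 * Y_Scanlines = align(1080, 32) = 1088
+	 * UV_Scanlines = align(540, 16) = 544
+	 * Total size = align(1920 * 1088 + 1920 * 544
+	 *          + max(16384, 1920 * 8), 4096) = 3149824 bytes
+	 */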
+
+	/* Venus NV21:
+	 * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+	 * by an interleaved V/U plane containing 8 bit 2x2 subsampled
+	 * colour difference samples.
+	 *
+	 * <-------- Y/UV_Stride -------->
+	 * <------- Width ------->
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  ^           ^
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  Height      |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |          Y_Scanlines
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              V
+	 * V U V U V U V U V U V U . . . .  ^
+	 * V U V U V U V U V U V U . . . .  |
+	 * V U V U V U V U V U V U . . . .  |
+	 * V U V U V U V U V U V U . . . .  UV_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  --> Padding & Buffer size alignment
+	 *
+	 * Y_Stride : Width aligned to 128
+	 * UV_Stride : Width aligned to 128
+	 * Y_Scanlines: Height aligned to 32
+	 * UV_Scanlines: Height/2 aligned to 16
+	 * Extradata: Arbitrary (software-imposed) padding
+	 * Total size = align(Y_Stride * Y_Scanlines
+	 *          + UV_Stride * UV_Scanlines
+	 *          + max(Extradata, Y_Stride * 8), 4096)
+	 */
+	COLOR_FMT_NV21,
+	/* Venus NV12_MVTB:
+	 * Two YUV 4:2:0 images/views one after the other
+	 * in a top-bottom layout, same as NV12
+	 * with a plane of 8 bit Y samples followed
+	 * by an interleaved U/V plane containing 8 bit 2x2 subsampled
+	 * colour difference samples.
+	 *
+	 *
+	 * <-------- Y/UV_Stride -------->
+	 * <------- Width ------->
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  ^           ^               ^
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  Height      |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |          Y_Scanlines      |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  V           |               |
+	 * . . . . . . . . . . . . . . . .              |             View_1
+	 * . . . . . . . . . . . . . . . .              |               |
+	 * . . . . . . . . . . . . . . . .              |               |
+	 * . . . . . . . . . . . . . . . .              V               |
+	 * U V U V U V U V U V U V . . . .  ^                           |
+	 * U V U V U V U V U V U V . . . .  |                           |
+	 * U V U V U V U V U V U V . . . .  |                           |
+	 * U V U V U V U V U V U V . . . .  UV_Scanlines                |
+	 * . . . . . . . . . . . . . . . .  |                           |
+	 * . . . . . . . . . . . . . . . .  V                           V
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  ^           ^               ^
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  Height      |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |          Y_Scanlines      |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  V           |               |
+	 * . . . . . . . . . . . . . . . .              |             View_2
+	 * . . . . . . . . . . . . . . . .              |               |
+	 * . . . . . . . . . . . . . . . .              |               |
+	 * . . . . . . . . . . . . . . . .              V               |
+	 * U V U V U V U V U V U V . . . .  ^                           |
+	 * U V U V U V U V U V U V . . . .  |                           |
+	 * U V U V U V U V U V U V . . . .  |                           |
+	 * U V U V U V U V U V U V . . . .  UV_Scanlines                |
+	 * . . . . . . . . . . . . . . . .  |                           |
+	 * . . . . . . . . . . . . . . . .  V                           V
+	 * . . . . . . . . . . . . . . . .  --> Buffer size alignment
+	 *
+	 * Y_Stride : Width aligned to 128
+	 * UV_Stride : Width aligned to 128
+	 * Y_Scanlines: Height aligned to 32
+	 * UV_Scanlines: Height/2 aligned to 16
+	 * View_1 begin at: 0 (zero)
+	 * View_2 begin at: Y_Stride * Y_Scanlines + UV_Stride * UV_Scanlines
+	 * Extradata: Arbitrary (software-imposed) padding
+	 * Total size = align((2*(Y_Stride * Y_Scanlines)
+	 *          + 2*(UV_Stride * UV_Scanlines) + Extradata), 4096)
+	 */
+	COLOR_FMT_NV12_MVTB,
+	/*
+	 * The buffer can be of 2 types:
+	 * (1) Venus NV12 UBWC Progressive
+	 * (2) Venus NV12 UBWC Interlaced
+	 *
+	 * (1) Venus NV12 UBWC Progressive Buffer Format:
+	 * Compressed Macro-tile format for NV12.
+	 * Contains 4 planes in the following order -
+	 * (A) Y_Meta_Plane
+	 * (B) Y_UBWC_Plane
+	 * (C) UV_Meta_Plane
+	 * (D) UV_UBWC_Plane
+	 *
+	 * Y_Meta_Plane consists of meta information to decode compressed
+	 * tile data in Y_UBWC_Plane.
+	 * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+	 * UBWC decoder block will use the Y_Meta_Plane data together with
+	 * Y_UBWC_Plane data to produce loss-less uncompressed 8 bit Y samples.
+	 *
+	 * UV_Meta_Plane consists of meta information to decode compressed
+	 * tile data in UV_UBWC_Plane.
+	 * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+	 * UBWC decoder block will use UV_Meta_Plane data together with
+	 * UV_UBWC_Plane data to produce loss-less uncompressed 8 bit 2x2
+	 * subsampled color difference samples.
+	 *
+	 * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+	 * and randomly accessible. There is no dependency between tiles.
+	 *
+	 * <----- Y_Meta_Stride ---->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      Height      |
+	 * M M M M M M M M M M M M . .      |         Meta_Y_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <--Compressed tile Y Stride--->
+	 * <------- Width ------->
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  ^           ^
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  Height      |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |        Macro_tile_Y_Scanlines
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 * <----- UV_Meta_Stride ---->
+	 * M M M M M M M M M M M M . .      ^
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      M_UV_Scanlines
+	 * . . . . . . . . . . . . . .      |
+	 * . . . . . . . . . . . . . .      V
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * <--Compressed tile UV Stride--->
+	 * U* V* U* V* U* V* U* V* . . . .  ^
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  UV_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 *
+	 * Y_Stride = align(Width, 128)
+	 * UV_Stride = align(Width, 128)
+	 * Y_Scanlines = align(Height, 32)
+	 * UV_Scanlines = align(Height/2, 16)
+	 * Y_UBWC_Plane_size = align(Y_Stride * Y_Scanlines, 4096)
+	 * UV_UBWC_Plane_size = align(UV_Stride * UV_Scanlines, 4096)
+	 * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+	 * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+	 * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+	 * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+	 * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+	 * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+	 * Extradata = 8k
+	 *
+	 * Total size = align( Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+	 *           Y_Meta_Plane_size + UV_Meta_Plane_size
+	 *           + max(Extradata, Y_Stride * 48), 4096)
+	 *
+	 *
+	 * (2) Venus NV12 UBWC Interlaced Buffer Format:
+	 * Compressed Macro-tile format for NV12 interlaced.
+	 * Contains 8 planes in the following order -
+	 * (A) Y_Meta_Top_Field_Plane
+	 * (B) Y_UBWC_Top_Field_Plane
+	 * (C) UV_Meta_Top_Field_Plane
+	 * (D) UV_UBWC_Top_Field_Plane
+	 * (E) Y_Meta_Bottom_Field_Plane
+	 * (F) Y_UBWC_Bottom_Field_Plane
+	 * (G) UV_Meta_Bottom_Field_Plane
+	 * (H) UV_UBWC_Bottom_Field_Plane
+	 * Y_Meta_Top_Field_Plane consists of meta information to decode
+	 * compressed tile data for Y_UBWC_Top_Field_Plane.
+	 * Y_UBWC_Top_Field_Plane consists of Y data in compressed macro-tile
+	 * format for top field of an interlaced frame.
+	 * UBWC decoder block will use the Y_Meta_Top_Field_Plane data together
+	 * with Y_UBWC_Top_Field_Plane data to produce loss-less uncompressed
+	 * 8 bit Y samples for top field of an interlaced frame.
+	 *
+	 * UV_Meta_Top_Field_Plane consists of meta information to decode
+	 * compressed tile data in UV_UBWC_Top_Field_Plane.
+	 * UV_UBWC_Top_Field_Plane consists of UV data in compressed macro-tile
+	 * format for top field of an interlaced frame.
+	 * UBWC decoder block will use UV_Meta_Top_Field_Plane data together
+	 * with UV_UBWC_Top_Field_Plane data to produce loss-less uncompressed
+	 * 8 bit subsampled color difference samples for top field of an
+	 * interlaced frame.
+	 *
+	 * Each tile in Y_UBWC_Top_Field_Plane/UV_UBWC_Top_Field_Plane is
+	 * independently decodable and randomly accessible. There is no
+	 * dependency between tiles.
+	 *
+	 * Y_Meta_Bottom_Field_Plane consists of meta information to decode
+	 * compressed tile data for Y_UBWC_Bottom_Field_Plane.
+	 * Y_UBWC_Bottom_Field_Plane consists of Y data in compressed macro-tile
+	 * format for bottom field of an interlaced frame.
+	 * UBWC decoder block will use the Y_Meta_Bottom_Field_Plane data
+	 * together with Y_UBWC_Bottom_Field_Plane data to produce loss-less
+	 * uncompressed 8 bit Y samples for bottom field of an interlaced frame.
+	 *
+	 * UV_Meta_Bottom_Field_Plane consists of meta information to decode
+	 * compressed tile data in UV_UBWC_Bottom_Field_Plane.
+	 * UV_UBWC_Bottom_Field_Plane consists of UV data in compressed
+	 * macro-tile format for bottom field of an interlaced frame.
+	 * UBWC decoder block will use UV_Meta_Bottom_Field_Plane data together
+	 * with UV_UBWC_Bottom_Field_Plane data to produce loss-less
+	 * uncompressed 8 bit subsampled color difference samples for bottom
+	 * field of an interlaced frame.
+	 *
+	 * Each tile in Y_UBWC_Bottom_Field_Plane/UV_UBWC_Bottom_Field_Plane is
+	 * independently decodable and randomly accessible. There is no
+	 * dependency between tiles.
+	 *
+	 * <-----Y_TF_Meta_Stride---->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . . Half_height      |
+	 * M M M M M M M M M M M M . .      |         Meta_Y_TF_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <-Compressed tile Y_TF Stride->
+	 * <------- Width ------->
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  ^           ^
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height  |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |        Macro_tile_Y_TF_Scanlines
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 * <----UV_TF_Meta_Stride---->
+	 * M M M M M M M M M M M M . .      ^
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      M_UV_TF_Scanlines
+	 * . . . . . . . . . . . . . .      |
+	 * . . . . . . . . . . . . . .      V
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * <-Compressed tile UV_TF Stride->
+	 * U* V* U* V* U* V* U* V* . . . .  ^
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  UV_TF_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 * <-----Y_BF_Meta_Stride---->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . . Half_height      |
+	 * M M M M M M M M M M M M . .      |         Meta_Y_BF_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <-Compressed tile Y_BF Stride->
+	 * <------- Width ------->
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  ^           ^
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height  |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |        Macro_tile_Y_BF_Scanlines
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 * <----UV_BF_Meta_Stride---->
+	 * M M M M M M M M M M M M . .      ^
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      M_UV_BF_Scanlines
+	 * . . . . . . . . . . . . . .      |
+	 * . . . . . . . . . . . . . .      V
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * <-Compressed tile UV_BF Stride->
+	 * U* V* U* V* U* V* U* V* . . . .  ^
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  UV_BF_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 *
+	 * Half_height = (Height+1)>>1
+	 * Y_TF_Stride = align(Width, 128)
+	 * UV_TF_Stride = align(Width, 128)
+	 * Y_TF_Scanlines = align(Half_height, 32)
+	 * UV_TF_Scanlines = align((Half_height+1)/2, 32)
+	 * Y_UBWC_TF_Plane_size = align(Y_TF_Stride * Y_TF_Scanlines, 4096)
+	 * UV_UBWC_TF_Plane_size = align(UV_TF_Stride * UV_TF_Scanlines, 4096)
+	 * Y_TF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+	 * Y_TF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16)
+	 * Y_TF_Meta_Plane_size =
+	 *     align(Y_TF_Meta_Stride * Y_TF_Meta_Scanlines, 4096)
+	 * UV_TF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+	 * UV_TF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16)
+	 * UV_TF_Meta_Plane_size =
+	 *     align(UV_TF_Meta_Stride * UV_TF_Meta_Scanlines, 4096)
+	 * Y_BF_Stride = align(Width, 128)
+	 * UV_BF_Stride = align(Width, 128)
+	 * Y_BF_Scanlines = align(Half_height, 32)
+	 * UV_BF_Scanlines = align((Half_height+1)/2, 32)
+	 * Y_UBWC_BF_Plane_size = align(Y_BF_Stride * Y_BF_Scanlines, 4096)
+	 * UV_UBWC_BF_Plane_size = align(UV_BF_Stride * UV_BF_Scanlines, 4096)
+	 * Y_BF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+	 * Y_BF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16)
+	 * Y_BF_Meta_Plane_size =
+	 *     align(Y_BF_Meta_Stride * Y_BF_Meta_Scanlines, 4096)
+	 * UV_BF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+	 * UV_BF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16)
+	 * UV_BF_Meta_Plane_size =
+	 *     align(UV_BF_Meta_Stride * UV_BF_Meta_Scanlines, 4096)
+	 * Extradata = 8k
+	 *
+	 * Total size = align(Y_UBWC_TF_Plane_size + UV_UBWC_TF_Plane_size +
+	 *           Y_TF_Meta_Plane_size + UV_TF_Meta_Plane_size +
+	 *           Y_UBWC_BF_Plane_size + UV_UBWC_BF_Plane_size +
+	 *           Y_BF_Meta_Plane_size + UV_BF_Meta_Plane_size +
+	 *           max(Extradata, Y_TF_Stride * 48), 4096)
+	 */
+	COLOR_FMT_NV12_UBWC,
+	/* Venus NV12 10-bit UBWC:
+	 * Compressed Macro-tile format for NV12.
+	 * Contains 4 planes in the following order -
+	 * (A) Y_Meta_Plane
+	 * (B) Y_UBWC_Plane
+	 * (C) UV_Meta_Plane
+	 * (D) UV_UBWC_Plane
+	 *
+	 * Y_Meta_Plane consists of meta information to decode compressed
+	 * tile data in Y_UBWC_Plane.
+	 * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+	 * UBWC decoder block will use the Y_Meta_Plane data together with
+	 * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples.
+	 *
+	 * UV_Meta_Plane consists of meta information to decode compressed
+	 * tile data in UV_UBWC_Plane.
+	 * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+	 * UBWC decoder block will use UV_Meta_Plane data together with
+	 * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2
+	 * subsampled color difference samples.
+	 *
+	 * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+	 * and randomly accessible. There is no dependency between tiles.
+	 *
+	 * <----- Y_Meta_Stride ----->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      Height      |
+	 * M M M M M M M M M M M M . .      |         Meta_Y_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <--Compressed tile Y Stride--->
+	 * <------- Width ------->
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  ^           ^
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  Height      |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |        Macro_tile_Y_Scanlines
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 * <----- UV_Meta_Stride ---->
+	 * M M M M M M M M M M M M . .      ^
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      M_UV_Scanlines
+	 * . . . . . . . . . . . . . .      |
+	 * . . . . . . . . . . . . . .      V
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * <--Compressed tile UV Stride--->
+	 * U* V* U* V* U* V* U* V* . . . .  ^
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  UV_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 *
+	 *
+	 * Y_Stride = align(Width * 4/3, 128)
+	 * UV_Stride = align(Width * 4/3, 128)
+	 * Y_Scanlines = align(Height, 32)
+	 * UV_Scanlines = align(Height/2, 16)
+	 * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
+	 * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096)
+	 * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+	 * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+	 * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+	 * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+	 * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+	 * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+	 * Extradata = 8k
+	 *
+	 * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+	 *           Y_Meta_Plane_size + UV_Meta_Plane_size
+	 *           + max(Extradata, Y_Stride * 48), 4096)
+	 */
+	COLOR_FMT_NV12_BPP10_UBWC,
+	/* Venus RGBA8888 format:
+	 * Contains 1 plane in the following order -
+	 * (A) RGBA plane
+	 *
+	 * <-------- RGB_Stride -------->
+	 * <------- Width ------->
+	 * R R R R R R R R R R R R . . . .  ^           ^
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  Height      |
+	 * R R R R R R R R R R R R . . . .  |       RGB_Scanlines
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              V
+	 *
+	 * RGB_Stride = align(Width * 4, 128)
+	 * RGB_Scanlines = align(Height, 32)
+	 * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+	 * Extradata = 8k
+	 *
+	 * Total size = align(RGB_Plane_size + Extradata, 4096)
+	 */
+	COLOR_FMT_RGBA8888,
+	/* Venus RGBA8888 UBWC format:
+	 * Contains 2 planes in the following order -
+	 * (A) Meta plane
+	 * (B) RGBA plane
+	 *
+	 * <--- RGB_Meta_Stride ---->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      Height      |
+	 * M M M M M M M M M M M M . .      |       Meta_RGB_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <-------- RGB_Stride -------->
+	 * <------- Width ------->
+	 * R R R R R R R R R R R R . . . .  ^           ^
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  Height      |
+	 * R R R R R R R R R R R R . . . .  |       RGB_Scanlines
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .    -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 *
+	 * RGB_Stride = align(Width * 4, 128)
+	 * RGB_Scanlines = align(Height, 32)
+	 * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+	 * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+	 * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16)
+	 * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+	 *		RGB_Meta_Scanlines, 4096)
+	 * Extradata = 8k
+	 *
+	 * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+	 *		Extradata, 4096)
+	 */
+	COLOR_FMT_RGBA8888_UBWC,
+	/* Venus RGBA1010102 UBWC format:
+	 * Contains 2 planes in the following order -
+	 * (A) Meta plane
+	 * (B) RGBA plane
+	 *
+	 * <--- RGB_Meta_Stride ---->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      Height      |
+	 * M M M M M M M M M M M M . .      |       Meta_RGB_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <-------- RGB_Stride -------->
+	 * <------- Width ------->
+	 * R R R R R R R R R R R R . . . .  ^           ^
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  Height      |
+	 * R R R R R R R R R R R R . . . .  |       RGB_Scanlines
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .    -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 *
+	 * RGB_Stride = align(Width * 4, 256)
+	 * RGB_Scanlines = align(Height, 16)
+	 * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+	 * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+	 * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16)
+	 * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+	 *		RGB_Meta_Scanlines, 4096)
+	 * Extradata = 8k
+	 *
+	 * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+	 *		Extradata, 4096)
+	 */
+	COLOR_FMT_RGBA1010102_UBWC,
+	/* Venus RGB565 UBWC format:
+	 * Contains 2 planes in the following order -
+	 * (A) Meta plane
+	 * (B) RGB plane
+	 *
+	 * <--- RGB_Meta_Stride ---->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      Height      |
+	 * M M M M M M M M M M M M . .      |       Meta_RGB_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <-------- RGB_Stride -------->
+	 * <------- Width ------->
+	 * R R R R R R R R R R R R . . . .  ^           ^
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  Height      |
+	 * R R R R R R R R R R R R . . . .  |       RGB_Scanlines
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .    -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 *
+	 * RGB_Stride = align(Width * 2, 128)
+	 * RGB_Scanlines = align(Height, 16)
+	 * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+	 * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+	 * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16)
+	 * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+	 *		RGB_Meta_Scanlines, 4096)
+	 * Extradata = 8k
+	 *
+	 * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+	 *		Extradata, 4096)
+	 */
+	COLOR_FMT_RGB565_UBWC,
+	/* P010 UBWC:
+	 * Compressed Macro-tile format for NV12.
+	 * Contains 4 planes in the following order -
+	 * (A) Y_Meta_Plane
+	 * (B) Y_UBWC_Plane
+	 * (C) UV_Meta_Plane
+	 * (D) UV_UBWC_Plane
+	 *
+	 * Y_Meta_Plane consists of meta information to decode compressed
+	 * tile data in Y_UBWC_Plane.
+	 * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+	 * UBWC decoder block will use the Y_Meta_Plane data together with
+	 * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples.
+	 *
+	 * UV_Meta_Plane consists of meta information to decode compressed
+	 * tile data in UV_UBWC_Plane.
+	 * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+	 * UBWC decoder block will use UV_Meta_Plane data together with
+	 * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2
+	 * subsampled color difference samples.
+	 *
+	 * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+	 * and randomly accessible. There is no dependency between tiles.
+	 *
+	 * <----- Y_Meta_Stride ----->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      Height      |
+	 * M M M M M M M M M M M M . .      |         Meta_Y_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <--Compressed tile Y Stride--->
+	 * <------- Width ------->
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  ^           ^
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  Height      |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |        Macro_tile_Y_Scanlines
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 * <----- UV_Meta_Stride ---->
+	 * M M M M M M M M M M M M . .      ^
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      M_UV_Scanlines
+	 * . . . . . . . . . . . . . .      |
+	 * . . . . . . . . . . . . . .      V
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * <--Compressed tile UV Stride--->
+	 * U* V* U* V* U* V* U* V* . . . .  ^
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  UV_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 *
+	 *
+	 * Y_Stride = align(Width * 2, 256)
+	 * UV_Stride = align(Width * 2, 256)
+	 * Y_Scanlines = align(Height, 16)
+	 * UV_Scanlines = align(Height/2, 16)
+	 * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
+	 * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096)
+	 * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+	 * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+	 * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+	 * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+	 * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+	 * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+	 * Extradata = 8k
+	 *
+	 * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+	 *           Y_Meta_Plane_size + UV_Meta_Plane_size
+	 *           + max(Extradata, Y_Stride * 48), 4096)
+	 */
+	COLOR_FMT_P010_UBWC,
+	/* Venus P010:
+	 * YUV 4:2:0 image with a plane of 10 bit Y samples followed
+	 * by an interleaved U/V plane containing 10 bit 2x2 subsampled
+	 * colour difference samples.
+	 *
+	 * <-------- Y/UV_Stride -------->
+	 * <------- Width ------->
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  ^           ^
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  Height      |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |          Y_Scanlines
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              V
+	 * U V U V U V U V U V U V . . . .  ^
+	 * U V U V U V U V U V U V . . . .  |
+	 * U V U V U V U V U V U V . . . .  |
+	 * U V U V U V U V U V U V . . . .  UV_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  --> Buffer size alignment
+	 *
+	 * Y_Stride : Width * 2 aligned to 128
+	 * UV_Stride : Width * 2 aligned to 128
+	 * Y_Scanlines: Height aligned to 32
+	 * UV_Scanlines: Height/2 aligned to 16
+	 * Extradata: Arbitrary (software-imposed) padding
+	 * Total size = align(Y_Stride * Y_Scanlines
+	 *          + UV_Stride * UV_Scanlines
+	 *          + max(Extradata, Y_Stride * 8), 4096)
+	 */
+	COLOR_FMT_P010,
+};
+
+#define COLOR_FMT_RGBA1010102_UBWC	COLOR_FMT_RGBA1010102_UBWC
+#define COLOR_FMT_RGB565_UBWC		COLOR_FMT_RGB565_UBWC
+#define COLOR_FMT_P010_UBWC		COLOR_FMT_P010_UBWC
+#define COLOR_FMT_P010		COLOR_FMT_P010
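+
+/*
+ * The self-referential defines above make the newer formats visible to
+ * the preprocessor so callers can feature-test them, e.g.:
+ *
+ *	#ifdef COLOR_FMT_P010
+ *		fmt = COLOR_FMT_P010;
+ *	#endif
+ */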
+
+static inline unsigned int VENUS_EXTRADATA_SIZE(int width, int height)
+{
+	(void)height;
+	(void)width;
+
+	/*
+	 * In the future this could be computed from the width/height, but
+	 * for now it is hardcoded since 16K satisfies all current usecases.
+	 */
+	return 16 * 1024;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_Y_STRIDE(int color_fmt, int width)
+{
+	unsigned int alignment, stride = 0;
+
+	if (!width)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV21:
+	case COLOR_FMT_NV12:
+	case COLOR_FMT_NV12_MVTB:
+	case COLOR_FMT_NV12_UBWC:
+		alignment = 128;
+		stride = MSM_MEDIA_ALIGN(width, alignment);
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+		alignment = 256;
+		stride = MSM_MEDIA_ALIGN(width, 192);
+		stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+		break;
+	case COLOR_FMT_P010_UBWC:
+		alignment = 256;
+		stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+		break;
+	case COLOR_FMT_P010:
+		alignment = 128;
+		stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+		break;
+	default:
+		break;
+	}
+invalid_input:
+	return stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_UV_STRIDE(int color_fmt, int width)
+{
+	unsigned int alignment, stride = 0;
+
+	if (!width)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV21:
+	case COLOR_FMT_NV12:
+	case COLOR_FMT_NV12_MVTB:
+	case COLOR_FMT_NV12_UBWC:
+		alignment = 128;
+		stride = MSM_MEDIA_ALIGN(width, alignment);
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+		alignment = 256;
+		stride = MSM_MEDIA_ALIGN(width, 192);
+		stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+		break;
+	case COLOR_FMT_P010_UBWC:
+		alignment = 256;
+		stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+		break;
+	case COLOR_FMT_P010:
+		alignment = 128;
+		stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+		break;
+	default:
+		break;
+	}
+invalid_input:
+	return stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
+{
+	unsigned int alignment, sclines = 0;
+
+	if (!height)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV21:
+	case COLOR_FMT_NV12:
+	case COLOR_FMT_NV12_MVTB:
+	case COLOR_FMT_NV12_UBWC:
+	case COLOR_FMT_P010:
+		alignment = 32;
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+	case COLOR_FMT_P010_UBWC:
+		alignment = 16;
+		break;
+	default:
+		return 0;
+	}
+	sclines = MSM_MEDIA_ALIGN(height, alignment);
+invalid_input:
+	return sclines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
+{
+	unsigned int alignment, sclines = 0;
+
+	if (!height)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV21:
+	case COLOR_FMT_NV12:
+	case COLOR_FMT_NV12_MVTB:
+	case COLOR_FMT_NV12_BPP10_UBWC:
+	case COLOR_FMT_P010_UBWC:
+	case COLOR_FMT_P010:
+		alignment = 16;
+		break;
+	case COLOR_FMT_NV12_UBWC:
+		alignment = 32;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	sclines = MSM_MEDIA_ALIGN((height+1)>>1, alignment);
+
+invalid_input:
+	return sclines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
+{
+	int y_tile_width = 0, y_meta_stride = 0;
+
+	if (!width)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV12_UBWC:
+	case COLOR_FMT_P010_UBWC:
+		y_tile_width = 32;
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+		y_tile_width = 48;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	y_meta_stride = MSM_MEDIA_ROUNDUP(width, y_tile_width);
+	y_meta_stride = MSM_MEDIA_ALIGN(y_meta_stride, 64);
+
+invalid_input:
+	return y_meta_stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
+{
+	int y_tile_height = 0, y_meta_scanlines = 0;
+
+	if (!height)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV12_UBWC:
+		y_tile_height = 8;
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+	case COLOR_FMT_P010_UBWC:
+		y_tile_height = 4;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	y_meta_scanlines = MSM_MEDIA_ROUNDUP(height, y_tile_height);
+	y_meta_scanlines = MSM_MEDIA_ALIGN(y_meta_scanlines, 16);
+
+invalid_input:
+	return y_meta_scanlines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
+{
+	int uv_tile_width = 0, uv_meta_stride = 0;
+
+	if (!width)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV12_UBWC:
+	case COLOR_FMT_P010_UBWC:
+		uv_tile_width = 16;
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+		uv_tile_width = 24;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	uv_meta_stride = MSM_MEDIA_ROUNDUP((width+1)>>1, uv_tile_width);
+	uv_meta_stride = MSM_MEDIA_ALIGN(uv_meta_stride, 64);
+
+invalid_input:
+	return uv_meta_stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
+{
+	int uv_tile_height = 0, uv_meta_scanlines = 0;
+
+	if (!height)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV12_UBWC:
+		uv_tile_height = 8;
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+	case COLOR_FMT_P010_UBWC:
+		uv_tile_height = 4;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	uv_meta_scanlines = MSM_MEDIA_ROUNDUP((height+1)>>1, uv_tile_height);
+	uv_meta_scanlines = MSM_MEDIA_ALIGN(uv_meta_scanlines, 16);
+
+invalid_input:
+	return uv_meta_scanlines;
+}
+
+static inline unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
+{
+	unsigned int alignment = 0, stride = 0, bpp = 4;
+
+	if (!width)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_RGBA8888:
+		alignment = 128;
+		break;
+	case COLOR_FMT_RGB565_UBWC:
+		alignment = 256;
+		bpp = 2;
+		break;
+	case COLOR_FMT_RGBA8888_UBWC:
+	case COLOR_FMT_RGBA1010102_UBWC:
+		alignment = 256;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	stride = MSM_MEDIA_ALIGN(width * bpp, alignment);
+
+invalid_input:
+	return stride;
+}
+
+static inline unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
+{
+	unsigned int alignment = 0, scanlines = 0;
+
+	if (!height)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_RGBA8888:
+		alignment = 32;
+		break;
+	case COLOR_FMT_RGBA8888_UBWC:
+	case COLOR_FMT_RGBA1010102_UBWC:
+	case COLOR_FMT_RGB565_UBWC:
+		alignment = 16;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	scanlines = MSM_MEDIA_ALIGN(height, alignment);
+
+invalid_input:
+	return scanlines;
+}
+
+static inline unsigned int VENUS_RGB_META_STRIDE(int color_fmt, int width)
+{
+	int rgb_tile_width = 0, rgb_meta_stride = 0;
+
+	if (!width)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_RGBA8888_UBWC:
+	case COLOR_FMT_RGBA1010102_UBWC:
+	case COLOR_FMT_RGB565_UBWC:
+		rgb_tile_width = 16;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, rgb_tile_width);
+	rgb_meta_stride = MSM_MEDIA_ALIGN(rgb_meta_stride, 64);
+
+invalid_input:
+	return rgb_meta_stride;
+}
+
+static inline unsigned int VENUS_RGB_META_SCANLINES(int color_fmt, int height)
+{
+	int rgb_tile_height = 0, rgb_meta_scanlines = 0;
+
+	if (!height)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_RGBA8888_UBWC:
+	case COLOR_FMT_RGBA1010102_UBWC:
+	case COLOR_FMT_RGB565_UBWC:
+		rgb_tile_height = 4;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, rgb_tile_height);
+	rgb_meta_scanlines = MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16);
+
+invalid_input:
+	return rgb_meta_scanlines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ * @height
+ * Progressive: height
+ * Interlaced: height
+ */
+static inline unsigned int VENUS_BUFFER_SIZE(
+	int color_fmt, int width, int height)
+{
+	const unsigned int extra_size = VENUS_EXTRADATA_SIZE(width, height);
+	unsigned int uv_alignment = 0, size = 0;
+	unsigned int y_plane, uv_plane, y_stride,
+		uv_stride, y_sclines, uv_sclines;
+	unsigned int y_ubwc_plane = 0, uv_ubwc_plane = 0;
+	unsigned int y_meta_stride = 0, y_meta_scanlines = 0;
+	unsigned int uv_meta_stride = 0, uv_meta_scanlines = 0;
+	unsigned int y_meta_plane = 0, uv_meta_plane = 0;
+	unsigned int rgb_stride = 0, rgb_scanlines = 0;
+	unsigned int rgb_plane = 0, rgb_ubwc_plane = 0, rgb_meta_plane = 0;
+	unsigned int rgb_meta_stride = 0, rgb_meta_scanlines = 0;
+
+	if (!width || !height)
+		goto invalid_input;
+
+	y_stride = VENUS_Y_STRIDE(color_fmt, width);
+	uv_stride = VENUS_UV_STRIDE(color_fmt, width);
+	y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
+	uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
+	rgb_stride = VENUS_RGB_STRIDE(color_fmt, width);
+	rgb_scanlines = VENUS_RGB_SCANLINES(color_fmt, height);
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV21:
+	case COLOR_FMT_NV12:
+	case COLOR_FMT_P010:
+		uv_alignment = 4096;
+		y_plane = y_stride * y_sclines;
+		uv_plane = uv_stride * uv_sclines + uv_alignment;
+		size = y_plane + uv_plane +
+				MSM_MEDIA_MAX(extra_size, 8 * y_stride);
+		size = MSM_MEDIA_ALIGN(size, 4096);
+		break;
+	case COLOR_FMT_NV12_MVTB:
+		uv_alignment = 4096;
+		y_plane = y_stride * y_sclines;
+		uv_plane = uv_stride * uv_sclines + uv_alignment;
+		size = y_plane + uv_plane;
+		size = 2 * size + extra_size;
+		size = MSM_MEDIA_ALIGN(size, 4096);
+		break;
+	case COLOR_FMT_NV12_UBWC:
+		y_sclines = VENUS_Y_SCANLINES(color_fmt, (height+1)>>1);
+		y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+		uv_sclines = VENUS_UV_SCANLINES(color_fmt, (height+1)>>1);
+		uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+		y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+		y_meta_scanlines =
+			VENUS_Y_META_SCANLINES(color_fmt, (height+1)>>1);
+		y_meta_plane = MSM_MEDIA_ALIGN(
+			y_meta_stride * y_meta_scanlines, 4096);
+		uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+		uv_meta_scanlines =
+			VENUS_UV_META_SCANLINES(color_fmt, (height+1)>>1);
+		uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+			uv_meta_scanlines, 4096);
+
+		size = (y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+			uv_meta_plane)*2 +
+			MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
+		size = MSM_MEDIA_ALIGN(size, 4096);
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+		y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+		uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+		y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+		y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
+		y_meta_plane = MSM_MEDIA_ALIGN(
+				y_meta_stride * y_meta_scanlines, 4096);
+		uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+		uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
+		uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+					uv_meta_scanlines, 4096);
+
+		size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+			uv_meta_plane +
+			MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
+		size = MSM_MEDIA_ALIGN(size, 4096);
+		break;
+	case COLOR_FMT_P010_UBWC:
+		y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+		uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+		y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+		y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
+		y_meta_plane = MSM_MEDIA_ALIGN(
+				y_meta_stride * y_meta_scanlines, 4096);
+		uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+		uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
+		uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+					uv_meta_scanlines, 4096);
+
+		size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+			uv_meta_plane;
+		size = MSM_MEDIA_ALIGN(size, 4096);
+		break;
+	case COLOR_FMT_RGBA8888:
+		rgb_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines, 4096);
+		size = rgb_plane;
+		size = MSM_MEDIA_ALIGN(size, 4096);
+		break;
+	case COLOR_FMT_RGBA8888_UBWC:
+	case COLOR_FMT_RGBA1010102_UBWC:
+	case COLOR_FMT_RGB565_UBWC:
+		rgb_ubwc_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines,
+							4096);
+		rgb_meta_stride = VENUS_RGB_META_STRIDE(color_fmt, width);
+		rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color_fmt,
+					height);
+		rgb_meta_plane = MSM_MEDIA_ALIGN(rgb_meta_stride *
+					rgb_meta_scanlines, 4096);
+		size = rgb_ubwc_plane + rgb_meta_plane;
+		size = MSM_MEDIA_ALIGN(size, 4096);
+		break;
+	default:
+		break;
+	}
+invalid_input:
+	return size;
+}
+
+static inline unsigned int VENUS_VIEW2_OFFSET(
+	int color_fmt, int width, int height)
+{
+	unsigned int offset = 0;
+	unsigned int y_plane, uv_plane, y_stride,
+		uv_stride, y_sclines, uv_sclines;
+	if (!width || !height)
+		goto invalid_input;
+
+	y_stride = VENUS_Y_STRIDE(color_fmt, width);
+	uv_stride = VENUS_UV_STRIDE(color_fmt, width);
+	y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
+	uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
+	switch (color_fmt) {
+	case COLOR_FMT_NV12_MVTB:
+		y_plane = y_stride * y_sclines;
+		uv_plane = uv_stride * uv_sclines;
+		offset = y_plane + uv_plane;
+		break;
+	default:
+		break;
+	}
+invalid_input:
+	return offset;
+}
+
+#endif
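
Note: as a sanity check on the arithmetic in this header, the NV12 sizing
works out as follows for a 1920x1080 progressive frame (a hand computation,
assuming MSM_MEDIA_ALIGN(sz, align) rounds sz up to a multiple of align and
MSM_MEDIA_MAX picks the larger argument; neither macro is visible in this
hunk):

    y_stride   = MSM_MEDIA_ALIGN(1920, 128);            /* 1920 */
    uv_stride  = MSM_MEDIA_ALIGN(1920, 128);            /* 1920 */
    y_sclines  = MSM_MEDIA_ALIGN(1080, 32);             /* 1088 */
    uv_sclines = MSM_MEDIA_ALIGN((1080 + 1) >> 1, 16);  /* 544  */
    y_plane  = y_stride * y_sclines;                    /* 2088960 */
    uv_plane = uv_stride * uv_sclines + 4096;           /* 1048576 */
    /* extra_size = 16384 from VENUS_EXTRADATA_SIZE(); 8 * y_stride = 15360 */
    size = y_plane + uv_plane + MSM_MEDIA_MAX(16384, 8 * y_stride);
    size = MSM_MEDIA_ALIGN(size, 4096);                 /* 3153920, page aligned */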
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index 89cb608..dc85ccc 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -952,12 +952,10 @@
 }
 
 static void dp_catalog_panel_config_msa(struct dp_catalog_panel *panel,
-					u32 rate, u32 stream_rate_khz,
-					bool fixed_nvid)
+					u32 rate, u32 stream_rate_khz)
 {
 	u32 pixel_m, pixel_n;
 	u32 mvid, nvid;
-	u64 mvid_calc;
 	u32 const nvid_fixed = 0x8000;
 	u32 const link_rate_hbr2 = 540000;
 	u32 const link_rate_hbr3 = 810000;
@@ -977,57 +975,39 @@
 	}
 
 	catalog = dp_catalog_get_priv(panel);
-	if (fixed_nvid) {
-		pr_debug("use fixed NVID=0x%x\n", nvid_fixed);
-		nvid = nvid_fixed;
+	io_data = catalog->io.dp_mmss_cc;
 
-		pr_debug("link rate=%dkbps, stream_rate_khz=%uKhz\n",
-			rate, stream_rate_khz);
+	if (panel->stream_id == DP_STREAM_1)
+		strm_reg_off = MMSS_DP_PIXEL1_M - MMSS_DP_PIXEL_M;
 
-		/*
-		 * For intermediate results, use 64 bit arithmetic to avoid
-		 * loss of precision.
-		 */
-		mvid_calc = (u64) stream_rate_khz * nvid;
-		mvid_calc = div_u64(mvid_calc, rate);
+	pixel_m = dp_read(catalog->exe_mode, io_data,
+			MMSS_DP_PIXEL_M + strm_reg_off);
+	pixel_n = dp_read(catalog->exe_mode, io_data,
+			MMSS_DP_PIXEL_N + strm_reg_off);
+	pr_debug("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n);
 
-		/*
-		 * truncate back to 32 bits as this final divided value will
-		 * always be within the range of a 32 bit unsigned int.
-		 */
-		mvid = (u32) mvid_calc;
+	mvid = (pixel_m & 0xFFFF) * 5;
+	nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
 
-		if (panel->widebus_en) {
-			mvid <<= 1;
-			nvid <<= 1;
-		}
-	} else {
-		io_data = catalog->io.dp_mmss_cc;
+	if (nvid < nvid_fixed) {
+		u32 temp;
 
-		if (panel->stream_id == DP_STREAM_1)
-			strm_reg_off = MMSS_DP_PIXEL1_M - MMSS_DP_PIXEL_M;
-
-		pixel_m = dp_read(catalog->exe_mode, io_data,
-				MMSS_DP_PIXEL_M + strm_reg_off);
-		pixel_n = dp_read(catalog->exe_mode, io_data,
-				MMSS_DP_PIXEL_N + strm_reg_off);
-		pr_debug("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n);
-
-		mvid = (pixel_m & 0xFFFF) * 5;
-		nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
-
-		pr_debug("rate = %d\n", rate);
-
-		if (panel->widebus_en)
-			mvid <<= 1;
-
-		if (link_rate_hbr2 == rate)
-			nvid *= 2;
-
-		if (link_rate_hbr3 == rate)
-			nvid *= 3;
+		temp = (nvid_fixed / nvid) * nvid;
+		mvid = (nvid_fixed / nvid) * mvid;
+		nvid = temp;
 	}
 
+	pr_debug("rate = %d\n", rate);
+
+	if (panel->widebus_en)
+		mvid <<= 1;
+
+	if (link_rate_hbr2 == rate)
+		nvid *= 2;
+
+	if (link_rate_hbr3 == rate)
+		nvid *= 3;
+
 	io_data = catalog->io.dp_link;
 
 	if (panel->stream_id == DP_STREAM_1) {
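
Note: the rewritten MSA path derives MVID/NVID from the live pixel-clock M/N
divider values instead of a fixed NVID, then scales both up toward the 0x8000
target. The temp variable exists so that mvid and nvid are multiplied by the
same integer factor, preserving their ratio. A hand-worked example with
made-up register values:

    /* Assume pixel_m = 0x02D0 (720) and pixel_n = 0xFC18,
     * so (~pixel_n & 0xFFFF) = 999.
     */
    mvid = 720 * 5;              /* 3600 */
    nvid = 999 + 720;            /* 1719, below nvid_fixed (0x8000) */
    factor = 0x8000 / nvid;      /* 32768 / 1719 = 19 */
    mvid *= factor;              /* 68400 */
    nvid *= factor;              /* 32661: same mvid/nvid ratio as before */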
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
index 9d536b4..78aec713 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
@@ -223,7 +223,7 @@
 	void (*config_spd)(struct dp_catalog_panel *panel);
 	void (*config_misc)(struct dp_catalog_panel *panel);
 	void (*config_msa)(struct dp_catalog_panel *panel,
-			u32 rate, u32 stream_rate_khz, bool fixed_nvid);
+			u32 rate, u32 stream_rate_khz);
 	void (*update_transfer_unit)(struct dp_catalog_panel *panel);
 	void (*config_ctrl)(struct dp_catalog_panel *panel, u32 cfg);
 	void (*config_dto)(struct dp_catalog_panel *panel, bool ack);
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog_v200.c b/drivers/gpu/drm/msm/dp/dp_catalog_v200.c
index da02f7a..132e50e 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog_v200.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog_v200.c
@@ -118,12 +118,10 @@
 }
 
 static void dp_catalog_panel_config_msa_v200(struct dp_catalog_panel *panel,
-					u32 rate, u32 stream_rate_khz,
-					bool fixed_nvid)
+					u32 rate, u32 stream_rate_khz)
 {
 	u32 pixel_m, pixel_n;
 	u32 mvid, nvid;
-	u64 mvid_calc;
 	u32 const nvid_fixed = 0x8000;
 	u32 const link_rate_hbr2 = 540000;
 	u32 const link_rate_hbr3 = 810000;
@@ -143,58 +141,40 @@
 	}
 
 	catalog = dp_catalog_get_priv_v200(panel);
-	if (fixed_nvid) {
-		pr_debug("use fixed NVID=0x%x\n", nvid_fixed);
-		nvid = nvid_fixed;
+	io_data = catalog->io->dp_mmss_cc;
 
-		pr_debug("link rate=%dkbps, stream_rate_khz=%uKhz\n",
-			rate, stream_rate_khz);
+	if (panel->stream_id == DP_STREAM_1)
+		strm_reg_off = MMSS_DP_PIXEL1_M_V200 -
+					MMSS_DP_PIXEL_M_V200;
 
-		/*
-		 * For intermediate results, use 64 bit arithmetic to avoid
-		 * loss of precision.
-		 */
-		mvid_calc = (u64) stream_rate_khz * nvid;
-		mvid_calc = div_u64(mvid_calc, rate);
+	pixel_m = dp_read(catalog->exe_mode, io_data,
+			MMSS_DP_PIXEL_M_V200 + strm_reg_off);
+	pixel_n = dp_read(catalog->exe_mode, io_data,
+			MMSS_DP_PIXEL_N_V200 + strm_reg_off);
+	pr_debug("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n);
 
-		/*
-		 * truncate back to 32 bits as this final divided value will
-		 * always be within the range of a 32 bit unsigned int.
-		 */
-		mvid = (u32) mvid_calc;
+	mvid = (pixel_m & 0xFFFF) * 5;
+	nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
 
-		if (panel->widebus_en) {
-			mvid <<= 1;
-			nvid <<= 1;
-		}
-	} else {
-		io_data = catalog->io->dp_mmss_cc;
+	if (nvid < nvid_fixed) {
+		u32 temp;
 
-		if (panel->stream_id == DP_STREAM_1)
-			strm_reg_off = MMSS_DP_PIXEL1_M_V200 -
-						MMSS_DP_PIXEL_M_V200;
-
-		pixel_m = dp_read(catalog->exe_mode, io_data,
-				MMSS_DP_PIXEL_M_V200 + strm_reg_off);
-		pixel_n = dp_read(catalog->exe_mode, io_data,
-				MMSS_DP_PIXEL_N_V200 + strm_reg_off);
-		pr_debug("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n);
-
-		mvid = (pixel_m & 0xFFFF) * 5;
-		nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
-
-		pr_debug("rate = %d\n", rate);
-
-		if (panel->widebus_en)
-			mvid <<= 1;
-
-		if (link_rate_hbr2 == rate)
-			nvid *= 2;
-
-		if (link_rate_hbr3 == rate)
-			nvid *= 3;
+		temp = (nvid_fixed / nvid) * nvid;
+		mvid = (nvid_fixed / nvid) * mvid;
+		nvid = temp;
 	}
 
+	pr_debug("rate = %d\n", rate);
+
+	if (panel->widebus_en)
+		mvid <<= 1;
+
+	if (link_rate_hbr2 == rate)
+		nvid *= 2;
+
+	if (link_rate_hbr3 == rate)
+		nvid *= 3;
+
 	io_data = catalog->io->dp_link;
 
 	if (panel->stream_id == DP_STREAM_1) {
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog_v420.c b/drivers/gpu/drm/msm/dp/dp_catalog_v420.c
index 50b8859..d5eebb4 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog_v420.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog_v420.c
@@ -122,12 +122,10 @@
 }
 
 static void dp_catalog_panel_config_msa_v420(struct dp_catalog_panel *panel,
-					u32 rate, u32 stream_rate_khz,
-					bool fixed_nvid)
+					u32 rate, u32 stream_rate_khz)
 {
 	u32 pixel_m, pixel_n;
 	u32 mvid, nvid, reg_off = 0, mvid_off = 0, nvid_off = 0;
-	u64 mvid_calc;
 	u32 const nvid_fixed = 0x8000;
 	u32 const link_rate_hbr2 = 540000;
 	u32 const link_rate_hbr3 = 810000;
@@ -145,57 +143,39 @@
 	}
 
 	catalog = dp_catalog_get_priv_v420(panel);
-	if (fixed_nvid) {
-		pr_debug("use fixed NVID=0x%x\n", nvid_fixed);
-		nvid = nvid_fixed;
+	io_data = catalog->io->dp_mmss_cc;
 
-		pr_debug("link rate=%dkbps, stream_rate_khz=%uKhz\n",
-			rate, stream_rate_khz);
+	if (panel->stream_id == DP_STREAM_1)
+		reg_off = MMSS_DP_PIXEL1_M_V420 - MMSS_DP_PIXEL_M_V420;
 
-		/*
-		 * For intermediate results, use 64 bit arithmetic to avoid
-		 * loss of precision.
-		 */
-		mvid_calc = (u64) stream_rate_khz * nvid;
-		mvid_calc = div_u64(mvid_calc, rate);
+	pixel_m = dp_read(catalog->exe_mode, io_data,
+			MMSS_DP_PIXEL_M_V420 + reg_off);
+	pixel_n = dp_read(catalog->exe_mode, io_data,
+			MMSS_DP_PIXEL_N_V420 + reg_off);
+	pr_debug("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n);
 
-		/*
-		 * truncate back to 32 bits as this final divided value will
-		 * always be within the range of a 32 bit unsigned int.
-		 */
-		mvid = (u32) mvid_calc;
+	mvid = (pixel_m & 0xFFFF) * 5;
+	nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
 
-		if (panel->widebus_en) {
-			mvid <<= 1;
-			nvid <<= 1;
-		}
-	} else {
-		io_data = catalog->io->dp_mmss_cc;
+	if (nvid < nvid_fixed) {
+		u32 temp;
 
-		if (panel->stream_id == DP_STREAM_1)
-			reg_off = MMSS_DP_PIXEL1_M_V420 - MMSS_DP_PIXEL_M_V420;
-
-		pixel_m = dp_read(catalog->exe_mode, io_data,
-				MMSS_DP_PIXEL_M_V420 + reg_off);
-		pixel_n = dp_read(catalog->exe_mode, io_data,
-				MMSS_DP_PIXEL_N_V420 + reg_off);
-		pr_debug("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n);
-
-		mvid = (pixel_m & 0xFFFF) * 5;
-		nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
-
-		pr_debug("rate = %d\n", rate);
-
-		if (panel->widebus_en)
-			mvid <<= 1;
-
-		if (link_rate_hbr2 == rate)
-			nvid *= 2;
-
-		if (link_rate_hbr3 == rate)
-			nvid *= 3;
+		temp = (nvid_fixed / nvid) * nvid;
+		mvid = (nvid_fixed / nvid) * mvid;
+		nvid = temp;
 	}
 
+	pr_debug("rate = %d\n", rate);
+
+	if (panel->widebus_en)
+		mvid <<= 1;
+
+	if (link_rate_hbr2 == rate)
+		nvid *= 2;
+
+	if (link_rate_hbr3 == rate)
+		nvid *= 3;
+
 	io_data = catalog->io->dp_link;
 
 	if (panel->stream_id == DP_STREAM_1) {
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 75a2f16..811ba98 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -1161,7 +1161,7 @@
 
 	ctrl->mst_mode = mst_mode;
 	ctrl->fec_mode = fec_mode;
-	rate = ctrl->panel->link_info.rate;
+	rate = ctrl->panel->get_optimal_link_rate(ctrl->panel);
 
 	if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
 		pr_debug("using phy test link parameters\n");
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index ccfa611f..f2ef730 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -1310,6 +1310,7 @@
 	const u32 num_components = 3, default_bpp = 24;
 	struct dp_display_private *dp;
 	struct dp_panel *dp_panel;
+	int rc;
 
 	if (!dp_display || !panel) {
 		pr_err("invalid input\n");
@@ -1334,7 +1335,14 @@
 			mode->timing.bpp, mode->timing.pixel_clk_khz);
 
 	dp_panel->pinfo = mode->timing;
-	dp_panel->init(dp_panel);
+	rc = dp_panel->init(dp_panel);
+
+	if (rc == -EAGAIN) {
+		dp->ctrl->off(dp->ctrl);
+		dp->ctrl->on(dp->ctrl, dp->mst.mst_active,
+			dp->panel->fec_en, false);
+	}
+
 	mutex_unlock(&dp->session_lock);
 
 	return 0;
@@ -1894,7 +1902,8 @@
 	if (free_dsc_blks >= required_dsc_blks)
 		dp_mode->capabilities |= DP_PANEL_CAPS_DSC;
 
-	pr_debug("in_use:%d, max:%d, free:%d, req:%d, caps:0x%x, width:%d\n",
+	if (dp_mode->capabilities & DP_PANEL_CAPS_DSC)
+		pr_debug("in_use:%d, max:%d, free:%d, req:%d, caps:0x%x, width:%d\n",
 			dp->tot_dsc_blks_in_use, dp->parser->max_dp_dsc_blks,
 			free_dsc_blks, required_dsc_blks, dp_mode->capabilities,
 			dp->parser->max_dp_dsc_input_width_pixs);
diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.c b/drivers/gpu/drm/msm/dp/dp_hpd.c
index 76a5f21..a48fe5f 100644
--- a/drivers/gpu/drm/msm/dp/dp_hpd.c
+++ b/drivers/gpu/drm/msm/dp/dp_hpd.c
@@ -46,23 +46,23 @@
 
 	if (parser->no_aux_switch && parser->lphw_hpd) {
 		dp_hpd = dp_lphw_hpd_get(dev, parser, catalog, cb);
-		if (!dp_hpd) {
+		if (IS_ERR(dp_hpd)) {
 			pr_err("failed to get lphw hpd\n");
 			return dp_hpd;
 		}
 		dp_hpd->type = DP_HPD_LPHW;
 	} else if (parser->no_aux_switch) {
 		dp_hpd = dp_gpio_hpd_get(dev, cb);
-		if (!dp_hpd) {
+		if (IS_ERR(dp_hpd)) {
 			pr_err("failed to get gpio hpd\n");
-			goto out;
+			return dp_hpd;
 		}
 		dp_hpd->type = DP_HPD_GPIO;
 	} else {
 		dp_hpd = dp_usbpd_get(dev, cb);
-		if (!dp_hpd) {
+		if (IS_ERR(dp_hpd)) {
 			pr_err("failed to get usbpd\n");
-			goto out;
+			return dp_hpd;
 		}
 		dp_hpd->type = DP_HPD_USBPD;
 	}
@@ -74,7 +74,6 @@
 	if (!dp_hpd->isr)
 		dp_hpd->isr		= dp_hpd_isr;
 
-out:
 	return dp_hpd;
 }
 
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 4e06924..15413f6 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -74,6 +74,9 @@
 	u8 spd_product_description[16];
 	u8 major;
 	u8 minor;
+	u32 bpp;
+	u32 active_pclk;
+	u32 optimal_link_rate;
 };
 
 static const struct dp_panel_info fail_safe = {
@@ -1764,12 +1767,50 @@
 	return 0;
 }
 
+static u32 dp_panel_get_optimal_link_rate(struct dp_panel *dp_panel)
+{
+	struct dp_panel_private *panel;
+	u32 lrate, rate = 0;
+
+	if (!dp_panel) {
+		pr_err("invalid input\n");
+		goto end;
+	}
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+	/*
+	 * As MST can support multiple streams,
+	 * do not optimize the link rate for MST.
+	 */
+	if (panel->dp_panel.mst_state) {
+		rate = panel->dp_panel.link_info.rate;
+		goto end;
+	}
+
+	lrate = ((panel->active_pclk / panel->dp_panel.link_info.num_lanes) *
+			panel->bpp) / 8;
+
+	if (lrate <= DP_LINK_RATE_RBR)
+		rate = DP_LINK_RATE_RBR;
+	else if (lrate <= DP_LINK_RATE_HBR)
+		rate = DP_LINK_RATE_HBR;
+	else if (lrate <= DP_LINK_RATE_HBR2)
+		rate = DP_LINK_RATE_HBR2;
+	else
+		rate = DP_LINK_RATE_HBR3;
+end:
+	panel->optimal_link_rate = rate;
+	return rate;
+}
+
 static int dp_panel_read_edid(struct dp_panel *dp_panel,
 	struct drm_connector *connector)
 {
 	int ret = 0;
 	struct dp_panel_private *panel;
 	struct edid *edid;
+	struct drm_display_mode *mode;
 
 	if (!dp_panel) {
 		pr_err("invalid input\n");
@@ -1790,6 +1831,16 @@
 		ret = -EINVAL;
 		goto end;
 	}
+
+	mutex_lock(&connector->dev->mode_config.mutex);
+	_sde_edid_update_modes(connector, dp_panel->edid_ctrl);
+	mutex_unlock(&connector->dev->mode_config.mutex);
+
+	mode = list_first_entry(&connector->probed_modes,
+				 struct drm_display_mode, head);
+
+	panel->bpp = connector->display_info.bpc * 3;
+	panel->active_pclk = mode->clock;
 end:
 	edid = dp_panel->edid_ctrl->edid;
 	dp_panel->audio_supported = drm_detect_monitor_audio(edid);
@@ -2325,6 +2376,7 @@
 	int rc = 0;
 	struct dp_panel_private *panel;
 	struct dp_panel_info *pinfo;
+	u32 current_link_rate;
 
 	if (!dp_panel) {
 		pr_err("invalid input\n");
@@ -2348,6 +2400,13 @@
 		pinfo->refresh_rate, pinfo->bpp, pinfo->pixel_clk_khz,
 		panel->link->link_params.bw_code,
 		panel->link->link_params.lane_count);
+
+	panel->active_pclk = pinfo->pixel_clk_khz;
+	current_link_rate = panel->optimal_link_rate;
+	dp_panel_get_optimal_link_rate(dp_panel);
+
+	if (panel->optimal_link_rate != current_link_rate)
+		rc = -EAGAIN;
 end:
 	return rc;
 }
@@ -2701,47 +2760,22 @@
 	catalog->config_misc(catalog);
 }
 
-static bool dp_panel_use_fixed_nvid(struct dp_panel *dp_panel)
-{
-	u8 *dpcd = dp_panel->dpcd;
-	struct sde_connector *c_conn = to_sde_connector(dp_panel->connector);
-
-	/* use fixe mvid and nvid for MST streams */
-	if (c_conn->mst_port)
-		return true;
-
-	/*
-	 * For better interop experience, used a fixed NVID=0x8000
-	 * whenever connected to a VGA dongle downstream.
-	 */
-	if (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT) {
-		u8 type = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
-			DP_DWN_STRM_PORT_TYPE_MASK;
-		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG)
-			return true;
-	}
-
-	return false;
-}
-
 static void dp_panel_config_msa(struct dp_panel *dp_panel)
 {
 	struct dp_panel_private *panel;
 	struct dp_catalog_panel *catalog;
 	u32 rate;
 	u32 stream_rate_khz;
-	bool fixed_nvid;
 
 	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
 	catalog = panel->catalog;
 
 	catalog->widebus_en = dp_panel->widebus_en;
 
-	fixed_nvid = dp_panel_use_fixed_nvid(dp_panel);
 	rate = drm_dp_bw_code_to_link_rate(panel->link->link_params.bw_code);
 	stream_rate_khz = dp_panel->pinfo.pixel_clk_khz;
 
-	catalog->config_msa(catalog, rate, stream_rate_khz, fixed_nvid);
+	catalog->config_msa(catalog, rate, stream_rate_khz);
 }
 
 static int dp_panel_hw_cfg(struct dp_panel *dp_panel, bool enable)
@@ -2937,34 +2971,32 @@
 		goto error;
 	}
 
+	dp_panel = &panel->dp_panel;
+
+	if (in->base_panel) {
+		struct dp_panel_private *base_panel_priv =
+			container_of(in->base_panel,
+				struct dp_panel_private, dp_panel);
+
+		memcpy(panel, base_panel_priv, sizeof(*panel));
+
+		goto update;
+	}
+
 	panel->dev = in->dev;
 	panel->aux = in->aux;
 	panel->catalog = in->catalog;
 	panel->link = in->link;
 	panel->parser = in->parser;
 
-	dp_panel = &panel->dp_panel;
 	dp_panel->max_bw_code = DP_LINK_BW_8_1;
 	dp_panel->spd_enabled = true;
 	memcpy(panel->spd_vendor_name, vendor_name, (sizeof(u8) * 8));
 	memcpy(panel->spd_product_description, product_desc, (sizeof(u8) * 16));
-	dp_panel->connector = in->connector;
 
 	dp_panel->dsc_feature_enable = panel->parser->dsc_feature_enable;
 	dp_panel->fec_feature_enable = panel->parser->fec_feature_enable;
 
-	if (in->base_panel) {
-		memcpy(dp_panel->dpcd, in->base_panel->dpcd,
-				DP_RECEIVER_CAP_SIZE + 1);
-		memcpy(&dp_panel->link_info, &in->base_panel->link_info,
-				sizeof(dp_panel->link_info));
-		dp_panel->mst_state = in->base_panel->mst_state;
-		dp_panel->widebus_en = in->base_panel->widebus_en;
-		dp_panel->fec_en = in->base_panel->fec_en;
-		dp_panel->dsc_en = in->base_panel->dsc_en;
-		dp_panel->fec_overhead_fp = in->base_panel->fec_overhead_fp;
-	}
-
 	dp_panel->init = dp_panel_init_panel_info;
 	dp_panel->deinit = dp_panel_deinit_panel_info;
 	dp_panel->hw_cfg = dp_panel_hw_cfg;
@@ -2985,7 +3017,9 @@
 	dp_panel->read_mst_cap = dp_panel_read_mst_cap;
 	dp_panel->convert_to_dp_mode = dp_panel_convert_to_dp_mode;
 	dp_panel->update_pps = dp_panel_update_pps;
-
+	dp_panel->get_optimal_link_rate = dp_panel_get_optimal_link_rate;
+update:
+	dp_panel->connector = in->connector;
 	sde_conn = to_sde_connector(dp_panel->connector);
 	sde_conn->drv_panel = dp_panel;
 
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index 90d5346..a3473ec 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -18,6 +18,11 @@
 #define DP_RECEIVER_DSC_CAP_SIZE    15
 #define DP_RECEIVER_FEC_STATUS_SIZE 3
 
+#define DP_LINK_RATE_RBR  162000
+#define DP_LINK_RATE_HBR  270000
+#define DP_LINK_RATE_HBR2 540000
+#define DP_LINK_RATE_HBR3 810000
+
 /*
  * A source initiated power down flag is set
  * when the DP is powered off while physical
@@ -163,6 +168,7 @@
 		const struct drm_display_mode *drm_mode,
 		struct dp_display_mode *dp_mode);
 	void (*update_pps)(struct dp_panel *dp_panel, char *pps_cmd);
+	u32 (*get_optimal_link_rate)(struct dp_panel *dp_panel);
 };
 
 struct dp_tu_calc_input {
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c
index a6d2a3a..8df20ae 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c
@@ -113,7 +113,14 @@
 			    struct dsi_phy_cfg *cfg)
 {
 	int i;
-	u8 tx_dctrl[] = {0x00, 0x00, 0x00, 0x04, 0x01};
+	u8 tx_dctrl_v4[] = {0x00, 0x00, 0x00, 0x04, 0x01};
+	u8 tx_dctrl_v4_1[] = {0x40, 0x40, 0x40, 0x46, 0x41};
+	u8 *tx_dctrl;
+
+	if (phy->version == DSI_PHY_VERSION_4_1)
+		tx_dctrl = &tx_dctrl_v4_1[0];
+	else
+		tx_dctrl = &tx_dctrl_v4[0];
 
 	/* Strength ctrl settings */
 	for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 895a94d..021971e 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -128,11 +128,26 @@
 		goto fail_put;
 	}
 
-	domain = (flags & ION_FLAG_SECURE) ? MSM_SMMU_DOMAIN_SECURE :
-						MSM_SMMU_DOMAIN_UNSECURE;
-	if (kms && kms->funcs->get_address_space_device)
-		attach_dev = kms->funcs->get_address_space_device(
-							kms, domain);
+	if (!kms || !kms->funcs->get_address_space_device) {
+		DRM_ERROR("invalid kms ops\n");
+		goto fail_put;
+	}
+
+	if (flags & ION_FLAG_SECURE) {
+		if (flags & ION_FLAG_CP_PIXEL)
+			attach_dev = kms->funcs->get_address_space_device(kms,
+						MSM_SMMU_DOMAIN_SECURE);
+
+		else if ((flags & ION_FLAG_CP_SEC_DISPLAY)
+				|| (flags & ION_FLAG_CP_CAMERA_PREVIEW))
+			attach_dev = dev->dev;
+		else
+			DRM_ERROR("invalid ion secure flag: 0x%x\n", flags);
+	} else {
+		attach_dev = kms->funcs->get_address_space_device(kms,
+						MSM_SMMU_DOMAIN_UNSECURE);
+	}
+
 	if (!attach_dev) {
 		DRM_ERROR("aspace device not found for domain:%d\n", domain);
 		ret = -EINVAL;
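
Note: the import path now makes a three-way choice on the ION protection
flags; as read from the code above:

    /*
     * ION_FLAG_SECURE + ION_FLAG_CP_PIXEL          -> secure SMMU domain
     * ION_FLAG_SECURE + ION_FLAG_CP_SEC_DISPLAY    -> dev->dev (no SMMU)
     * ION_FLAG_SECURE + ION_FLAG_CP_CAMERA_PREVIEW -> dev->dev (no SMMU)
     * no secure flag                               -> unsecure SMMU domain
     */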
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 9122ee6..1fe9392 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -63,7 +63,7 @@
 	struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
 	void (*recover)(struct msm_gpu *gpu);
 	void (*destroy)(struct msm_gpu *gpu);
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
 	/* show GPU status in debugfs: */
 	void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
 			struct drm_printer *p);
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index ca9050c..5e7ba86 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -16,6 +16,7 @@
 #include "sde_hw_interrupts.h"
 #include "sde_core_irq.h"
 #include "dsi_panel.h"
+#include "sde_hw_color_proc_common_v4.h"
 
 struct sde_cp_node {
 	u32 property_id;
@@ -93,6 +94,8 @@
 		struct sde_hw_dspp *hw_dspp, struct sde_hw_cp_cfg *hw_cfg);
 static void sde_cp_notify_ltm_hist(struct drm_crtc *crtc_drm, void *arg);
 static void sde_cp_notify_ltm_wb_pb(struct drm_crtc *crtc_drm, void *arg);
+static void _sde_cp_crtc_update_ltm_roi(struct sde_crtc *sde_crtc,
+		struct sde_hw_cp_cfg *hw_cfg);
 
 #define setup_dspp_prop_install_funcs(func) \
 do { \
@@ -539,10 +542,12 @@
 {
 	int ret = 0;
 
-	if (!hw_dspp || !hw_dspp->ops.setup_ltm_roi)
+	if (!hw_dspp || !hw_dspp->ops.setup_ltm_roi) {
 		ret = -EINVAL;
-	else
+	} else {
 		hw_dspp->ops.setup_ltm_roi(hw_dspp, hw_cfg);
+		_sde_cp_crtc_update_ltm_roi(hw_crtc, hw_cfg);
+	}
 
 	return ret;
 }
@@ -2852,6 +2857,7 @@
 {
 	unsigned long irq_flags;
 	struct sde_hw_mixer *hw_lm = hw_cfg->mixer_info;
+	u32 i = 0;
 
 	spin_lock_irqsave(&sde_crtc->ltm_lock, irq_flags);
 	if (!hw_lm->cfg.right_mixer && !sde_crtc->ltm_hist_en) {
@@ -2860,6 +2866,11 @@
 		return;
 	}
 	sde_crtc->ltm_hist_en = false;
+	INIT_LIST_HEAD(&sde_crtc->ltm_buf_free);
+	INIT_LIST_HEAD(&sde_crtc->ltm_buf_busy);
+	for (i = 0; i < sde_crtc->ltm_buffer_cnt; i++)
+		list_add(&sde_crtc->ltm_buffers[i]->node,
+			&sde_crtc->ltm_buf_free);
 	spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
 }
 
@@ -2873,6 +2884,9 @@
 	u64 addr = 0;
 	int idx = -1;
 	unsigned long irq_flags;
+	struct sde_ltm_phase_info phase;
+	struct sde_hw_cp_cfg hw_cfg;
+	struct sde_hw_mixer *hw_lm;
 
 	if (!sde_crtc) {
 		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
@@ -2913,11 +2927,6 @@
 				0);
 		}
 
-		INIT_LIST_HEAD(&sde_crtc->ltm_buf_free);
-		INIT_LIST_HEAD(&sde_crtc->ltm_buf_busy);
-		for (i = 0; i < sde_crtc->ltm_buffer_cnt; i++)
-			list_add(&sde_crtc->ltm_buffers[i]->node,
-				&sde_crtc->ltm_buf_free);
 		spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
 		return;
 	}
@@ -2965,6 +2974,30 @@
 		((u8 *)sde_crtc->ltm_buffers[idx]->kva +
 		sde_crtc->ltm_buffers[idx]->offset);
 	ltm_data->status_flag = ltm_hist_status;
+
+	hw_lm = sde_crtc->mixers[0].hw_lm;
+	if (!hw_lm) {
+		DRM_ERROR("invalid layer mixer\n");
+		return;
+	}
+	hw_cfg.num_of_mixers = num_mixers;
+	hw_cfg.displayh = num_mixers * hw_lm->cfg.out_width;
+	hw_cfg.displayv = hw_lm->cfg.out_height;
+
+	sde_ltm_get_phase_info(&hw_cfg, &phase);
+	ltm_data->display_h = hw_cfg.displayh;
+	ltm_data->display_v = hw_cfg.displayv;
+	ltm_data->init_h[0] = phase.init_h[LTM_0];
+	ltm_data->init_h[1] = phase.init_h[LTM_1];
+	ltm_data->init_v = phase.init_v;
+	ltm_data->inc_v = phase.inc_v;
+	ltm_data->inc_h = phase.inc_h;
+	ltm_data->portrait_en = phase.portrait_en;
+	ltm_data->merge_en = phase.merge_en;
+	ltm_data->cfg_param_01 = sde_crtc->ltm_cfg.cfg_param_01;
+	ltm_data->cfg_param_02 = sde_crtc->ltm_cfg.cfg_param_02;
+	ltm_data->cfg_param_03 = sde_crtc->ltm_cfg.cfg_param_03;
+	ltm_data->cfg_param_04 = sde_crtc->ltm_cfg.cfg_param_04;
 	sde_crtc_event_queue(&sde_crtc->base, sde_cp_notify_ltm_hist,
 				sde_crtc->ltm_buffers[idx], true);
 	spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
@@ -3188,3 +3221,45 @@
 	}
 	return ret;
 }
+
+static void _sde_cp_crtc_update_ltm_roi(struct sde_crtc *sde_crtc,
+		struct sde_hw_cp_cfg *hw_cfg)
+{
+	struct drm_msm_ltm_cfg_param *cfg_param = NULL;
+
+	/* disable case */
+	if (!hw_cfg->payload) {
+		memset(&sde_crtc->ltm_cfg, 0,
+			sizeof(struct drm_msm_ltm_cfg_param));
+		return;
+	}
+
+	if (hw_cfg->len != sizeof(struct drm_msm_ltm_cfg_param)) {
+		DRM_ERROR("invalid size of payload len %d exp %zd\n",
+			hw_cfg->len, sizeof(struct drm_msm_ltm_cfg_param));
+		return;
+	}
+
+	cfg_param = hw_cfg->payload;
+	/* input param exceeds the display width */
+	if (cfg_param->cfg_param_01 + cfg_param->cfg_param_03 >
+			hw_cfg->displayh) {
+		DRM_DEBUG_DRIVER("invalid input = [%u,%u], displayh = %u\n",
+			cfg_param->cfg_param_01, cfg_param->cfg_param_03,
+			hw_cfg->displayh);
+		/* set the roi width to max register value */
+		cfg_param->cfg_param_03 = 0xFFFF;
+	}
+
+	/* input param exceeds the display height */
+	if (cfg_param->cfg_param_02 + cfg_param->cfg_param_04 >
+			hw_cfg->displayv) {
+		DRM_DEBUG_DRIVER("invalid input = [%u,%u], displayv = %u\n",
+			cfg_param->cfg_param_02, cfg_param->cfg_param_04,
+			hw_cfg->displayv);
+		/* set the roi height to max register value */
+		cfg_param->cfg_param_04 = 0xFFFF;
+	}
+
+	sde_crtc->ltm_cfg = *cfg_param;
+}
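
Note: the ROI clamp deliberately saturates rather than rejects. A hand-worked
example: with displayh = 1080, a payload of cfg_param_01 (x) = 200 and
cfg_param_03 (w) = 2000 overflows the panel, so instead of dropping the whole
LTM configuration the width collapses to the register maximum:

    if (cfg_param->cfg_param_01 + cfg_param->cfg_param_03 > hw_cfg->displayh)
        cfg_param->cfg_param_03 = 0xFFFF;  /* 200 + 2000 > 1080: saturate */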
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 05dcceb..5bea5e5 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -1186,7 +1186,8 @@
 	struct sde_connector *c_conn;
 	struct sde_connector_state *c_state;
 	int idx, rc;
-	uint64_t fence_fd;
+	uint64_t fence_user_fd;
+	uint64_t __user prev_user_fd;
 
 	if (!connector || !state || !property) {
 		SDE_ERROR("invalid argument(s), conn %pK, state %pK, prp %pK\n",
@@ -1229,23 +1230,42 @@
 		if (!val)
 			goto end;
 
-		/*
-		 * update the the offset to a timeline for commit completion
-		 */
-		rc = sde_fence_create(c_conn->retire_fence, &fence_fd, 1);
+		rc = copy_from_user(&prev_user_fd, (void __user *)val,
+				sizeof(uint64_t));
 		if (rc) {
-			SDE_ERROR("fence create failed rc:%d\n", rc);
+			SDE_ERROR("copy from user failed rc:%d\n", rc);
+			rc = -EFAULT;
 			goto end;
 		}
 
-		rc = copy_to_user((uint64_t __user *)(uintptr_t)val, &fence_fd,
-			sizeof(uint64_t));
-		if (rc) {
-			SDE_ERROR("copy to user failed rc:%d\n", rc);
-			/* fence will be released with timeline update */
-			put_unused_fd(fence_fd);
-			rc = -EFAULT;
-			goto end;
+		/*
+		 * client is expected to reset the property to -1 before
+		 * requesting for the retire fence
+		 */
+		if (prev_user_fd == -1) {
+			/*
+			 * update the offset to a timeline for
+			 * commit completion
+			 */
+			rc = sde_fence_create(c_conn->retire_fence,
+						&fence_user_fd, 1);
+			if (rc) {
+				SDE_ERROR("fence create failed rc:%d\n", rc);
+				goto end;
+			}
+
+			rc = copy_to_user((uint64_t __user *)(uintptr_t)val,
+					&fence_user_fd, sizeof(uint64_t));
+			if (rc) {
+				SDE_ERROR("copy to user failed rc:%d\n", rc);
+				/*
+				 * fence will be released with timeline
+				 * update
+				 */
+				put_unused_fd(fence_user_fd);
+				rc = -EFAULT;
+				goto end;
+			}
 		}
 		break;
 	case CONNECTOR_PROP_ROI_V1:
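
Note: the property now carries a user pointer, and the driver hands back a
fence fd only when the client has pre-seeded that location with -1. A minimal
userspace sketch, assuming a stock libdrm atomic request (the property-id
lookup is omitted and names are illustrative):

    uint64_t fence_fd = (uint64_t)-1;  /* client must reset to -1 first */
    drmModeAtomicAddProperty(req, conn_id, retire_fence_prop_id,
                             (uint64_t)(uintptr_t)&fence_fd);
    drmModeAtomicCommit(drm_fd, req, 0, NULL);
    /* on success the driver copy_to_user()s the new fd into fence_fd */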
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 47771a3..e4faed3 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -5112,7 +5112,8 @@
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *cstate;
 	int idx, ret;
-	uint64_t fence_fd;
+	uint64_t fence_user_fd;
+	uint64_t __user prev_user_fd;
 
 	if (!crtc || !state || !property) {
 		SDE_ERROR("invalid argument(s)\n");
@@ -5172,19 +5173,34 @@
 		if (!val)
 			goto exit;
 
-		ret = _sde_crtc_get_output_fence(crtc, state, &fence_fd);
+		ret = copy_from_user(&prev_user_fd, (void __user *)val,
+				sizeof(uint64_t));
 		if (ret) {
-			SDE_ERROR("fence create failed rc:%d\n", ret);
+			SDE_ERROR("copy from user failed rc:%d\n", ret);
+			ret = -EFAULT;
 			goto exit;
 		}
 
-		ret = copy_to_user((uint64_t __user *)(uintptr_t)val, &fence_fd,
-				sizeof(uint64_t));
-		if (ret) {
-			SDE_ERROR("copy to user failed rc:%d\n", ret);
-			put_unused_fd(fence_fd);
-			ret = -EFAULT;
-			goto exit;
+		/*
+		 * client is expected to reset the property to -1 before
+		 * requesting for the release fence
+		 */
+		if (prev_user_fd == -1) {
+			ret = _sde_crtc_get_output_fence(crtc, state,
+					&fence_user_fd);
+			if (ret) {
+				SDE_ERROR("fence create failed rc:%d\n", ret);
+				goto exit;
+			}
+
+			ret = copy_to_user((uint64_t __user *)(uintptr_t)val,
+					&fence_user_fd, sizeof(uint64_t));
+			if (ret) {
+				SDE_ERROR("copy to user failed rc:%d\n", ret);
+				put_unused_fd(fence_user_fd);
+				ret = -EFAULT;
+				goto exit;
+			}
 		}
 		break;
 	default:
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index fcd94d8..0171f4c 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -339,6 +339,7 @@
 	struct list_head ltm_buf_free;
 	struct list_head ltm_buf_busy;
 	bool ltm_hist_en;
+	struct drm_msm_ltm_cfg_param ltm_cfg;
 	struct mutex ltm_buffer_lock;
 	spinlock_t ltm_lock;
 };
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index eb7876f..6754977 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -139,12 +139,13 @@
 	qos_params.xin_id = hw_wb->caps->xin_id;
 	qos_params.clk_ctrl = hw_wb->caps->clk_ctrl;
 	qos_params.num = hw_wb->idx - WB_0;
-	qos_params.is_rt = sde_crtc_get_client_type(crtc) != NRT_CLIENT;
+	qos_params.client_type = phys_enc->in_clone_mode ?
+					VBIF_CWB_CLIENT : VBIF_NRT_CLIENT;
 
-	SDE_DEBUG("[qos_remap] wb:%d vbif:%d xin:%d rt:%d\n",
+	SDE_DEBUG("[qos_remap] wb:%d vbif:%d xin:%d rt:%d clone:%d\n",
 			qos_params.num,
 			qos_params.vbif_idx,
-			qos_params.xin_id, qos_params.is_rt);
+			qos_params.xin_id, qos_params.client_type);
 
 	sde_vbif_set_qos_remap(phys_enc->sde_kms, &qos_params);
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 6b3eb6c..31dcfa8 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -379,10 +379,12 @@
 	VBIF_DEFAULT_OT_WR_LIMIT,
 	VBIF_DYNAMIC_OT_RD_LIMIT,
 	VBIF_DYNAMIC_OT_WR_LIMIT,
-	VBIF_QOS_RT_REMAP,
-	VBIF_QOS_NRT_REMAP,
 	VBIF_MEMTYPE_0,
 	VBIF_MEMTYPE_1,
+	VBIF_QOS_RT_REMAP,
+	VBIF_QOS_NRT_REMAP,
+	VBIF_QOS_CWB_REMAP,
+	VBIF_QOS_LUTDMA_REMAP,
 	VBIF_PROP_MAX,
 };
 
@@ -397,6 +399,8 @@
 	REG_DMA_VERSION,
 	REG_DMA_TRIGGER_OFF,
 	REG_DMA_BROADCAST_DISABLED,
+	REG_DMA_XIN_ID,
+	REG_DMA_CLK_CTRL,
 	REG_DMA_PROP_MAX
 };
 
@@ -698,12 +702,16 @@
 		PROP_TYPE_U32_ARRAY},
 	{VBIF_DYNAMIC_OT_WR_LIMIT, "qcom,sde-vbif-dynamic-ot-wr-limit", false,
 		PROP_TYPE_U32_ARRAY},
+	{VBIF_MEMTYPE_0, "qcom,sde-vbif-memtype-0", false, PROP_TYPE_U32_ARRAY},
+	{VBIF_MEMTYPE_1, "qcom,sde-vbif-memtype-1", false, PROP_TYPE_U32_ARRAY},
 	{VBIF_QOS_RT_REMAP, "qcom,sde-vbif-qos-rt-remap", false,
 		PROP_TYPE_U32_ARRAY},
 	{VBIF_QOS_NRT_REMAP, "qcom,sde-vbif-qos-nrt-remap", false,
 		PROP_TYPE_U32_ARRAY},
-	{VBIF_MEMTYPE_0, "qcom,sde-vbif-memtype-0", false, PROP_TYPE_U32_ARRAY},
-	{VBIF_MEMTYPE_1, "qcom,sde-vbif-memtype-1", false, PROP_TYPE_U32_ARRAY},
+	{VBIF_QOS_CWB_REMAP, "qcom,sde-vbif-qos-cwb-remap", false,
+		PROP_TYPE_U32_ARRAY},
+	{VBIF_QOS_LUTDMA_REMAP, "qcom,sde-vbif-qos-lutdma-remap", false,
+		PROP_TYPE_U32_ARRAY},
 };
 
 static struct sde_prop_type uidle_prop[] = {
@@ -721,6 +729,10 @@
 		PROP_TYPE_U32},
 	[REG_DMA_BROADCAST_DISABLED] = {REG_DMA_BROADCAST_DISABLED,
 		"qcom,sde-reg-dma-broadcast-disabled", false, PROP_TYPE_BOOL},
+	[REG_DMA_XIN_ID] = {REG_DMA_XIN_ID,
+		"qcom,sde-reg-dma-xin-id", false, PROP_TYPE_U32},
+	[REG_DMA_CLK_CTRL] = {REG_DMA_CLK_CTRL,
+		"qcom,sde-reg-dma-clk-ctrl", false, PROP_TYPE_BIT_OFFSET_ARRAY},
 };
 
 static struct sde_prop_type merge_3d_prop[] = {
@@ -2692,61 +2704,41 @@
 	struct sde_vbif_cfg *vbif, struct sde_prop_value *prop_value,
 	int *prop_count)
 {
-	int j;
+	int i, j;
+	int prop_index = VBIF_QOS_RT_REMAP;
 
-	vbif->qos_rt_tbl.npriority_lvl =
-			prop_count[VBIF_QOS_RT_REMAP];
-	SDE_DEBUG("qos_rt_tbl.npriority_lvl=%u\n",
-			vbif->qos_rt_tbl.npriority_lvl);
-	if (vbif->qos_rt_tbl.npriority_lvl == sde_cfg->vbif_qos_nlvl) {
-		vbif->qos_rt_tbl.priority_lvl = kcalloc(
-			vbif->qos_rt_tbl.npriority_lvl, sizeof(u32),
-			GFP_KERNEL);
-		if (!vbif->qos_rt_tbl.priority_lvl)
-			return -ENOMEM;
-	} else if (vbif->qos_rt_tbl.npriority_lvl) {
-		vbif->qos_rt_tbl.npriority_lvl = 0;
-		vbif->qos_rt_tbl.priority_lvl = NULL;
-		SDE_ERROR("invalid qos rt table\n");
+	for (i = VBIF_RT_CLIENT;
+			((i < VBIF_MAX_CLIENT) && (prop_index < VBIF_PROP_MAX));
+				i++, prop_index++) {
+		vbif->qos_tbl[i].npriority_lvl = prop_count[prop_index];
+		SDE_DEBUG("qos_tbl[%d].npriority_lvl=%u\n",
+				i, vbif->qos_tbl[i].npriority_lvl);
+
+		if (vbif->qos_tbl[i].npriority_lvl == sde_cfg->vbif_qos_nlvl) {
+			vbif->qos_tbl[i].priority_lvl = kcalloc(
+					vbif->qos_tbl[i].npriority_lvl,
+					sizeof(u32), GFP_KERNEL);
+			if (!vbif->qos_tbl[i].priority_lvl)
+				return -ENOMEM;
+		} else if (vbif->qos_tbl[i].npriority_lvl) {
+			vbif->qos_tbl[i].npriority_lvl = 0;
+			vbif->qos_tbl[i].priority_lvl = NULL;
+			SDE_ERROR("invalid qos table for client:%d, prop:%d\n",
+					i, prop_index);
+		}
+
+		for (j = 0; j < vbif->qos_tbl[i].npriority_lvl; j++) {
+			vbif->qos_tbl[i].priority_lvl[j] =
+				PROP_VALUE_ACCESS(prop_value, prop_index, j);
+			SDE_DEBUG("client:%d, prop:%d, lvl[%d]=%u\n",
+					i, prop_index, j,
+					vbif->qos_tbl[i].priority_lvl[j]);
+		}
+
+		if (vbif->qos_tbl[i].npriority_lvl)
+			set_bit(SDE_VBIF_QOS_REMAP, &vbif->features);
 	}
 
-	for (j = 0; j < vbif->qos_rt_tbl.npriority_lvl; j++) {
-		vbif->qos_rt_tbl.priority_lvl[j] =
-			PROP_VALUE_ACCESS(prop_value,
-					VBIF_QOS_RT_REMAP, j);
-		SDE_DEBUG("lvl[%d]=%u\n", j,
-				vbif->qos_rt_tbl.priority_lvl[j]);
-	}
-
-	vbif->qos_nrt_tbl.npriority_lvl =
-			prop_count[VBIF_QOS_NRT_REMAP];
-	SDE_DEBUG("qos_nrt_tbl.npriority_lvl=%u\n",
-			vbif->qos_nrt_tbl.npriority_lvl);
-
-	if (vbif->qos_nrt_tbl.npriority_lvl == sde_cfg->vbif_qos_nlvl) {
-		vbif->qos_nrt_tbl.priority_lvl = kcalloc(
-			vbif->qos_nrt_tbl.npriority_lvl, sizeof(u32),
-			GFP_KERNEL);
-		if (!vbif->qos_nrt_tbl.priority_lvl)
-			return -ENOMEM;
-	} else if (vbif->qos_nrt_tbl.npriority_lvl) {
-		vbif->qos_nrt_tbl.npriority_lvl = 0;
-		vbif->qos_nrt_tbl.priority_lvl = NULL;
-		SDE_ERROR("invalid qos nrt table\n");
-	}
-
-	for (j = 0; j < vbif->qos_nrt_tbl.npriority_lvl; j++) {
-		vbif->qos_nrt_tbl.priority_lvl[j] =
-			PROP_VALUE_ACCESS(prop_value,
-					VBIF_QOS_NRT_REMAP, j);
-		SDE_DEBUG("lvl[%d]=%u\n", j,
-				vbif->qos_nrt_tbl.priority_lvl[j]);
-	}
-
-	if (vbif->qos_rt_tbl.npriority_lvl ||
-			vbif->qos_nrt_tbl.npriority_lvl)
-		set_bit(SDE_VBIF_QOS_REMAP, &vbif->features);
-
 	return 0;
 }
 
@@ -2829,6 +2821,16 @@
 	if (rc)
 		goto end;
 
+	rc = _validate_dt_entry(np, &vbif_prop[VBIF_MEMTYPE_0], 1,
+			&prop_count[VBIF_MEMTYPE_0], NULL);
+	if (rc)
+		goto end;
+
+	rc = _validate_dt_entry(np, &vbif_prop[VBIF_MEMTYPE_1], 1,
+			&prop_count[VBIF_MEMTYPE_1], NULL);
+	if (rc)
+		goto end;
+
 	rc = _validate_dt_entry(np, &vbif_prop[VBIF_QOS_RT_REMAP], 1,
 			&prop_count[VBIF_QOS_RT_REMAP], NULL);
 	if (rc)
@@ -2839,13 +2841,13 @@
 	if (rc)
 		goto end;
 
-	rc = _validate_dt_entry(np, &vbif_prop[VBIF_MEMTYPE_0], 1,
-			&prop_count[VBIF_MEMTYPE_0], NULL);
+	rc = _validate_dt_entry(np, &vbif_prop[VBIF_QOS_CWB_REMAP], 1,
+			&prop_count[VBIF_QOS_CWB_REMAP], NULL);
 	if (rc)
 		goto end;
 
-	rc = _validate_dt_entry(np, &vbif_prop[VBIF_MEMTYPE_1], 1,
-			&prop_count[VBIF_MEMTYPE_1], NULL);
+	rc = _validate_dt_entry(np, &vbif_prop[VBIF_QOS_LUTDMA_REMAP], 1,
+			&prop_count[VBIF_QOS_LUTDMA_REMAP], NULL);
 	if (rc)
 		goto end;
 
@@ -3152,40 +3154,52 @@
 static int sde_parse_reg_dma_dt(struct device_node *np,
 		struct sde_mdss_cfg *sde_cfg)
 {
-	u32 val;
-	int rc = 0;
-	int i = 0;
+	int rc = 0, i, prop_count[REG_DMA_PROP_MAX];
+	struct sde_prop_value *prop_value = NULL;
+	u32 off_count;
+	bool prop_exists[REG_DMA_PROP_MAX];
 
-	sde_cfg->reg_dma_count = 0;
-	for (i = 0; i < REG_DMA_PROP_MAX; i++) {
-		if (reg_dma_prop[i].type == PROP_TYPE_BOOL) {
-			val = of_property_read_bool(np,
-					reg_dma_prop[i].prop_name);
-		} else {
-			rc = of_property_read_u32(np, reg_dma_prop[i].prop_name,
-					&val);
-			if (rc)
-				break;
-		}
-		switch (i) {
-		case REG_DMA_OFF:
-			sde_cfg->dma_cfg.base = val;
-			break;
-		case REG_DMA_VERSION:
-			sde_cfg->dma_cfg.version = val;
-			break;
-		case REG_DMA_TRIGGER_OFF:
-			sde_cfg->dma_cfg.trigger_sel_off = val;
-			break;
-		case REG_DMA_BROADCAST_DISABLED:
-			sde_cfg->dma_cfg.broadcast_disabled = val;
-			break;
-		default:
-			break;
-		}
+	prop_value = kcalloc(REG_DMA_PROP_MAX,
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
 	}
-	if (!rc && i == REG_DMA_PROP_MAX)
-		sde_cfg->reg_dma_count = 1;
+
+	rc = _validate_dt_entry(np, reg_dma_prop, ARRAY_SIZE(reg_dma_prop),
+			prop_count, &off_count);
+	if (rc || !off_count)
+		goto end;
+
+	rc = _read_dt_entry(np, reg_dma_prop, ARRAY_SIZE(reg_dma_prop),
+			prop_count, prop_exists, prop_value);
+	if (rc)
+		goto end;
+
+	sde_cfg->reg_dma_count = off_count;
+	sde_cfg->dma_cfg.base = PROP_VALUE_ACCESS(prop_value, REG_DMA_OFF, 0);
+	sde_cfg->dma_cfg.version = PROP_VALUE_ACCESS(prop_value,
+						REG_DMA_VERSION, 0);
+	sde_cfg->dma_cfg.trigger_sel_off = PROP_VALUE_ACCESS(prop_value,
+						REG_DMA_TRIGGER_OFF, 0);
+	sde_cfg->dma_cfg.broadcast_disabled = PROP_VALUE_ACCESS(prop_value,
+						REG_DMA_BROADCAST_DISABLED, 0);
+	sde_cfg->dma_cfg.xin_id = PROP_VALUE_ACCESS(prop_value,
+						REG_DMA_XIN_ID, 0);
+	sde_cfg->dma_cfg.clk_ctrl = SDE_CLK_CTRL_LUTDMA;
+	sde_cfg->dma_cfg.vbif_idx = VBIF_RT;
+
+	for (i = 0; i < sde_cfg->mdp_count; i++) {
+		sde_cfg->mdp[i].clk_ctrls[sde_cfg->dma_cfg.clk_ctrl].reg_off =
+			PROP_BITVALUE_ACCESS(prop_value,
+					REG_DMA_CLK_CTRL, 0, 0);
+		sde_cfg->mdp[i].clk_ctrls[sde_cfg->dma_cfg.clk_ctrl].bit_off =
+			PROP_BITVALUE_ACCESS(prop_value,
+					REG_DMA_CLK_CTRL, 0, 1);
+	}
+
+end:
+	kfree(prop_value);
 	/* reg dma is optional feature hence return 0 */
 	return 0;
 }
@@ -3914,7 +3928,7 @@
 
 void sde_hw_catalog_deinit(struct sde_mdss_cfg *sde_cfg)
 {
-	int i;
+	int i, j;
 
 	if (!sde_cfg)
 		return;
@@ -3940,8 +3954,9 @@
 	for (i = 0; i < sde_cfg->vbif_count; i++) {
 		kfree(sde_cfg->vbif[i].dynamic_ot_rd_tbl.cfg);
 		kfree(sde_cfg->vbif[i].dynamic_ot_wr_tbl.cfg);
-		kfree(sde_cfg->vbif[i].qos_rt_tbl.priority_lvl);
-		kfree(sde_cfg->vbif[i].qos_nrt_tbl.priority_lvl);
+
+		for (j = VBIF_RT_CLIENT; j < VBIF_MAX_CLIENT; j++)
+			kfree(sde_cfg->vbif[i].qos_tbl[j].priority_lvl);
 	}
 
 	for (i = 0; i < SDE_QOS_LUT_USAGE_MAX; i++)
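
Note: the rewritten per-client loop walks VBIF_QOS_RT_REMAP through
VBIF_QOS_LUTDMA_REMAP in lockstep with VBIF_RT_CLIENT through
VBIF_LUTDMA_CLIENT, so it silently relies on both enums staying contiguous
and identically ordered. A compile-time guard along these lines (sketched
here, not part of the patch) would pin that assumption down:

    BUILD_BUG_ON((VBIF_QOS_LUTDMA_REMAP - VBIF_QOS_RT_REMAP) !=
                 (VBIF_LUTDMA_CLIENT - VBIF_RT_CLIENT));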
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index 18e3762..2f7c781 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -705,6 +705,7 @@
 	SDE_CLK_CTRL_WB0,
 	SDE_CLK_CTRL_WB1,
 	SDE_CLK_CTRL_WB2,
+	SDE_CLK_CTRL_LUTDMA,
 	SDE_CLK_CTRL_MAX,
 };
 
@@ -998,6 +999,22 @@
 };
 
 /**
+ * enum sde_vbif_client_type
+ * @VBIF_RT_CLIENT: real time client
+ * @VBIF_NRT_CLIENT: non-realtime clients like writeback
+ * @VBIF_CWB_CLIENT: concurrent writeback client
+ * @VBIF_LUTDMA_CLIENT: LUTDMA client
+ * @VBIF_MAX_CLIENT: max number of clients
+ */
+enum sde_vbif_client_type {
+	VBIF_RT_CLIENT,
+	VBIF_NRT_CLIENT,
+	VBIF_CWB_CLIENT,
+	VBIF_LUTDMA_CLIENT,
+	VBIF_MAX_CLIENT
+};
+
+/**
  * struct sde_vbif_cfg - information of VBIF blocks
  * @id                 enum identifying this block
  * @base               register offset of this block
@@ -1007,8 +1024,7 @@
  * @xin_halt_timeout   maximum time (in usec) for xin to halt
  * @dynamic_ot_rd_tbl  dynamic OT read configuration table
  * @dynamic_ot_wr_tbl  dynamic OT write configuration table
- * @qos_rt_tbl         real-time QoS priority table
- * @qos_nrt_tbl        non-real-time QoS priority table
+ * @qos_tbl            Array of QoS priority table
  * @memtype_count      number of defined memtypes
  * @memtype            array of xin memtype definitions
  */
@@ -1019,8 +1035,7 @@
 	u32 xin_halt_timeout;
 	struct sde_vbif_dynamic_ot_tbl dynamic_ot_rd_tbl;
 	struct sde_vbif_dynamic_ot_tbl dynamic_ot_wr_tbl;
-	struct sde_vbif_qos_tbl qos_rt_tbl;
-	struct sde_vbif_qos_tbl qos_nrt_tbl;
+	struct sde_vbif_qos_tbl qos_tbl[VBIF_MAX_CLIENT];
 	u32 memtype_count;
 	u32 memtype[MAX_XIN_COUNT];
 };
@@ -1032,12 +1047,18 @@
  * @version            version of lutdma hw block
  * @trigger_sel_off    offset to trigger select registers of lutdma
  * @broadcast_disabled flag indicating if broadcast usage should be avoided
+ * @xin_id             VBIF xin client-id for LUTDMA
+ * @vbif_idx           VBIF id (RT/NRT)
+ * @clk_ctrl           VBIF xin client clk-ctrl
  */
 struct sde_reg_dma_cfg {
 	SDE_HW_BLK_INFO;
 	u32 version;
 	u32 trigger_sel_off;
 	u32 broadcast_disabled;
+	u32 xin_id;
+	u32 vbif_idx;
+	enum sde_clk_ctrl_type clk_ctrl;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
index 0e59734..4f4fe59 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  */
 #include <drm/msm_drm_pp.h>
 #include "sde_hw_mdss.h"
@@ -304,6 +304,13 @@
 	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
 			c->hw.blk_off + c->hw.length, c->hw.xin_id);
 
+	if ((cfg->sblk->ltm.id == SDE_DSPP_LTM) && cfg->sblk->ltm.base) {
+		sde_dbg_reg_register_dump_range(SDE_DBG_NAME, "LTM",
+				c->hw.blk_off + cfg->sblk->ltm.base,
+				c->hw.blk_off + cfg->sblk->ltm.base + 0xC4,
+				c->hw.xin_id);
+	}
+
 	return c;
 
 blk_init_error:
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c
index 0a794f8..0c2e025c 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c
@@ -3443,19 +3443,21 @@
 	/* input param exceeds the display width */
 	if (cfg_param->cfg_param_01 + cfg_param->cfg_param_03 >
 			hw_cfg->displayh) {
-		DRM_ERROR("invalid input param = [%u,%u], displayh = %u\n",
+		DRM_DEBUG_DRIVER("invalid input = [%u,%u], displayh = %u\n",
 			cfg_param->cfg_param_01, cfg_param->cfg_param_03,
 			hw_cfg->displayh);
-		return;
+		/* set the roi width to max register value */
+		cfg_param->cfg_param_03 = 0xFFFF;
 	}
 
 	/* input param exceeds the display height */
 	if (cfg_param->cfg_param_02 + cfg_param->cfg_param_04 >
 			hw_cfg->displayv) {
-		DRM_ERROR("invalid input param = [%u,%u], displayv = %u\n",
+		DRM_DEBUG_DRIVER("invalid input = [%u,%u], displayv = %u\n",
 			cfg_param->cfg_param_02, cfg_param->cfg_param_04,
 			hw_cfg->displayv);
-		return;
+		/* set the roi height to max register value */
+		cfg_param->cfg_param_04 = 0xFFFF;
 	}
 
 	roi_data[0] = ((cfg_param->cfg_param_02 & 0xFFFF) << 16) |
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
index 78e0785..5c1aef5 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
@@ -994,7 +994,7 @@
 		offset = SSPP_UIDLE_CTRL_VALUE;
 
 	val = SDE_REG_READ(&ctx->hw, offset + idx);
-	val = (val & ~BIT(31)) | (cfg->enable ? BIT(31) : 0x0);
+	val = (val & ~BIT(31)) | (cfg->enable ? 0x0 : BIT(31));
 	val = (val & ~0xFF00000) | (cfg->fal_allowed_threshold << 20);
 	val = (val & ~0xF0000) | (cfg->fal10_exit_threshold << 16);
 	val = (val & ~0xF00) | (cfg->fal10_threshold << 8);
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 973e6d1..c5d9098 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -106,174 +106,6 @@
 }
 
 #ifdef CONFIG_DEBUG_FS
-static int _sde_danger_signal_status(struct seq_file *s,
-		bool danger_status)
-{
-	struct sde_kms *kms = (struct sde_kms *)s->private;
-	struct msm_drm_private *priv;
-	struct sde_danger_safe_status status;
-	int i;
-	int rc;
-
-	if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
-		SDE_ERROR("invalid arg(s)\n");
-		return 0;
-	}
-
-	priv = kms->dev->dev_private;
-	memset(&status, 0, sizeof(struct sde_danger_safe_status));
-
-	rc = sde_power_resource_enable(&priv->phandle, kms->core_client, true);
-	if (rc) {
-		SDE_ERROR("failed to enable power resource %d\n", rc);
-		SDE_EVT32(rc, SDE_EVTLOG_ERROR);
-		return rc;
-	}
-
-	if (danger_status) {
-		seq_puts(s, "\nDanger signal status:\n");
-		if (kms->hw_mdp->ops.get_danger_status)
-			kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
-					&status);
-	} else {
-		seq_puts(s, "\nSafe signal status:\n");
-		if (kms->hw_mdp->ops.get_danger_status)
-			kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
-					&status);
-	}
-	sde_power_resource_enable(&priv->phandle, kms->core_client, false);
-
-	seq_printf(s, "MDP     :  0x%x\n", status.mdp);
-
-	for (i = SSPP_VIG0; i < SSPP_MAX; i++)
-		seq_printf(s, "SSPP%d   :  0x%x  \t", i - SSPP_VIG0,
-				status.sspp[i]);
-	seq_puts(s, "\n");
-
-	for (i = WB_0; i < WB_MAX; i++)
-		seq_printf(s, "WB%d     :  0x%x  \t", i - WB_0,
-				status.wb[i]);
-	seq_puts(s, "\n");
-
-	return 0;
-}
-
-#define DEFINE_SDE_DEBUGFS_SEQ_FOPS(__prefix)				\
-static int __prefix ## _open(struct inode *inode, struct file *file)	\
-{									\
-	return single_open(file, __prefix ## _show, inode->i_private);	\
-}									\
-static const struct file_operations __prefix ## _fops = {		\
-	.owner = THIS_MODULE,						\
-	.open = __prefix ## _open,					\
-	.release = single_release,					\
-	.read = seq_read,						\
-	.llseek = seq_lseek,						\
-}
-
-static int sde_debugfs_danger_stats_show(struct seq_file *s, void *v)
-{
-	return _sde_danger_signal_status(s, true);
-}
-DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_danger_stats);
-
-static int sde_debugfs_safe_stats_show(struct seq_file *s, void *v)
-{
-	return _sde_danger_signal_status(s, false);
-}
-DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_safe_stats);
-
-static void sde_debugfs_danger_destroy(struct sde_kms *sde_kms)
-{
-	debugfs_remove_recursive(sde_kms->debugfs_danger);
-	sde_kms->debugfs_danger = NULL;
-}
-
-static int sde_debugfs_danger_init(struct sde_kms *sde_kms,
-		struct dentry *parent)
-{
-	sde_kms->debugfs_danger = debugfs_create_dir("danger",
-			parent);
-	if (!sde_kms->debugfs_danger) {
-		SDE_ERROR("failed to create danger debugfs\n");
-		return -EINVAL;
-	}
-
-	debugfs_create_file("danger_status", 0400, sde_kms->debugfs_danger,
-			sde_kms, &sde_debugfs_danger_stats_fops);
-	debugfs_create_file("safe_status", 0400, sde_kms->debugfs_danger,
-			sde_kms, &sde_debugfs_safe_stats_fops);
-
-	return 0;
-}
-
-static int _sde_debugfs_show_regset32(struct seq_file *s, void *data)
-{
-	struct sde_debugfs_regset32 *regset;
-	struct sde_kms *sde_kms;
-	struct drm_device *dev;
-	struct msm_drm_private *priv;
-	void __iomem *base;
-	uint32_t i, addr;
-
-	if (!s || !s->private)
-		return 0;
-
-	regset = s->private;
-
-	sde_kms = regset->sde_kms;
-	if (!sde_kms || !sde_kms->mmio)
-		return 0;
-
-	dev = sde_kms->dev;
-	if (!dev)
-		return 0;
-
-	priv = dev->dev_private;
-	if (!priv)
-		return 0;
-
-	base = sde_kms->mmio + regset->offset;
-
-	/* insert padding spaces, if needed */
-	if (regset->offset & 0xF) {
-		seq_printf(s, "[%x]", regset->offset & ~0xF);
-		for (i = 0; i < (regset->offset & 0xF); i += 4)
-			seq_puts(s, "         ");
-	}
-
-	if (sde_power_resource_enable(&priv->phandle,
-				sde_kms->core_client, true)) {
-		seq_puts(s, "failed to enable sde clocks\n");
-		return 0;
-	}
-
-	/* main register output */
-	for (i = 0; i < regset->blk_len; i += 4) {
-		addr = regset->offset + i;
-		if ((addr & 0xF) == 0x0)
-			seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
-		seq_printf(s, " %08x", readl_relaxed(base + i));
-	}
-	seq_puts(s, "\n");
-	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
-
-	return 0;
-}
-
-static int sde_debugfs_open_regset32(struct inode *inode,
-		struct file *file)
-{
-	return single_open(file, _sde_debugfs_show_regset32, inode->i_private);
-}
-
-static const struct file_operations sde_fops_regset32 = {
-	.open =		sde_debugfs_open_regset32,
-	.read =		seq_read,
-	.llseek =	seq_lseek,
-	.release =	single_release,
-};
-
 void *sde_debugfs_get_root(struct sde_kms *sde_kms)
 {
 	struct msm_drm_private *priv;
@@ -303,7 +135,6 @@
 	/* allow debugfs_root to be NULL */
 	debugfs_create_x32(SDE_DEBUGFS_HWMASKNAME, 0600, debugfs_root, p);
 
-	(void) sde_debugfs_danger_init(sde_kms, debugfs_root);
 	(void) sde_debugfs_vbif_init(sde_kms, debugfs_root);
 	(void) sde_debugfs_core_irq_init(sde_kms, debugfs_root);
 
@@ -321,7 +152,6 @@
 	/* don't need to NULL check debugfs_root */
 	if (sde_kms) {
 		sde_debugfs_vbif_destroy(sde_kms);
-		sde_debugfs_danger_destroy(sde_kms);
 		sde_debugfs_core_irq_destroy(sde_kms);
 	}
 }
@@ -3014,6 +2844,25 @@
 	sde_hw_sid_rotator_set(sde_kms->hw_sid);
 }
 
+static void _sde_kms_set_lutdma_vbif_remap(struct sde_kms *sde_kms)
+{
+	struct sde_vbif_set_qos_params qos_params;
+	struct sde_mdss_cfg *catalog;
+
+	if (!sde_kms->catalog)
+		return;
+
+	catalog = sde_kms->catalog;
+
+	memset(&qos_params, 0, sizeof(qos_params));
+	qos_params.vbif_idx = catalog->dma_cfg.vbif_idx;
+	qos_params.xin_id = catalog->dma_cfg.xin_id;
+	qos_params.clk_ctrl = catalog->dma_cfg.clk_ctrl;
+	qos_params.client_type = VBIF_LUTDMA_CLIENT;
+
+	sde_vbif_set_qos_remap(sde_kms, &qos_params);
+}
+
 static void sde_kms_handle_power_event(u32 event_type, void *usr)
 {
 	struct sde_kms *sde_kms = usr;
@@ -3030,6 +2879,7 @@
 		sde_irq_update(msm_kms, true);
 		sde_vbif_init_memtypes(sde_kms);
 		sde_kms_init_shared_hw(sde_kms);
+		_sde_kms_set_lutdma_vbif_remap(sde_kms);
 		sde_kms->first_kickoff = true;
 	} else if (event_type == SDE_POWER_EVENT_PRE_DISABLE) {
 		sde_irq_update(msm_kms, false);
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index 0085410..62da4de 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -243,7 +243,6 @@
 	struct sde_power_event *power_event;
 
 	/* directory entry for debugfs */
-	struct dentry *debugfs_danger;
 	struct dentry *debugfs_vbif;
 
 	/* io/register spaces: */
@@ -423,22 +422,10 @@
  *
  * Documentation/filesystems/debugfs.txt
  *
- * @sde_debugfs_setup_regset32: Initialize data for sde_debugfs_create_regset32
- * @sde_debugfs_create_regset32: Create 32-bit register dump file
  * @sde_debugfs_get_root: Get root dentry for SDE_KMS's debugfs node
  */
 
 /**
- * Companion structure for sde_debugfs_create_regset32. Do not initialize the
- * members of this structure explicitly; use sde_debugfs_setup_regset32 instead.
- */
-struct sde_debugfs_regset32 {
-	uint32_t offset;
-	uint32_t blk_len;
-	struct sde_kms *sde_kms;
-};
-
-/**
  * sde_debugfs_get_root - Return root directory entry for KMS's debugfs
  *
  * The return value should be passed as the 'parent' argument to subsequent
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index d8aa5b5..9ea9c09 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -136,9 +136,6 @@
 
 	/* debugfs related stuff */
 	struct dentry *debugfs_root;
-	struct sde_debugfs_regset32 debugfs_src;
-	struct sde_debugfs_regset32 debugfs_scaler;
-	struct sde_debugfs_regset32 debugfs_csc;
 	bool debugfs_default_scale;
 };
 
@@ -670,12 +667,13 @@
 	qos_params.clk_ctrl = psde->pipe_hw->cap->clk_ctrl;
 	qos_params.xin_id = psde->pipe_hw->cap->xin_id;
 	qos_params.num = psde->pipe_hw->idx - SSPP_VIG0;
-	qos_params.is_rt = psde->is_rt_pipe;
+	qos_params.client_type = psde->is_rt_pipe ?
+					VBIF_RT_CLIENT : VBIF_NRT_CLIENT;
 
 	SDE_DEBUG("plane%d pipe:%d vbif:%d xin:%d rt:%d, clk_ctrl:%d\n",
 			plane->base.id, qos_params.num,
 			qos_params.vbif_idx,
-			qos_params.xin_id, qos_params.is_rt,
+			qos_params.xin_id, qos_params.client_type,
 			qos_params.clk_ctrl);
 
 	sde_vbif_set_qos_remap(sde_kms, &qos_params);
@@ -1604,9 +1602,7 @@
 	struct sde_plane *psde;
 	struct sde_plane_state *pstate, *old_pstate;
 	int ret = 0;
-	const struct msm_format *msm_fmt;
-	const struct sde_format *fmt;
-	u32 height;
+	u32 rotation;
 
 	if (!plane || !state) {
 		SDE_ERROR("invalid plane/state\n");
@@ -1618,27 +1614,38 @@
 	old_pstate = to_sde_plane_state(plane->state);
 
 	/* check inline rotation and simplify the transform */
-	pstate->rotation = drm_rotation_simplify(
+	rotation = drm_rotation_simplify(
 			state->rotation,
 			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
 			DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y);
 
-	if ((pstate->rotation & DRM_MODE_ROTATE_180) ||
-		(pstate->rotation & DRM_MODE_ROTATE_270)) {
+	if ((rotation & DRM_MODE_ROTATE_180) ||
+		(rotation & DRM_MODE_ROTATE_270)) {
 		SDE_ERROR_PLANE(psde,
 			"invalid rotation transform must be simplified 0x%x\n",
-			pstate->rotation);
+			rotation);
 		ret = -EINVAL;
 		goto exit;
 	}
 
-	msm_fmt = msm_framebuffer_format(state->fb);
-	fmt = to_sde_format(msm_fmt);
-	height = state->fb ? state->fb->height : 0x0;
-
-	if ((pstate->rotation & DRM_MODE_ROTATE_90)) {
+	if (rotation & DRM_MODE_ROTATE_90) {
 		struct msm_drm_private *priv = plane->dev->dev_private;
 		struct sde_kms *sde_kms;
+		const struct msm_format *msm_fmt;
+		const struct sde_format *fmt;
+		struct sde_rect src;
+		bool q16_data = true;
+
+		msm_fmt = msm_framebuffer_format(state->fb);
+		fmt = to_sde_format(msm_fmt);
+		POPULATE_RECT(&src, state->src_x, state->src_y,
+			state->src_w, state->src_h, q16_data);
+		/*
+		 * The DRM framework expresses the rotation flag in the
+		 * counter-clockwise direction, while the HW expects it
+		 * clockwise. Flip the reflect flags to match the HW.
+		 */
+		rotation ^= (DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y);
 
 		if (!psde->pipe_sblk->in_rot_maxdwnscale_rt ||
 			!psde->pipe_sblk->in_rot_maxdwnscale_nrt ||
@@ -1657,25 +1664,22 @@
 		}
 
 		/* check for valid height */
-		if (height > psde->pipe_sblk->in_rot_maxheight) {
+		if (src.h > psde->pipe_sblk->in_rot_maxheight) {
 			SDE_ERROR_PLANE(psde,
 				"invalid height for inline rot:%d max:%d\n",
-				height, psde->pipe_sblk->in_rot_maxheight);
+				src.h, psde->pipe_sblk->in_rot_maxheight);
 			ret = -EINVAL;
 			goto exit;
 		}
 
-		if (!sde_plane_enabled(state))
-			goto exit;
-
 		/* check for valid formats supported by inline rot */
 		sde_kms = to_sde_kms(priv->kms);
 		ret = sde_format_validate_fmt(&sde_kms->base, fmt,
 			psde->pipe_sblk->in_rot_format_list);
-
 	}
 
 exit:
+	pstate->rotation = rotation;
 	return ret;
 }
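
A note on the transform handling above: DRM expresses plane rotation in the
counter-clockwise convention, while the inline rotator works clockwise, so the
90-degree case toggles both reflect bits. A minimal standalone sketch of that
mapping, assuming only the DRM_MODE_* bit layout from
include/uapi/drm/drm_mode.h (everything else here is illustrative):

    #include <stdio.h>

    /* bit layout follows include/uapi/drm/drm_mode.h */
    #define DRM_MODE_ROTATE_0    (1u << 0)
    #define DRM_MODE_ROTATE_90   (1u << 1)
    #define DRM_MODE_ROTATE_180  (1u << 2)
    #define DRM_MODE_ROTATE_270  (1u << 3)
    #define DRM_MODE_REFLECT_X   (1u << 4)
    #define DRM_MODE_REFLECT_Y   (1u << 5)

    /*
     * Map a simplified counter-clockwise DRM rotation to the clockwise
     * convention the HW expects: for ROTATE_90, toggle both reflect bits.
     */
    static unsigned int to_hw_rotation(unsigned int rotation)
    {
            if (rotation & DRM_MODE_ROTATE_90)
                    rotation ^= (DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y);
            return rotation;
    }

    int main(void)
    {
            unsigned int r = DRM_MODE_ROTATE_90 | DRM_MODE_REFLECT_X;

            /* ROTATE_90 | REFLECT_X becomes ROTATE_90 | REFLECT_Y */
            printf("drm=0x%x hw=0x%x\n", r, to_hw_rotation(r));
            return 0;
    }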
 
diff --git a/drivers/gpu/drm/msm/sde/sde_vbif.c b/drivers/gpu/drm/msm/sde/sde_vbif.c
index 6fb7b5a..44e3a84 100644
--- a/drivers/gpu/drm/msm/sde/sde_vbif.c
+++ b/drivers/gpu/drm/msm/sde/sde_vbif.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
@@ -389,9 +389,12 @@
 		return;
 	}
 
-	qos_tbl = params->is_rt ? &vbif->cap->qos_rt_tbl :
-			&vbif->cap->qos_nrt_tbl;
+	if (params->client_type >= VBIF_MAX_CLIENT) {
+		SDE_ERROR("invalid client type:%d\n", params->client_type);
+		return;
+	}
 
+	qos_tbl = &vbif->cap->qos_tbl[params->client_type];
 	if (!qos_tbl->npriority_lvl || !qos_tbl->priority_lvl) {
 		SDE_DEBUG("qos tbl not defined\n");
 		return;
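
The lookup above replaces the separate RT/NRT tables with a single array
indexed by client type and sized by the VBIF_MAX_CLIENT sentinel; valid
indices stay strictly below the sentinel, which is what the bounds check
enforces. A standalone sketch of the pattern, with illustrative table contents
(the enum mirrors the client types used in this series):

    #include <stdio.h>

    enum vbif_client_type {
            VBIF_RT_CLIENT,
            VBIF_NRT_CLIENT,
            VBIF_LUTDMA_CLIENT,
            VBIF_MAX_CLIENT,  /* sentinel: sizes the table, never an index */
    };

    struct qos_tbl {
            unsigned int npriority_lvl;
            const unsigned int *priority_lvl;
    };

    static const unsigned int rt_lvls[] = {3, 3, 4, 4, 5, 5, 6, 6};

    static const struct qos_tbl tbls[VBIF_MAX_CLIENT] = {
            [VBIF_RT_CLIENT] = {8, rt_lvls},
            /* NRT and LUTDMA entries left empty: npriority_lvl == 0 */
    };

    static const struct qos_tbl *lookup(enum vbif_client_type type)
    {
            if (type >= VBIF_MAX_CLIENT)
                    return NULL;  /* out-of-range client type */
            if (!tbls[type].npriority_lvl || !tbls[type].priority_lvl)
                    return NULL;  /* qos tbl not defined */
            return &tbls[type];
    }

    int main(void)
    {
            printf("rt levels: %u\n", lookup(VBIF_RT_CLIENT)->npriority_lvl);
            printf("nrt defined: %s\n",
                   lookup(VBIF_NRT_CLIENT) ? "yes" : "no");
            return 0;
    }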
diff --git a/drivers/gpu/drm/msm/sde/sde_vbif.h b/drivers/gpu/drm/msm/sde/sde_vbif.h
index a7e7b4a..b16e0c7 100644
--- a/drivers/gpu/drm/msm/sde/sde_vbif.h
+++ b/drivers/gpu/drm/msm/sde/sde_vbif.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __SDE_VBIF_H__
@@ -50,14 +50,14 @@
  * @xin_id: client interface identifier
  * @clk_ctrl: clock control identifier of the xin
  * @num: pipe identifier (debug only)
- * @is_rt: true if pipe is used in real-time use case
+ * @client_type: client type enumerated by sde_vbif_client_type
  */
 struct sde_vbif_set_qos_params {
 	u32 vbif_idx;
 	u32 xin_id;
 	u32 clk_ctrl;
 	u32 num;
-	bool is_rt;
+	enum sde_vbif_client_type client_type;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index f5b674c..cf04b71 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -30,7 +30,7 @@
 
 #define RSC_MODE_INSTRUCTION_TIME	100
 #define RSC_MODE_THRESHOLD_OVERHEAD	2700
-#define MAX_MODE_0_ENTRY_EXIT_TIME	100
+#define MIN_THRESHOLD_TIME		0
 
 #define DEFAULT_PANEL_FPS		60
 #define DEFAULT_PANEL_JITTER_NUMERATOR	2
@@ -421,9 +421,9 @@
 	/* mode 2 is infinite */
 	rsc->timer_config.rsc_time_slot_2_ns = 0xFFFFFFFF;
 
-	rsc->timer_config.min_threshold_time_ns = MAX_MODE_0_ENTRY_EXIT_TIME;
+	rsc->timer_config.min_threshold_time_ns = MIN_THRESHOLD_TIME;
 	rsc->timer_config.bwi_threshold_time_ns =
-		rsc->single_tcs_execution_time;
+		rsc->timer_config.rsc_time_slot_0_ns;
 
 	/* timer update should be called with client call */
 	if (cmd_config && rsc->hw_ops.timer_update) {
@@ -1464,7 +1464,11 @@
 	rsc->mode_threshold_time_ns = rsc->backoff_time_ns
 					+ RSC_MODE_THRESHOLD_OVERHEAD;
 
-	rsc->time_slot_0_ns = (rsc->single_tcs_execution_time * 2)
+	if (rsc->version == SDE_RSC_REV_3)
+		rsc->time_slot_0_ns = rsc->single_tcs_execution_time
+					+ RSC_MODE_INSTRUCTION_TIME;
+	else
+		rsc->time_slot_0_ns = (rsc->single_tcs_execution_time * 2)
 					+ RSC_MODE_INSTRUCTION_TIME;
 
 	ret = sde_power_resource_init(pdev, &rsc->phandle);
diff --git a/drivers/gpu/drm/msm/sde_rsc_hw_v3.c b/drivers/gpu/drm/msm/sde_rsc_hw_v3.c
index fb0c0e1..4bdc767 100644
--- a/drivers/gpu/drm/msm/sde_rsc_hw_v3.c
+++ b/drivers/gpu/drm/msm/sde_rsc_hw_v3.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt)	"[sde_rsc_hw:%s:%d]: " fmt, __func__, __LINE__
@@ -327,7 +327,15 @@
 	if (rsc->power_collapse_block)
 		return -EINVAL;
 
-	dss_reg_w(&rsc->wrapper_io, SDE_RSC_SOLVER_SOLVER_MODES_ENABLED_DRV0,
+	if (rsc->sw_fs_enabled) {
+		rc = regulator_set_mode(rsc->fs, REGULATOR_MODE_FAST);
+		if (rc) {
+			pr_err("vdd reg fast mode set failed rc:%d\n", rc);
+			return rc;
+		}
+	}
+
+	dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_SOLVER_MODES_ENABLED_DRV0,
 						0x7, rsc->debug_mode);
 
 	for (i = 0; i <= MAX_MODE2_ENTRY_TRY; i++) {
@@ -407,10 +415,15 @@
 	case SDE_RSC_VID_STATE:
 		pr_debug("video mode handling\n");
 
+		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
+							0x0, rsc->debug_mode);
+		wmb(); /* disable double buffer config before vsync select */
+
 		ctrl2_config = (rsc->vsync_source & 0x7) << 4;
 		ctrl2_config |= (BIT(0) | BIT(1) | BIT(3));
 		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL2,
 				ctrl2_config, rsc->debug_mode);
+		wmb(); /* select vsync before double buffer config enabled */
 
 		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
 						0x1, rsc->debug_mode);
@@ -511,7 +524,7 @@
 						0x1, rsc->debug_mode);
 
 	bw_ack = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_WRAPPER_DEBUG_CTRL2,
-			rsc->debug_mode) & BIT(13);
+			rsc->debug_mode) & BIT(14);
 
 	/* check for sequence running status before exiting */
 	for (count = MAX_CHECK_LOOPS; count > 0 && !bw_ack; count--) {
@@ -520,7 +533,7 @@
 		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_BW_INDICATION,
 						bw_indication, rsc->debug_mode);
 		bw_ack = dss_reg_r(&rsc->wrapper_io,
-		       SDE_RSCC_WRAPPER_DEBUG_CTRL2, rsc->debug_mode) & BIT(13);
+		       SDE_RSCC_WRAPPER_DEBUG_CTRL2, rsc->debug_mode) & BIT(14);
 	}
 
 	if (!bw_ack)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 2abcd7b..f889d41 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1224,8 +1224,16 @@
 static void
 nv50_mstm_init(struct nv50_mstm *mstm)
 {
-	if (mstm && mstm->mgr.mst_state)
-		drm_dp_mst_topology_mgr_resume(&mstm->mgr);
+	int ret;
+
+	if (!mstm || !mstm->mgr.mst_state)
+		return;
+
+	ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr);
+	if (ret == -1) {
+		drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
+		drm_kms_helper_hotplug_event(mstm->mgr.dev);
+	}
 }
 
 static void
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
index 79d00d8..01ff3c8 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
@@ -189,12 +189,14 @@
 int rockchip_drm_psr_register(struct drm_encoder *encoder,
 			int (*psr_set)(struct drm_encoder *, bool enable))
 {
-	struct rockchip_drm_private *drm_drv = encoder->dev->dev_private;
+	struct rockchip_drm_private *drm_drv;
 	struct psr_drv *psr;
 
 	if (!encoder || !psr_set)
 		return -EINVAL;
 
+	drm_drv = encoder->dev->dev_private;
+
 	psr = kzalloc(sizeof(struct psr_drv), GFP_KERNEL);
 	if (!psr)
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index f455f09..1b014d9 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -350,15 +350,10 @@
 	if (ret)
 		goto err;
 
-	ret = drm_vblank_init(dev, 1);
-	if (ret)
-		goto err_fb;
-
 	drm_kms_helper_poll_init(dev);
 
 	return 0;
-err_fb:
-	udl_fbdev_cleanup(dev);
+
 err:
 	if (udl->urbs.count)
 		udl_free_urb_list(dev);
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index 4db62c5..26470c7 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -71,10 +71,13 @@
 			   V3D_READ(v3d_hub_reg_defs[i].reg));
 	}
 
-	for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) {
-		seq_printf(m, "%s (0x%04x): 0x%08x\n",
-			   v3d_gca_reg_defs[i].name, v3d_gca_reg_defs[i].reg,
-			   V3D_GCA_READ(v3d_gca_reg_defs[i].reg));
+	if (v3d->ver < 41) {
+		for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) {
+			seq_printf(m, "%s (0x%04x): 0x%08x\n",
+				   v3d_gca_reg_defs[i].name,
+				   v3d_gca_reg_defs[i].reg,
+				   V3D_GCA_READ(v3d_gca_reg_defs[i].reg));
+		}
 	}
 
 	for (core = 0; core < v3d->cores; core++) {
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index a3275fa..629f404 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -322,6 +322,7 @@
 		if (vc4_state->is_unity)
 			vc4_state->x_scaling[0] = VC4_SCALING_PPF;
 	} else {
+		vc4_state->is_yuv = false;
 		vc4_state->x_scaling[1] = VC4_SCALING_NONE;
 		vc4_state->y_scaling[1] = VC4_SCALING_NONE;
 	}
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index 5c75a6a..8819898 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -58,6 +58,7 @@
 #define A6XX_CP_SQE_INSTR_BASE_LO        0x830
 #define A6XX_CP_SQE_INSTR_BASE_HI        0x831
 #define A6XX_CP_MISC_CNTL                0x840
+#define A6XX_CP_APRIV_CNTL               0x844
 #define A6XX_CP_ROQ_THRESHOLDS_1         0x8C1
 #define A6XX_CP_ROQ_THRESHOLDS_2         0x8C2
 #define A6XX_CP_MEM_POOL_SIZE            0x8C3
@@ -825,6 +826,7 @@
 
 #define A6XX_GBIF_PERF_PWR_CNT_EN         0x3cc0
 #define A6XX_GBIF_PERF_CNT_SEL            0x3cc2
+#define A6XX_GBIF_PERF_PWR_CNT_SEL        0x3cc3
 #define A6XX_GBIF_PERF_CNT_LOW0           0x3cc4
 #define A6XX_GBIF_PERF_CNT_LOW1           0x3cc5
 #define A6XX_GBIF_PERF_CNT_LOW2           0x3cc6
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 182aff7..7334c29 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2002,2007-2019, The Linux Foundation. All rights reserved.
  */
 #include <linux/module.h>
 #include <linux/uaccess.h>
@@ -2377,7 +2377,10 @@
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	u64 vaddr;
 
-	vaddr = (ADRENO_GPUREV(adreno_dev) >= 500) ? ADRENO_UCHE_GMEM_BASE : 0;
+	if (ADRENO_GPUREV(adreno_dev) >= 500 && !adreno_is_a650(adreno_dev))
+		vaddr = ADRENO_UCHE_GMEM_BASE;
+	else
+		vaddr = 0;
 
 	return copy_prop(value, count, &vaddr, sizeof(vaddr));
 }
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index e76a875..a1b6e22 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -283,7 +283,7 @@
 	{A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
 	{A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
 	{A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
-	{A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000777},
+	{A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000000},
 	{A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
 	{A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
 	{A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
@@ -875,14 +875,21 @@
 	kgsl_regwrite(device, A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000);
 	kgsl_regwrite(device, A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);
 
-	/* Program the GMEM VA range for the UCHE path */
-	kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MIN_LO,
-				ADRENO_UCHE_GMEM_BASE);
-	kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x0);
-	kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MAX_LO,
-				ADRENO_UCHE_GMEM_BASE +
-				adreno_dev->gmem_size - 1);
-	kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MAX_HI, 0x0);
+	/*
+	 * Program the GMEM VA range for the UCHE path.
+	 * From Kona onwards the GMEM VA address is 0, and
+	 * UCHE_GMEM_RANGE registers are no longer used, so we don't
+	 * have to program them.
+	 */
+	if (!adreno_is_a650(adreno_dev)) {
+		kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MIN_LO,
+					ADRENO_UCHE_GMEM_BASE);
+		kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x0);
+		kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MAX_LO,
+					ADRENO_UCHE_GMEM_BASE +
+					adreno_dev->gmem_size - 1);
+		kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MAX_HI, 0x0);
+	}
 
 	kgsl_regwrite(device, A6XX_UCHE_FILTER_CNTL, 0x804);
 	kgsl_regwrite(device, A6XX_UCHE_CACHE_WAYS, 0x4);
@@ -1287,6 +1294,15 @@
 	if (ret)
 		return ret;
 
+	/*
+	 * Set the RBPRIVLEVEL bit in this register to raise the
+	 * privilege level of the ucode executing packets in the RB,
+	 * so that the CP can come out of secure mode without
+	 * dropping the packet.
+	 */
+	if (adreno_is_a650(adreno_dev))
+		kgsl_regwrite(device, A6XX_CP_APRIV_CNTL, (1 << 2));
+
 	/* Clear the SQE_HALT to start the CP engine */
 	kgsl_regwrite(device, A6XX_CP_SQE_CNTL, 1);
 
diff --git a/drivers/gpu/msm/adreno_perfcounter.c b/drivers/gpu/msm/adreno_perfcounter.c
index 72916a8..9dab3b6 100644
--- a/drivers/gpu/msm/adreno_perfcounter.c
+++ b/drivers/gpu/msm/adreno_perfcounter.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2002,2007-2019, The Linux Foundation. All rights reserved.
  */
 #include <linux/module.h>
 #include <linux/uaccess.h>
@@ -29,10 +29,15 @@
 /* offset of clear register from the power enable register for GBIF*/
 #define GBIF_PWR_CLR_REG_EN_OFF    1
 
+/* offset of select register from the power enable register for GBIF*/
+#define GBIF_PWR_SEL_REG_EN_OFF  3
+
 /* mask of the countable-select field for a GBIF perf counter */
-#define GBIF_PERF_RMW_MASK   0xFF
+#define GBIF_PERF_SEL_RMW_MASK   0xFF
 /* mask of the countable-select field for a GBIF power counter */
-#define GBIF_PWR_RMW_MASK    0x10000
+#define GBIF_PWR_SEL_RMW_MASK    0xFF
+/* mask of the enable/clear bit for a GBIF power counter */
+#define GBIF_PWR_EN_CLR_RMW_MASK 0x10000
 
 /* offset of clear register from the enable register */
 #define VBIF2_PERF_PWR_CLR_REG_EN_OFF 8
@@ -635,7 +640,7 @@
 			perfctr_mask, 0);
 		/* select the desired countable */
 		kgsl_regrmw(device, reg->select,
-			GBIF_PERF_RMW_MASK << shift, countable << shift);
+			GBIF_PERF_SEL_RMW_MASK << shift, countable << shift);
 		/* enable counter */
 		kgsl_regrmw(device, reg->select - GBIF_PERF_EN_REG_SEL_OFF,
 			perfctr_mask, perfctr_mask);
@@ -659,7 +664,8 @@
 }
 
 static void _perfcounter_enable_vbif_pwr(struct adreno_device *adreno_dev,
-		struct adreno_perfcounters *counters, unsigned int counter)
+		struct adreno_perfcounters *counters, unsigned int counter,
+		unsigned int countable)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct adreno_perfcount_register *reg;
@@ -667,7 +673,8 @@
 	reg = &counters->groups[KGSL_PERFCOUNTER_GROUP_VBIF_PWR].regs[counter];
 
 	if (adreno_has_gbif(adreno_dev)) {
-		unsigned int perfctr_mask = GBIF_PWR_RMW_MASK << counter;
+		unsigned int shift = counter << 3;
+		unsigned int perfctr_mask = GBIF_PWR_EN_CLR_RMW_MASK << counter;
 		/*
 		 * Write 1, followed by 0 to CLR register for
 		 * clearing the counter
@@ -676,6 +683,9 @@
 			perfctr_mask, perfctr_mask);
 		kgsl_regrmw(device, reg->select + GBIF_PWR_CLR_REG_EN_OFF,
 			perfctr_mask, 0);
+		/* select the desired countable */
+		kgsl_regrmw(device, reg->select + GBIF_PWR_SEL_REG_EN_OFF,
+			GBIF_PWR_SEL_RMW_MASK << shift, countable << shift);
 		/* Enable the counter */
 		kgsl_regrmw(device, reg->select, perfctr_mask, perfctr_mask);
 	} else {
@@ -886,7 +896,8 @@
 							countable);
 		break;
 	case KGSL_PERFCOUNTER_GROUP_VBIF_PWR:
-		_perfcounter_enable_vbif_pwr(adreno_dev, counters, counter);
+		_perfcounter_enable_vbif_pwr(adreno_dev, counters, counter,
+							countable);
 		break;
 	case KGSL_PERFCOUNTER_GROUP_SP_PWR:
 	case KGSL_PERFCOUNTER_GROUP_TP_PWR:
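
The select programming above packs one 8-bit countable field per counter into
the GBIF select register (hence shift = counter << 3) and updates it with a
read-modify-write, the same shape kgsl_regrmw() provides. A standalone sketch
of that idiom against a fake register (names are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t fake_reg;  /* stands in for a GBIF select register */

    /* read-modify-write: clear the masked bits, then OR in the new value */
    static void regrmw(uint32_t *reg, uint32_t mask, uint32_t bits)
    {
            *reg = (*reg & ~mask) | (bits & mask);
    }

    /* program the 8-bit countable select for the given counter index */
    static void select_countable(unsigned int counter, unsigned int countable)
    {
            unsigned int shift = counter << 3;  /* 8 bits per counter */

            regrmw(&fake_reg, 0xFFu << shift, countable << shift);
    }

    int main(void)
    {
            select_countable(0, 0x2A);
            select_countable(2, 0x11);
            /* counter 0 lands in bits 7:0, counter 2 in bits 23:16 */
            printf("reg = 0x%08x\n", fake_reg);  /* prints 0x0011002a */
            return 0;
    }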
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 74eed88..532896a 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2002,2007-2019, The Linux Foundation. All rights reserved.
  */
 #include <linux/slab.h>
 #include <linux/sched.h>
@@ -441,7 +441,6 @@
 	struct kgsl_context *context = NULL;
 	bool secured_ctxt = false;
 	static unsigned int _seq_cnt;
-	struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
 
 	if (drawctxt != NULL && kgsl_context_detached(&drawctxt->base) &&
 		!is_internal_cmds(flags))
@@ -574,13 +573,8 @@
 		ringcmds += cp_identifier(adreno_dev, ringcmds,
 			CMD_INTERNAL_IDENTIFIER);
 
-	if (gpudev->set_marker) {
-		/* Firmware versions before 1.49 do not support IFPC markers */
-		if (adreno_is_a6xx(adreno_dev) && (fw->version & 0xFFF) < 0x149)
-			ringcmds += gpudev->set_marker(ringcmds, IB1LIST_START);
-		else
-			ringcmds += gpudev->set_marker(ringcmds, IFPC_DISABLE);
-	}
+	if (gpudev->set_marker)
+		ringcmds += gpudev->set_marker(ringcmds, IFPC_DISABLE);
 
 	if (flags & KGSL_CMD_FLAGS_PWRON_FIXUP) {
 		/* Disable protected mode for the fixup */
@@ -694,12 +688,8 @@
 		*ringcmds++ = timestamp;
 	}
 
-	if (gpudev->set_marker) {
-		if (adreno_is_a6xx(adreno_dev) && (fw->version & 0xFFF) < 0x149)
-			ringcmds += gpudev->set_marker(ringcmds, IB1LIST_END);
-		else
-			ringcmds += gpudev->set_marker(ringcmds, IFPC_ENABLE);
-	}
+	if (gpudev->set_marker)
+		ringcmds += gpudev->set_marker(ringcmds, IFPC_ENABLE);
 
 	if (adreno_is_a3xx(adreno_dev)) {
 		/* Dummy set-constant to trigger context rollover */
@@ -856,7 +846,6 @@
 	struct adreno_ringbuffer *rb;
 	unsigned int dwords = 0;
 	struct adreno_submit_time local;
-	struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
 	bool set_ib1list_marker = false;
 
 	memset(&local, 0x0, sizeof(local));
@@ -967,13 +956,7 @@
 		if (gpudev->preemption_yield_enable)
 			dwords += 8;
 
-	/*
-	 * Prior to SQE FW version 1.49, there was only one marker for
-	 * both preemption and IFPC. Only include the IB1LIST markers if
-	 * we are using a firmware that supports them.
-	 */
-	if (gpudev->set_marker && numibs && adreno_is_a6xx(adreno_dev) &&
-			((fw->version & 0xFFF) >= 0x149)) {
+	if (gpudev->set_marker && numibs) {
 		set_ib1list_marker = true;
 		dwords += 4;
 	}
diff --git a/drivers/gpu/msm/kgsl_hfi.c b/drivers/gpu/msm/kgsl_hfi.c
index 7ea2f29..90f49cf 100644
--- a/drivers/gpu/msm/kgsl_hfi.c
+++ b/drivers/gpu/msm/kgsl_hfi.c
@@ -833,7 +833,7 @@
 	adreno_read_gmureg(ADRENO_DEVICE(device),
 			ADRENO_REG_GMU_GMU2HOST_INTR_INFO, &status);
 	adreno_write_gmureg(ADRENO_DEVICE(device),
-			ADRENO_REG_GMU_GMU2HOST_INTR_CLR, status);
+			ADRENO_REG_GMU_GMU2HOST_INTR_CLR, HFI_IRQ_MASK);
 
 	if (status & HFI_IRQ_DBGQ_MASK)
 		tasklet_hi_schedule(&hfi->tasklet);
diff --git a/drivers/gpu/msm/kgsl_snapshot.c b/drivers/gpu/msm/kgsl_snapshot.c
index 47a8dbf..799c103 100644
--- a/drivers/gpu/msm/kgsl_snapshot.c
+++ b/drivers/gpu/msm/kgsl_snapshot.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/export.h>
@@ -304,12 +304,8 @@
 
 	entry = kgsl_sharedmem_find(process, gpuaddr);
 
-	if (entry == NULL) {
-		dev_err(snapshot->device->dev,
-			"snapshot: unable to find GPU buffer 0x%016llx\n",
-			gpuaddr);
+	if (entry == NULL)
 		return -EINVAL;
-	}
 
 	/* We can't freeze external memory, because we don't own it */
 	if (entry->memdesc.flags & KGSL_MEMFLAGS_USERMEM_MASK)
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 46182d4..b7870e7 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -17,6 +17,9 @@
 #ifndef HID_IDS_H_FILE
 #define HID_IDS_H_FILE
 
+#define USB_VENDOR_ID_258A		0x258a
+#define USB_DEVICE_ID_258A_6A88		0x6a88
+
 #define USB_VENDOR_ID_3M		0x0596
 #define USB_DEVICE_ID_3M1968		0x0500
 #define USB_DEVICE_ID_3M2256		0x0502
diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
index 1882a4a..98b059d 100644
--- a/drivers/hid/hid-ite.c
+++ b/drivers/hid/hid-ite.c
@@ -42,6 +42,7 @@
 
 static const struct hid_device_id ite_devices[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, ite_devices);
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index 97954f5..1c1a251 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -4,7 +4,7 @@
 
 config HYPERV
 	tristate "Microsoft Hyper-V client drivers"
-	depends on X86 && ACPI && PCI && X86_LOCAL_APIC && HYPERVISOR_GUEST
+	depends on X86 && ACPI && X86_LOCAL_APIC && HYPERVISOR_GUEST
 	select PARAVIRT
 	help
 	  Select this option to run Linux as a Hyper-V client operating
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index b1b7880..d2a735ac9 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -888,12 +888,14 @@
 			pfn_cnt -= pgs_ol;
 			/*
 			 * Check if the corresponding memory block is already
-			 * online by checking its last previously backed page.
-			 * In case it is we need to bring rest (which was not
-			 * backed previously) online too.
+			 * online. It is possible to observe struct pages still
+			 * being uninitialized here so check section instead.
+			 * In case the section is online we need to bring the
+			 * rest of pfns (which were not backed previously)
+			 * online too.
 			 */
 			if (start_pfn > has->start_pfn &&
-			    !PageReserved(pfn_to_page(start_pfn - 1)))
+			    online_section_nr(pfn_to_section_nr(start_pfn)))
 				hv_bring_pgs_online(has, start_pfn, pgs_ol);
 
 		}
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 3e90eb9..6cb45f2 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -164,26 +164,25 @@
 }
 
 /* Get various debug metrics for the specified ring buffer. */
-void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
-				 struct hv_ring_buffer_debug_info *debug_info)
+int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
+				struct hv_ring_buffer_debug_info *debug_info)
 {
 	u32 bytes_avail_towrite;
 	u32 bytes_avail_toread;
 
-	if (ring_info->ring_buffer) {
-		hv_get_ringbuffer_availbytes(ring_info,
-					&bytes_avail_toread,
-					&bytes_avail_towrite);
+	if (!ring_info->ring_buffer)
+		return -EINVAL;
 
-		debug_info->bytes_avail_toread = bytes_avail_toread;
-		debug_info->bytes_avail_towrite = bytes_avail_towrite;
-		debug_info->current_read_index =
-			ring_info->ring_buffer->read_index;
-		debug_info->current_write_index =
-			ring_info->ring_buffer->write_index;
-		debug_info->current_interrupt_mask =
-			ring_info->ring_buffer->interrupt_mask;
-	}
+	hv_get_ringbuffer_availbytes(ring_info,
+				     &bytes_avail_toread,
+				     &bytes_avail_towrite);
+	debug_info->bytes_avail_toread = bytes_avail_toread;
+	debug_info->bytes_avail_towrite = bytes_avail_towrite;
+	debug_info->current_read_index = ring_info->ring_buffer->read_index;
+	debug_info->current_write_index = ring_info->ring_buffer->write_index;
+	debug_info->current_interrupt_mask
+		= ring_info->ring_buffer->interrupt_mask;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
 
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index c71cc85..9aa18f3 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -313,10 +313,16 @@
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info outbound;
+	int ret;
 
 	if (!hv_dev->channel)
 		return -ENODEV;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+					  &outbound);
+	if (ret < 0)
+		return ret;
+
 	return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
 }
 static DEVICE_ATTR_RO(out_intr_mask);
@@ -326,10 +332,15 @@
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info outbound;
+	int ret;
 
 	if (!hv_dev->channel)
 		return -ENODEV;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+					  &outbound);
+	if (ret < 0)
+		return ret;
 	return sprintf(buf, "%d\n", outbound.current_read_index);
 }
 static DEVICE_ATTR_RO(out_read_index);
@@ -340,10 +351,15 @@
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info outbound;
+	int ret;
 
 	if (!hv_dev->channel)
 		return -ENODEV;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+					  &outbound);
+	if (ret < 0)
+		return ret;
 	return sprintf(buf, "%d\n", outbound.current_write_index);
 }
 static DEVICE_ATTR_RO(out_write_index);
@@ -354,10 +370,15 @@
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info outbound;
+	int ret;
 
 	if (!hv_dev->channel)
 		return -ENODEV;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+					  &outbound);
+	if (ret < 0)
+		return ret;
 	return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
 }
 static DEVICE_ATTR_RO(out_read_bytes_avail);
@@ -368,10 +389,15 @@
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info outbound;
+	int ret;
 
 	if (!hv_dev->channel)
 		return -ENODEV;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+					  &outbound);
+	if (ret < 0)
+		return ret;
 	return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
 }
 static DEVICE_ATTR_RO(out_write_bytes_avail);
@@ -381,10 +407,15 @@
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info inbound;
+	int ret;
 
 	if (!hv_dev->channel)
 		return -ENODEV;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+	if (ret < 0)
+		return ret;
+
 	return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
 }
 static DEVICE_ATTR_RO(in_intr_mask);
@@ -394,10 +425,15 @@
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info inbound;
+	int ret;
 
 	if (!hv_dev->channel)
 		return -ENODEV;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+	if (ret < 0)
+		return ret;
+
 	return sprintf(buf, "%d\n", inbound.current_read_index);
 }
 static DEVICE_ATTR_RO(in_read_index);
@@ -407,10 +443,15 @@
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info inbound;
+	int ret;
 
 	if (!hv_dev->channel)
 		return -ENODEV;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+	if (ret < 0)
+		return ret;
+
 	return sprintf(buf, "%d\n", inbound.current_write_index);
 }
 static DEVICE_ATTR_RO(in_write_index);
@@ -421,10 +462,15 @@
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info inbound;
+	int ret;
 
 	if (!hv_dev->channel)
 		return -ENODEV;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+	if (ret < 0)
+		return ret;
+
 	return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
 }
 static DEVICE_ATTR_RO(in_read_bytes_avail);
@@ -435,10 +481,15 @@
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info inbound;
+	int ret;
 
 	if (!hv_dev->channel)
 		return -ENODEV;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+	if (ret < 0)
+		return ret;
+
 	return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
 }
 static DEVICE_ATTR_RO(in_write_bytes_avail);
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c
index be56a8a..389ce41 100644
--- a/drivers/hwtracing/coresight/coresight-tpdm.c
+++ b/drivers/hwtracing/coresight/coresight-tpdm.c
@@ -14,6 +14,7 @@
 #include <linux/of.h>
 #include <linux/coresight.h>
 #include <linux/regulator/consumer.h>
+#include <soc/qcom/scm.h>
 
 #include "coresight-priv.h"
 
@@ -131,6 +132,8 @@
 #define TPDM_REVISION_A		0
 #define TPDM_REVISION_B		1
 
+#define HW_ENABLE_CHECK_VALUE   0x10
+
 enum tpdm_dataset {
 	TPDM_DS_IMPLDEF,
 	TPDM_DS_DSB,
@@ -938,18 +941,11 @@
 	/* Init the default data */
 	tpdm_init_default_data(drvdata);
 
-	/* Disable tpdm if enabled */
-	if (drvdata->enable) {
-		__tpdm_disable(drvdata);
-		drvdata->enable = false;
-	}
-
 	mutex_unlock(&drvdata->lock);
 
-	if (drvdata->enable) {
-		tpdm_setup_disable(drvdata);
-		dev_info(drvdata->dev, "TPDM tracing disabled\n");
-	}
+	/* Disable tpdm if enabled */
+	if (drvdata->enable)
+		coresight_disable(drvdata->csdev);
 
 	return size;
 }
@@ -4375,12 +4371,22 @@
 	struct coresight_desc *desc;
 	static int traceid = TPDM_TRACE_ID_START;
 	uint32_t version;
+	struct scm_desc des = {0};
+	u32 scm_ret = 0;
 
 	pdata = of_get_coresight_platform_data(dev, adev->dev.of_node);
 	if (IS_ERR(pdata))
 		return PTR_ERR(pdata);
 	adev->dev.platform_data = pdata;
 
+	if (of_property_read_bool(adev->dev.of_node, "qcom,hw-enable-check")) {
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_UTIL,
+				HW_ENABLE_CHECK_VALUE), &des);
+		scm_ret = des.ret[0];
+		if (scm_ret == 0)
+			return -ENXIO;
+	}
+
 	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
 	if (!drvdata)
 		return -ENOMEM;
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 35f0bf7..57d87bd 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -18,6 +18,7 @@
 #include <linux/of_platform.h>
 #include <linux/delay.h>
 #include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
 
 #include "coresight-priv.h"
 
@@ -111,6 +112,53 @@
 	bus_for_each_dev(&coresight_bustype, NULL, NULL, coresight_reset_sink);
 }
 
+void coresight_enable_reg_clk(struct coresight_device *csdev)
+{
+	struct coresight_reg_clk *reg_clk = csdev->reg_clk;
+	int ret;
+	int i, j;
+
+	if (IS_ERR_OR_NULL(reg_clk))
+		return;
+
+	for (i = 0; i < reg_clk->nr_reg; i++) {
+		ret = regulator_enable(reg_clk->reg[i]);
+		if (ret)
+			goto err_regs;
+	}
+
+	for (j = 0; j < reg_clk->nr_clk; j++) {
+		ret = clk_prepare_enable(reg_clk->clk[j]);
+		if (ret)
+			goto err_clks;
+	}
+
+	return;
+
+err_clks:
+	for (j--; j >= 0; j--)
+		clk_disable_unprepare(reg_clk->clk[j]);
+err_regs:
+	for (i--; i >= 0; i--)
+		regulator_disable(reg_clk->reg[i]);
+}
+EXPORT_SYMBOL(coresight_enable_reg_clk);
+
+void coresight_disable_reg_clk(struct coresight_device *csdev)
+{
+	struct coresight_reg_clk *reg_clk = csdev->reg_clk;
+	int i;
+
+	if (IS_ERR_OR_NULL(reg_clk))
+		return;
+
+	for (i = reg_clk->nr_clk - 1; i >= 0; i--)
+		clk_disable_unprepare(reg_clk->clk[i]);
+	for (i = reg_clk->nr_reg - 1; i >= 0; i--)
+		regulator_disable(reg_clk->reg[i]);
+}
+EXPORT_SYMBOL(coresight_disable_reg_clk);
+
 static int coresight_find_link_inport(struct coresight_device *csdev,
 				      struct coresight_device *parent,
 				      struct list_head *path)
@@ -159,9 +207,12 @@
 
 	if (!csdev->enable) {
 		if (sink_ops(csdev)->enable) {
+			coresight_enable_reg_clk(csdev);
 			ret = sink_ops(csdev)->enable(csdev, mode);
-			if (ret)
+			if (ret) {
+				coresight_disable_reg_clk(csdev);
 				return ret;
+			}
 		}
 		csdev->enable = true;
 	}
@@ -176,6 +227,7 @@
 	if (atomic_dec_return(csdev->refcnt) == 0) {
 		if (sink_ops(csdev)->disable) {
 			sink_ops(csdev)->disable(csdev);
+			coresight_disable_reg_clk(csdev);
 			csdev->enable = false;
 			csdev->activated = false;
 		}
@@ -210,8 +262,10 @@
 
 	if (atomic_inc_return(&csdev->refcnt[refport]) == 1) {
 		if (link_ops(csdev)->enable) {
+			coresight_enable_reg_clk(csdev);
 			ret = link_ops(csdev)->enable(csdev, inport, outport);
 			if (ret) {
+				coresight_disable_reg_clk(csdev);
 				atomic_dec(&csdev->refcnt[refport]);
 				return ret;
 			}
@@ -251,8 +305,10 @@
 	}
 
 	if (atomic_dec_return(&csdev->refcnt[refport]) == 0) {
-		if (link_ops(csdev)->disable)
+		if (link_ops(csdev)->disable) {
 			link_ops(csdev)->disable(csdev, inport, outport);
+			coresight_disable_reg_clk(csdev);
+		}
 	}
 
 	for (i = 0; i < nr_conns; i++)
@@ -274,9 +330,12 @@
 
 	if (!csdev->enable) {
 		if (source_ops(csdev)->enable) {
+			coresight_enable_reg_clk(csdev);
 			ret = source_ops(csdev)->enable(csdev, NULL, mode);
-			if (ret)
+			if (ret) {
+				coresight_disable_reg_clk(csdev);
 				return ret;
+			}
 		}
 		csdev->enable = true;
 	}
@@ -297,8 +356,10 @@
 static bool coresight_disable_source(struct coresight_device *csdev)
 {
 	if (atomic_dec_return(csdev->refcnt) == 0) {
-		if (source_ops(csdev)->disable)
+		if (source_ops(csdev)->disable) {
 			source_ops(csdev)->disable(csdev, NULL);
+			coresight_disable_reg_clk(csdev);
+		}
 		csdev->enable = false;
 	}
 	return !csdev->enable;
@@ -1195,6 +1256,7 @@
 	csdev->subtype = desc->subtype;
 	csdev->ops = desc->ops;
 	csdev->orphan = false;
+	csdev->reg_clk = desc->pdata->reg_clk;
 
 	csdev->dev.type = &coresight_dev_type[desc->type];
 	csdev->dev.groups = desc->groups;
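
coresight_enable_reg_clk() above follows the standard acquire-with-rollback
shape: enable each regulator and clock in order and, on any failure, release
everything already taken in reverse order. A minimal standalone sketch of the
pattern (the resources are hypothetical stand-ins for the regulator/clk
calls):

    #include <stdio.h>

    #define NR_RES 3

    /* stands in for regulator_enable()/clk_prepare_enable() */
    static int res_enable(int i)
    {
            printf("enable %d\n", i);
            return (i == 2) ? -1 : 0;  /* force a failure on the last one */
    }

    static void res_disable(int i)
    {
            printf("disable %d\n", i);
    }

    static int enable_all(void)
    {
            int i, ret = 0;

            for (i = 0; i < NR_RES; i++) {
                    ret = res_enable(i);
                    if (ret)
                            goto err;
            }
            return 0;

    err:
            /* unwind in reverse, skipping the one that failed to enable */
            for (i--; i >= 0; i--)
                    res_disable(i);
            return ret;
    }

    int main(void)
    {
            return enable_all() ? 1 : 0;
    }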
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index dc6f111..29dbf52 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -122,6 +122,59 @@
 }
 EXPORT_SYMBOL_GPL(of_coresight_get_cpu);
 
+static struct coresight_reg_clk *
+of_coresight_get_reg_clk(struct device *dev, const struct device_node *node)
+{
+	struct coresight_reg_clk *reg_clk;
+	const char *clk_name, *reg_name;
+	int nr_reg, nr_clk, i, ret;
+
+	nr_reg = of_property_count_strings(node, "qcom,proxy-regs");
+	nr_clk = of_property_count_strings(node, "qcom,proxy-clks");
+	if (nr_reg <= 0 && nr_clk <= 0)
+		return NULL;
+
+	reg_clk = devm_kzalloc(dev, sizeof(*reg_clk), GFP_KERNEL);
+	if (!reg_clk)
+		return ERR_PTR(-ENOMEM);
+
+	reg_clk->nr_reg = nr_reg > 0 ? nr_reg : 0;
+	reg_clk->nr_clk = nr_clk > 0 ? nr_clk : 0;
+	if (nr_reg > 0) {
+		reg_clk->reg = devm_kcalloc(dev, nr_reg,
+			sizeof(*reg_clk->reg), GFP_KERNEL);
+		if (!reg_clk->reg)
+			return ERR_PTR(-ENOMEM);
+
+		for (i = 0; i < nr_reg; i++) {
+			ret = of_property_read_string_index(node,
+				"qcom,proxy-regs", i, &reg_name);
+			if (ret)
+				return ERR_PTR(ret);
+			reg_clk->reg[i] = devm_regulator_get(dev, reg_name);
+			if (IS_ERR(reg_clk->reg[i]))
+				return ERR_PTR(-EINVAL);
+		}
+	}
+	if (nr_clk > 0) {
+		reg_clk->clk = devm_kcalloc(dev, nr_clk,
+			sizeof(*reg_clk->clk), GFP_KERNEL);
+		if (!reg_clk->clk)
+			return ERR_PTR(-ENOMEM);
+
+		for (i = 0; i < nr_clk; i++) {
+			ret = of_property_read_string_index(node,
+				"qcom,proxy-clks", i, &clk_name);
+			if (ret)
+				return ERR_PTR(ret);
+			reg_clk->clk[i] = devm_clk_get(dev, clk_name);
+			if (IS_ERR(reg_clk->clk[i]))
+				return ERR_PTR(-EINVAL);
+		}
+	}
+	return reg_clk;
+}
+
 struct coresight_platform_data *
 of_get_coresight_platform_data(struct device *dev,
 			       const struct device_node *node)
@@ -212,6 +265,10 @@
 
 	pdata->cpu = of_coresight_get_cpu(node);
 
+	pdata->reg_clk = of_coresight_get_reg_clk(dev, node);
+	if (IS_ERR(pdata->reg_clk))
+		return (void *)(pdata->reg_clk);
+
 	return pdata;
 }
 EXPORT_SYMBOL_GPL(of_get_coresight_platform_data);
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index d293e55..ba7aaf4 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -1423,7 +1423,8 @@
 		if (!end)
 			break;
 
-		len -= end - p;
+		/* consume the number and the following comma, hence +1 */
+		len -= end - p + 1;
 		p = end + 1;
 	} while (len);
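
The +1 above is the whole fix: each pass must consume the parsed digits plus
the comma that follows them, otherwise len never reaches zero and the loop
walks past the input. A userspace sketch of the corrected accounting, with
strtoul standing in for the kernel parsing helper and a guard for input that
lacks the trailing comma:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* parse "16,32,64," style input, consuming number + comma per pass */
    static void parse(const char *buf)
    {
            const char *p = buf;
            size_t len = strlen(buf);
            char *end;

            do {
                    unsigned long val = strtoul(p, &end, 10);
                    size_t used;

                    if (end == p)
                            break;  /* no digits consumed */
                    printf("window: %lu\n", val);
                    /* consume the number and the following comma, hence +1 */
                    used = (size_t)(end - p) + 1;
                    if (used > len)
                            break;  /* last number had no trailing comma */
                    len -= used;
                    p = end + 1;
            } while (len);
    }

    int main(void)
    {
            parse("16,32,64,");
            return 0;
    }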
 
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 1aca742..ccd76c7 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -470,9 +470,15 @@
 					  data_arg.data);
 	}
 	case I2C_RETRIES:
+		if (arg > INT_MAX)
+			return -EINVAL;
+
 		client->adapter->retries = arg;
 		break;
 	case I2C_TIMEOUT:
+		if (arg > INT_MAX)
+			return -EINVAL;
+
 		/* For historical reasons, user-space sets the timeout
 		 * value in units of 10 ms.
 		 */
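
The INT_MAX checks above guard the implicit narrowing from the ioctl's
unsigned long argument into the int retries/timeout fields of struct
i2c_adapter; without them a huge value typically wraps to a negative count. A
standalone sketch of the failure mode and the guard (the struct here is a
stand-in):

    #include <stdio.h>
    #include <limits.h>
    #include <errno.h>

    struct adapter {
            int retries;  /* int-typed, like the field in struct i2c_adapter */
    };

    static int set_retries(struct adapter *adap, unsigned long arg)
    {
            /* reject values that would wrap when stored into an int */
            if (arg > INT_MAX)
                    return -EINVAL;
            adap->retries = arg;
            return 0;
    }

    int main(void)
    {
            struct adapter adap = {0};

            /* unchecked, ~0UL would typically have been stored as -1 */
            printf("huge: %d\n", set_retries(&adap, ~0UL));
            printf("ok: %d retries=%d\n", set_retries(&adap, 3), adap.retries);
            return 0;
    }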
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index 45c9974..0e51803 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -544,7 +544,7 @@
 		drive->proc = proc_mkdir(drive->name, parent);
 		if (drive->proc) {
 			ide_add_proc_entries(drive->proc, generic_drive_entries, drive);
-			proc_create_data("setting", S_IFREG|S_IRUSR|S_IWUSR,
+			proc_create_data("settings", S_IFREG|S_IRUSR|S_IWUSR,
 					drive->proc, &ide_settings_proc_fops,
 					drive);
 		}
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index 2ddbfc3..cba62ad2 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -124,7 +124,8 @@
 		mutex_unlock(&indio_dev->mlock);
 		if (ret < 0)
 			return ret;
-		*val = ret;
+		*val = (ret >> chan->scan_type.shift) &
+			GENMASK(chan->scan_type.realbits - 1, 0);
 		return IIO_VAL_INT;
 	case IIO_CHAN_INFO_SCALE:
 		*val = st->vref_mv;
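
The readback fix above keeps only the live data bits: shift the raw register
value right by the channel's shift, then mask to realbits. A standalone sketch
with a local GENMASK that matches the kernel macro's semantics for fields
narrower than 32 bits:

    #include <stdio.h>
    #include <stdint.h>

    /* same result as the kernel GENMASK() for h < 32 */
    #define GENMASK(h, l) (((~0u) - (1u << (l)) + 1) & (~0u >> (31 - (h))))

    /* extract a right-aligned field of 'realbits' width from a readback */
    static uint32_t extract(uint32_t raw, unsigned int shift,
                            unsigned int realbits)
    {
            return (raw >> shift) & GENMASK(realbits - 1, 0);
    }

    int main(void)
    {
            /* e.g. a 12-bit DAC code left-aligned by 4 in a 16-bit word */
            uint32_t raw = 0xABC0;

            printf("val = 0x%x\n", extract(raw, 4, 12));  /* prints 0xabc */
            return 0;
    }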
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 0385ab4..f6fa9b1 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -579,10 +579,6 @@
 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
 			      atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
 		goto err;
-	if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
-	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
-			pd->unsafe_global_rkey))
-		goto err;
 
 	if (fill_res_name_pid(msg, res))
 		goto err;
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index 25d43c8..558de0b 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -267,6 +267,9 @@
 	struct net_device *cookie_ndev = cookie;
 	bool match = false;
 
+	if (!rdma_ndev)
+		return false;
+
 	rcu_read_lock();
 	if (netif_is_bond_master(cookie_ndev) &&
 	    rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev))
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 1fc7564..34ffca6 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -488,7 +488,7 @@
 		vmf = 1;
 		break;
 	case STATUS:
-		if (flags & (unsigned long)(VM_WRITE | VM_EXEC)) {
+		if (flags & VM_WRITE) {
 			ret = -EPERM;
 			goto done;
 		}
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 3dfb4cf..48692ad 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1141,6 +1141,8 @@
 
 				if (slen > len)
 					slen = len;
+				if (slen > ss->sge.sge_length)
+					slen = ss->sge.sge_length;
 				rvt_update_sge(ss, slen, false);
 				seg_pio_copy_mid(pbuf, addr, slen);
 				len -= slen;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index f2f11e6..02f36ab 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -857,7 +857,9 @@
 
 	err = uverbs_get_flags32(&access, attrs,
 				 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
-				 IB_ACCESS_SUPPORTED);
+				 IB_ACCESS_LOCAL_WRITE |
+				 IB_ACCESS_REMOTE_WRITE |
+				 IB_ACCESS_REMOTE_READ);
 	if (err)
 		return err;
 
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 9973ac8..3db2324 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -334,13 +334,16 @@
 
 	usnic_dbg("\n");
 
-	mutex_lock(&us_ibdev->usdev_lock);
 	if (ib_get_eth_speed(ibdev, port, &props->active_speed,
-			     &props->active_width)) {
-		mutex_unlock(&us_ibdev->usdev_lock);
+			     &props->active_width))
 		return -EINVAL;
-	}
 
+	/*
+	 * usdev_lock is acquired after (and not before) the ib_get_eth_speed
+	 * call, because acquiring rtnl_lock in ib_get_eth_speed while
+	 * holding usdev_lock could lead to a deadlock.
+	 */
+	mutex_lock(&us_ibdev->usdev_lock);
 	/* props being zeroed by the caller, avoid zeroing it here */
 
 	props->lid = 0;
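
The comment above encodes a lock-ordering rule: ib_get_eth_speed() takes
rtnl_lock internally, so calling it while holding usdev_lock creates an
usdev -> rtnl ordering that can deadlock against any path taking the locks
the other way round. The fix lets the rtnl-taking call complete before
usdev_lock is acquired. A tiny pthread sketch of the safe sequencing (locks
and work here are hypothetical):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t usdev = PTHREAD_MUTEX_INITIALIZER;

    /* stands in for ib_get_eth_speed(), which takes rtnl internally */
    static void get_eth_speed(void)
    {
            pthread_mutex_lock(&rtnl);
            puts("query link speed under rtnl");
            pthread_mutex_unlock(&rtnl);
    }

    static void query_port(void)
    {
            /*
             * Safe ordering: let the rtnl-taking call complete first,
             * then take usdev. Holding usdev across get_eth_speed()
             * would establish usdev -> rtnl and risk a deadlock.
             */
            get_eth_speed();

            pthread_mutex_lock(&usdev);
            puts("fill remaining port properties under usdev");
            pthread_mutex_unlock(&usdev);
    }

    int main(void)
    {
            query_port();
            return 0;
    }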
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index 42b8685..3c633ab 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -427,7 +427,40 @@
 
 static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op)
 {
-	return (enum pvrdma_wr_opcode)op;
+	switch (op) {
+	case IB_WR_RDMA_WRITE:
+		return PVRDMA_WR_RDMA_WRITE;
+	case IB_WR_RDMA_WRITE_WITH_IMM:
+		return PVRDMA_WR_RDMA_WRITE_WITH_IMM;
+	case IB_WR_SEND:
+		return PVRDMA_WR_SEND;
+	case IB_WR_SEND_WITH_IMM:
+		return PVRDMA_WR_SEND_WITH_IMM;
+	case IB_WR_RDMA_READ:
+		return PVRDMA_WR_RDMA_READ;
+	case IB_WR_ATOMIC_CMP_AND_SWP:
+		return PVRDMA_WR_ATOMIC_CMP_AND_SWP;
+	case IB_WR_ATOMIC_FETCH_AND_ADD:
+		return PVRDMA_WR_ATOMIC_FETCH_AND_ADD;
+	case IB_WR_LSO:
+		return PVRDMA_WR_LSO;
+	case IB_WR_SEND_WITH_INV:
+		return PVRDMA_WR_SEND_WITH_INV;
+	case IB_WR_RDMA_READ_WITH_INV:
+		return PVRDMA_WR_RDMA_READ_WITH_INV;
+	case IB_WR_LOCAL_INV:
+		return PVRDMA_WR_LOCAL_INV;
+	case IB_WR_REG_MR:
+		return PVRDMA_WR_FAST_REG_MR;
+	case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
+		return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP;
+	case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
+		return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD;
+	case IB_WR_REG_SIG_MR:
+		return PVRDMA_WR_REG_SIG_MR;
+	default:
+		return PVRDMA_WR_ERROR;
+	}
 }
 
 static inline enum ib_wc_status pvrdma_wc_status_to_ib(
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index 60083c0..9aeb330 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -721,6 +721,12 @@
 		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
 			wqe_hdr->ex.imm_data = wr->ex.imm_data;
 
+		if (unlikely(wqe_hdr->opcode == PVRDMA_WR_ERROR)) {
+			*bad_wr = wr;
+			ret = -EINVAL;
+			goto out;
+		}
+
 		switch (qp->ibqp.qp_type) {
 		case IB_QPT_GSI:
 		case IB_QPT_UD:
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 8be2723..fa98a52 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -640,6 +640,7 @@
 			rmr->access = wqe->wr.wr.reg.access;
 			rmr->lkey = wqe->wr.wr.reg.key;
 			rmr->rkey = wqe->wr.wr.reg.key;
+			rmr->iova = wqe->wr.wr.reg.mr->iova;
 			wqe->state = wqe_state_done;
 			wqe->status = IB_WC_SUCCESS;
 		} else {
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index fc6c880..4111b79 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -844,11 +844,16 @@
 
 	memset(&cqe, 0, sizeof(cqe));
 
-	wc->wr_id		= wqe->wr_id;
-	wc->status		= qp->resp.status;
-	wc->qp			= &qp->ibqp;
+	if (qp->rcq->is_user) {
+		uwc->status             = qp->resp.status;
+		uwc->qp_num             = qp->ibqp.qp_num;
+		uwc->wr_id              = wqe->wr_id;
+	} else {
+		wc->status              = qp->resp.status;
+		wc->qp                  = &qp->ibqp;
+		wc->wr_id               = wqe->wr_id;
+	}
 
-	/* fields after status are not required for errors */
 	if (wc->status == IB_WC_SUCCESS) {
 		wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
 				pkt->mask & RXE_WRITE_MASK) ?
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index f37cbad..f4bce5a 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2009,6 +2009,14 @@
 	kfree_rcu(ch, rcu);
 }
 
+/*
+ * Shut down the SCSI target session, tell the connection manager to
+ * disconnect the associated RDMA channel, transition the QP to the error
+ * state and remove the channel from the channel list. This function is
+ * typically called from inside srpt_zerolength_write_done(). Concurrent
+ * srpt_zerolength_write() calls from inside srpt_close_ch() are possible
+ * as long as the channel is on sport->nexus_list.
+ */
 static void srpt_release_channel_work(struct work_struct *w)
 {
 	struct srpt_rdma_ch *ch;
@@ -2036,6 +2044,11 @@
 	else
 		ib_destroy_cm_id(ch->ib_cm.cm_id);
 
+	sport = ch->sport;
+	mutex_lock(&sport->mutex);
+	list_del_rcu(&ch->list);
+	mutex_unlock(&sport->mutex);
+
 	srpt_destroy_ch_ib(ch);
 
 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
@@ -2046,11 +2059,6 @@
 			     sdev, ch->rq_size,
 			     srp_max_req_size, DMA_FROM_DEVICE);
 
-	sport = ch->sport;
-	mutex_lock(&sport->mutex);
-	list_del_rcu(&ch->list);
-	mutex_unlock(&sport->mutex);
-
 	wake_up(&sport->ch_releaseQ);
 
 	kref_put(&ch->kref, srpt_free_ch);
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index cfc8b94..aa4e431 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -252,6 +252,8 @@
 	{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
 	{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
 	{ 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
+	{ 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
+	{ 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
 	{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
 	{ 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
@@ -428,6 +430,7 @@
 	XPAD_XBOXONE_VENDOR(0x0e6f),		/* 0x0e6f X-Box One controllers */
 	XPAD_XBOX360_VENDOR(0x0f0d),		/* Hori Controllers */
 	XPAD_XBOXONE_VENDOR(0x0f0d),		/* Hori Controllers */
+	XPAD_XBOX360_VENDOR(0x1038),		/* SteelSeries Controllers */
 	XPAD_XBOX360_VENDOR(0x11c9),		/* Nacon GC100XF */
 	XPAD_XBOX360_VENDOR(0x12ab),		/* X-Box 360 dance pads */
 	XPAD_XBOX360_VENDOR(0x1430),		/* RedOctane X-Box 360 controllers */
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index a7dc286..840e537 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -126,12 +126,8 @@
 {
 	struct omap4_keypad *keypad_data = dev_id;
 
-	if (kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS)) {
-		/* Disable interrupts */
-		kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
-				 OMAP4_VAL_IRQDISABLE);
+	if (kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS))
 		return IRQ_WAKE_THREAD;
-	}
 
 	return IRQ_NONE;
 }
@@ -173,11 +169,6 @@
 	kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
 			 kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));
 
-	/* enable interrupts */
-	kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
-		OMAP4_DEF_IRQENABLE_EVENTEN |
-				OMAP4_DEF_IRQENABLE_LONGKEY);
-
 	return IRQ_HANDLED;
 }
 
@@ -214,9 +205,10 @@
 
 	disable_irq(keypad_data->irq);
 
-	/* Disable interrupts */
+	/* Disable interrupts and wake-up events */
 	kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
 			 OMAP4_VAL_IRQDISABLE);
+	kbd_writel(keypad_data, OMAP4_KBD_WAKEUPENABLE, 0);
 
 	/* clear pending interrupts */
 	kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
@@ -365,7 +357,7 @@
 	}
 
 	error = request_threaded_irq(keypad_data->irq, omap4_keypad_irq_handler,
-				     omap4_keypad_irq_thread_fn, 0,
+				     omap4_keypad_irq_thread_fn, IRQF_ONESHOT,
 				     "omap4-keypad", keypad_data);
 	if (error) {
 		dev_err(&pdev->dev, "failed to register interrupt\n");
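
The IRQF_ONESHOT conversion above lets the IRQ core keep the line masked between the hard handler's IRQ_WAKE_THREAD and the threaded handler's return, which is why the manual IRQENABLE disable/re-enable pairs could be deleted. A minimal sketch of the pattern, with hypothetical my_* helpers (not the omap4-keypad driver itself):

#include <linux/interrupt.h>

static irqreturn_t my_hard_handler(int irq, void *dev_id)
{
	/* Hard-IRQ context: only check whether the interrupt is ours. */
	if (!my_device_has_events(dev_id))	/* hypothetical helper */
		return IRQ_NONE;

	/* IRQF_ONESHOT keeps the line masked until the thread finishes. */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t my_thread_fn(int irq, void *dev_id)
{
	my_device_process_events(dev_id);	/* hypothetical helper */
	return IRQ_HANDLED;	/* the core unmasks the line after this */
}

static int my_request(int irq, void *dev_id)
{
	return request_threaded_irq(irq, my_hard_handler, my_thread_fn,
				    IRQF_ONESHOT, "my-device", dev_id);
}
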
diff --git a/drivers/input/misc/qti-haptics.c b/drivers/input/misc/qti-haptics.c
index fab47fc..5e5274a 100644
--- a/drivers/input/misc/qti-haptics.c
+++ b/drivers/input/misc/qti-haptics.c
@@ -179,6 +179,7 @@
 	int			brake_pattern_length;
 	bool			brake_en;
 	bool			lra_auto_res_disable;
+	enum wf_src		wf_src;
 };
 
 struct qti_hap_play_info {
@@ -193,11 +194,9 @@
 	enum actutor_type	act_type;
 	enum lra_res_sig_shape	lra_shape;
 	enum lra_auto_res_mode	lra_auto_res_mode;
-	enum wf_src		ext_src;
 	u16			vmax_mv;
 	u16			play_rate_us;
 	bool			lra_allow_variable_play_rate;
-	bool			use_ext_wf_src;
 };
 
 struct qti_hap_chip {
@@ -205,7 +204,6 @@
 	struct device			*dev;
 	struct regmap			*regmap;
 	struct input_dev		*input_dev;
-	struct pwm_device		*pwm_dev;
 	struct qti_hap_config		config;
 	struct qti_hap_play_info	play;
 	struct qti_hap_effect		*predefined;
@@ -228,6 +226,7 @@
 
 static int wf_repeat[8] = {1, 2, 4, 8, 16, 32, 64, 128};
 static int wf_s_repeat[4] = {1, 2, 4, 8};
+static const char * const wf_src_str[] = {"vmax", "buffer", "audio", "pwm"};
 
 static inline bool is_secure(u8 addr)
 {
@@ -398,6 +397,11 @@
 	int rc = 0;
 	size_t len;
 
+	if (effect->pattern == NULL) {
+		dev_dbg(chip->dev, "no pattern for effect %d\n", effect->id);
+		return 0;
+	}
+
 	if (play->playing_pos == effect->pattern_length) {
 		dev_dbg(chip->dev, "pattern playing done\n");
 		return 0;
@@ -493,11 +497,8 @@
 	int rc;
 
 	addr = REG_HAP_SEL;
-	mask = HAP_WF_SOURCE_MASK | HAP_WF_TRIGGER_BIT;
+	mask = HAP_WF_SOURCE_MASK;
 	val = src << HAP_WF_SOURCE_SHIFT;
-	if (src == EXT_WF_AUDIO || src == EXT_WF_PWM)
-		val |= HAP_WF_TRIGGER_BIT;
-
 	rc = qti_haptics_masked_write(chip, addr, mask, val);
 	if (rc < 0)
 		dev_err(chip->dev, "set HAP_SEL failed, rc=%d\n", rc);
@@ -704,20 +705,22 @@
 	if (rc < 0)
 		return rc;
 
-	rc = qti_haptics_config_wf_buffer(chip);
+	/* Set corresponding WF_SOURCE */
+	rc = qti_haptics_config_wf_src(chip, play->effect->wf_src);
 	if (rc < 0)
 		return rc;
 
-	rc = qti_haptics_config_wf_repeat(chip);
-	if (rc < 0)
-		return rc;
+	if (play->effect->wf_src == INT_WF_BUFFER) {
+		rc = qti_haptics_config_wf_buffer(chip);
+		if (rc < 0)
+			return rc;
 
-	/* Set WF_SOURCE to buffer */
-	rc = qti_haptics_config_wf_src(chip, INT_WF_BUFFER);
-	if (rc < 0)
-		return rc;
+		rc = qti_haptics_config_wf_repeat(chip);
+		if (rc < 0)
+			return rc;
 
-	play->playing_pattern = true;
+		play->playing_pattern = true;
+	}
 
 	return 0;
 }
@@ -812,6 +815,17 @@
 	struct qti_hap_effect *effect = play->effect;
 	int tmp;
 
+	/*
+	 * Set the play length to 0 when playing a LINE-IN signal;
+	 * playback has to be stopped explicitly by the
+	 * requester.
+	 */
+	if (effect->wf_src == EXT_WF_PWM ||
+			effect->wf_src == EXT_WF_AUDIO) {
+		*length_us = 0;
+		return;
+	}
+
 	tmp = effect->pattern_length * effect->play_rate_us;
 	tmp *= wf_s_repeat[effect->wf_s_repeat_n];
 	tmp *= wf_repeat[effect->wf_repeat_n];
@@ -961,11 +975,15 @@
 				disable_irq_nosync(chip->play_irq);
 				chip->play_irq_en = false;
 			}
-			secs = play->length_us / USEC_PER_SEC;
-			nsecs = (play->length_us % USEC_PER_SEC) *
-				NSEC_PER_USEC;
-			hrtimer_start(&chip->stop_timer, ktime_set(secs, nsecs),
-					HRTIMER_MODE_REL);
+
+			if (play->length_us != 0) {
+				secs = play->length_us / USEC_PER_SEC;
+				nsecs = (play->length_us % USEC_PER_SEC) *
+					NSEC_PER_USEC;
+				hrtimer_start(&chip->stop_timer,
+						ktime_set(secs, nsecs),
+						HRTIMER_MODE_REL);
+			}
 		}
 	} else {
 		play->length_us = 0;
@@ -1085,19 +1103,15 @@
 	if (rc < 0)
 		return rc;
 
-	/* Set external waveform source if it's used */
-	if (config->use_ext_wf_src) {
-		rc = qti_haptics_config_wf_src(chip, config->ext_src);
-		if (rc < 0)
-			return rc;
-	}
-
 	/*
 	 * Skip configurations below for ERM actuator
 	 * as they're only for LRA actuators
 	 */
-	if (config->act_type == ACT_ERM)
-		return 0;
+	if (config->act_type == ACT_ERM) {
+		/* Disable AUTO_RES for ERM */
+		rc = qti_haptics_lra_auto_res_enable(chip, false);
+		return rc;
+	}
 
 	addr = REG_HAP_CFG2;
 	val = config->lra_shape;
@@ -1206,6 +1220,61 @@
 			effect->vmax_mv = (tmp > HAP_VMAX_MV_MAX) ?
 				HAP_VMAX_MV_MAX : tmp;
 
+		effect->play_rate_us = config->play_rate_us;
+		rc = of_property_read_u32(child_node, "qcom,wf-play-rate-us",
+				&tmp);
+		if (rc < 0)
+			dev_dbg(chip->dev, "Read qcom,wf-play-rate-us failed, rc=%d\n",
+					rc);
+		else
+			effect->play_rate_us = tmp;
+
+		if (config->act_type == ACT_LRA &&
+				!config->lra_allow_variable_play_rate &&
+				config->play_rate_us != effect->play_rate_us) {
+			dev_warn(chip->dev, "play rate should match the LRA resonance frequency\n");
+			effect->play_rate_us = config->play_rate_us;
+		}
+
+		effect->lra_auto_res_disable = of_property_read_bool(child_node,
+				"qcom,lra-auto-resonance-disable");
+
+		tmp = of_property_count_elems_of_size(child_node,
+				"qcom,wf-brake-pattern", sizeof(u8));
+		if (tmp > 0) {
+			if (tmp > HAP_BRAKE_PATTERN_MAX) {
+				dev_err(chip->dev, "wf-brake-pattern shouldn't be more than %d bytes\n",
+						HAP_BRAKE_PATTERN_MAX);
+				return -EINVAL;
+			}
+
+			rc = of_property_read_u8_array(child_node,
+					"qcom,wf-brake-pattern",
+					effect->brake, tmp);
+			if (rc < 0) {
+				dev_err(chip->dev, "Failed to get wf-brake-pattern, rc=%d\n",
+						rc);
+				return rc;
+			}
+
+			effect->brake_pattern_length = tmp;
+			verify_brake_setting(effect);
+		}
+
+		effect->wf_src = INT_WF_BUFFER;
+		if (of_property_read_bool(child_node, "qcom,wf-line-in-pwm"))
+			effect->wf_src = EXT_WF_PWM;
+		if (of_property_read_bool(child_node, "qcom,wf-line-in-audio"))
+			effect->wf_src = EXT_WF_AUDIO;
+
+		/*
+		 * Ignore the wf-pattern configuration if the effect
+		 * is supposed to play its waveform/signal from the
+		 * LINE-IN pin
+		 */
+		if (effect->wf_src != INT_WF_BUFFER)
+			continue;
+
 		rc = of_property_count_elems_of_size(child_node,
 				"qcom,wf-pattern", sizeof(u8));
 		if (rc < 0) {
@@ -1231,22 +1300,6 @@
 			return rc;
 		}
 
-		effect->play_rate_us = config->play_rate_us;
-		rc = of_property_read_u32(child_node, "qcom,wf-play-rate-us",
-				&tmp);
-		if (rc < 0)
-			dev_dbg(chip->dev, "Read qcom,wf-play-rate-us failed, rc=%d\n",
-					rc);
-		else
-			effect->play_rate_us = tmp;
-
-		if (config->act_type == ACT_LRA &&
-				!config->lra_allow_variable_play_rate &&
-				config->play_rate_us != effect->play_rate_us) {
-			dev_warn(chip->dev, "play rate should match with LRA resonance frequency\n");
-			effect->play_rate_us = config->play_rate_us;
-		}
-
 		rc = of_property_read_u32(child_node, "qcom,wf-repeat-count",
 				&tmp);
 		if (rc < 0) {
@@ -1272,53 +1325,33 @@
 
 			effect->wf_s_repeat_n = j;
 		}
-
-		effect->lra_auto_res_disable = of_property_read_bool(child_node,
-				"qcom,lra-auto-resonance-disable");
-
-		tmp = of_property_count_elems_of_size(child_node,
-				"qcom,wf-brake-pattern", sizeof(u8));
-		if (tmp <= 0)
-			continue;
-
-		if (tmp > HAP_BRAKE_PATTERN_MAX) {
-			dev_err(chip->dev, "wf-brake-pattern shouldn't be more than %d bytes\n",
-					HAP_BRAKE_PATTERN_MAX);
-			return -EINVAL;
-		}
-
-		rc = of_property_read_u8_array(child_node,
-				"qcom,wf-brake-pattern", effect->brake, tmp);
-		if (rc < 0) {
-			dev_err(chip->dev, "Failed to get wf-brake-pattern, rc=%d\n",
-					rc);
-			return rc;
-		}
-
-		effect->brake_pattern_length = tmp;
-		verify_brake_setting(effect);
 	}
 
 	for (j = 0; j < i; j++) {
 		dev_dbg(chip->dev, "effect: %d\n", chip->predefined[j].id);
-		dev_dbg(chip->dev, "        vmax: %d mv\n",
+		dev_dbg(chip->dev, "    vmax: %d mv\n",
 				chip->predefined[j].vmax_mv);
-		dev_dbg(chip->dev, "        play_rate: %d us\n",
-				chip->predefined[j].play_rate_us);
-		for (m = 0; m < chip->predefined[j].pattern_length; m++)
-			dev_dbg(chip->dev, "        pattern[%d]: 0x%x\n",
-					m, chip->predefined[j].pattern[m]);
-		for (m = 0; m < chip->predefined[j].brake_pattern_length; m++)
-			dev_dbg(chip->dev, "        brake_pattern[%d]: 0x%x\n",
-					m, chip->predefined[j].brake[m]);
+		dev_dbg(chip->dev, "    waveform source: %s\n",
+				wf_src_str[chip->predefined[j].wf_src]);
 		dev_dbg(chip->dev, "    brake_en: %d\n",
 				chip->predefined[j].brake_en);
+		for (m = 0; m < chip->predefined[j].brake_pattern_length; m++)
+			dev_dbg(chip->dev, "    brake_pattern[%d]: 0x%x\n",
+					m, chip->predefined[j].brake[m]);
+		dev_dbg(chip->dev, "    lra_auto_res_disable: %d\n",
+				chip->predefined[j].lra_auto_res_disable);
+		if (chip->predefined[j].wf_src != INT_WF_BUFFER)
+			continue;
+
+		for (m = 0; m < chip->predefined[j].pattern_length; m++)
+			dev_dbg(chip->dev, "    pattern[%d]: 0x%x\n",
+					m, chip->predefined[j].pattern[m]);
+		dev_dbg(chip->dev, "    play_rate: %d us\n",
+				chip->predefined[j].play_rate_us);
 		dev_dbg(chip->dev, "    wf_repeat_n: %d\n",
 				chip->predefined[j].wf_repeat_n);
 		dev_dbg(chip->dev, "    wf_s_repeat_n: %d\n",
 				chip->predefined[j].wf_s_repeat_n);
-		dev_dbg(chip->dev, "    lra_auto_res_disable: %d\n",
-				chip->predefined[j].lra_auto_res_disable);
 	}
 
 	return 0;
@@ -1422,22 +1455,6 @@
 		config->play_rate_us = (tmp >= HAP_PLAY_RATE_US_MAX) ?
 			HAP_PLAY_RATE_US_MAX : tmp;
 
-	if (of_find_property(node, "qcom,external-waveform-source", NULL)) {
-		if (!of_property_read_string(node,
-				"qcom,external-waveform-source", &str)) {
-			if (strcmp(str, "audio") == 0) {
-				config->ext_src = EXT_WF_AUDIO;
-			} else if (strcmp(str, "pwm") == 0) {
-				config->ext_src = EXT_WF_PWM;
-			} else {
-				dev_err(chip->dev, "Invalid external waveform source: %s\n",
-						str);
-				return -EINVAL;
-			}
-		}
-		config->use_ext_wf_src = true;
-	}
-
 	if (of_find_property(node, "vdd-supply", NULL)) {
 		chip->vdd_supply = devm_regulator_get(chip->dev, "vdd");
 		if (IS_ERR(chip->vdd_supply)) {
@@ -1604,6 +1621,34 @@
 DEFINE_DEBUGFS_ATTRIBUTE(auto_res_debugfs_ops,  auto_res_dbgfs_read,
 		auto_res_dbgfs_write, "%llu\n");
 
+#define WF_SRC_BYTES	12
+static ssize_t wf_src_dbgfs_read(struct file *filep,
+		char __user *buf, size_t count, loff_t *ppos)
+{
+	struct qti_hap_effect *effect =
+		(struct qti_hap_effect *)filep->private_data;
+	char kbuf[WF_SRC_BYTES] = {0};
+	int rc, length;
+
+	length = snprintf(kbuf, WF_SRC_BYTES, "%s",
+			wf_src_str[effect->wf_src]);
+
+	if (length > WF_SRC_BYTES - 2)
+		return -EINVAL;
+
+	kbuf[length++] = '\n';
+	kbuf[length++] = '\0';
+
+	rc = simple_read_from_buffer(buf, count, ppos, kbuf, length);
+	return rc;
+}
+
+static const struct file_operations wf_src_dbgfs_ops = {
+	.read = wf_src_dbgfs_read,
+	.owner = THIS_MODULE,
+	.open = simple_open,
+};
+
 #define CHAR_PER_PATTERN 8
 static ssize_t brake_pattern_dbgfs_read(struct file *filep,
 		char __user *buf, size_t count, loff_t *ppos)
@@ -1787,20 +1832,6 @@
 		return -ENOMEM;
 	}
 
-	file = debugfs_create_file("wf_repeat_n", 0644, dir,
-			effect, &wf_repeat_n_debugfs_ops);
-	if (!file) {
-		pr_err("create wf-repeat debugfs node failed\n");
-		return -ENOMEM;
-	}
-
-	file = debugfs_create_file("wf_s_repeat_n", 0644, dir,
-			effect, &wf_s_repeat_n_debugfs_ops);
-	if (!file) {
-		pr_err("create wf-s-repeat debugfs node failed\n");
-		return -ENOMEM;
-	}
-
 	file = debugfs_create_file("lra_auto_res_en", 0644, dir,
 			effect, &auto_res_debugfs_ops);
 	if (!file) {
@@ -1815,6 +1846,16 @@
 		return -ENOMEM;
 	}
 
+	file = debugfs_create_file("wf_src", 0444, dir,
+			effect, &wf_src_dbgfs_ops);
+	if (!file) {
+		pr_err("create wf_src debugfs node failed\n");
+		return -ENOMEM;
+	}
+
+	if (effect->wf_src == EXT_WF_AUDIO || effect->wf_src == EXT_WF_PWM)
+		return 0;
+
 	file = debugfs_create_file("pattern", 0644, dir,
 			effect, &pattern_dbgfs_ops);
 	if (!file) {
@@ -1822,6 +1863,20 @@
 		return -ENOMEM;
 	}
 
+	file = debugfs_create_file("wf_repeat_n", 0644, dir,
+			effect, &wf_repeat_n_debugfs_ops);
+	if (!file) {
+		pr_err("create wf_repeat debugfs node failed\n");
+		return -ENOMEM;
+	}
+
+	file = debugfs_create_file("wf_s_repeat_n", 0644, dir,
+			effect, &wf_s_repeat_n_debugfs_ops);
+	if (!file) {
+		pr_err("create wf_s_repeat debugfs node failed\n");
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 
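
The stop-timer logic above converts a microsecond play length into the (seconds, nanoseconds) pair that ktime_set() expects, and skips the timer entirely for LINE-IN effects whose length is 0. A sketch of just that conversion, with an illustrative helper name:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

/* Illustrative helper, not part of qti-haptics itself. */
static void arm_stop_timer(struct hrtimer *timer, u64 length_us)
{
	u64 secs = length_us / USEC_PER_SEC;
	u64 nsecs = (length_us % USEC_PER_SEC) * NSEC_PER_USEC;

	/* length_us == 0 means "play until explicitly stopped": no timer. */
	if (length_us)
		hrtimer_start(timer, ktime_set(secs, nsecs),
			      HRTIMER_MODE_REL);
}
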
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 8ec483e..26ec603f 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -39,6 +39,7 @@
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/miscdevice.h>
+#include <linux/overflow.h>
 #include <linux/input/mt.h>
 #include "../input-compat.h"
 
@@ -405,7 +406,7 @@
 static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
 				   const struct input_absinfo *abs)
 {
-	int min, max;
+	int min, max, range;
 
 	min = abs->minimum;
 	max = abs->maximum;
@@ -417,7 +418,7 @@
 		return -EINVAL;
 	}
 
-	if (abs->flat > max - min) {
+	if (!check_sub_overflow(max, min, &range) && abs->flat > range) {
 		printk(KERN_DEBUG
 		       "%s: abs_flat #%02x out of range: %d (min:%d/max:%d)\n",
 		       UINPUT_NAME, code, abs->flat, min, max);
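
check_sub_overflow() from <linux/overflow.h> reports whether the subtraction itself overflows, which the old open-coded max - min could do as undefined signed overflow (for example max = INT_MAX, min = INT_MIN). A sketch of the check in isolation:

#include <linux/overflow.h>
#include <linux/types.h>

static bool flat_in_range(int min, int max, int flat)
{
	int range;

	/* A true return means max - min does not fit in an int. */
	if (check_sub_overflow(max, min, &range))
		return false;

	return flat <= range;
}
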
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index a94b649..f322a17 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1336,6 +1336,7 @@
 static const struct acpi_device_id elan_acpi_id[] = {
 	{ "ELAN0000", 0 },
 	{ "ELAN0100", 0 },
+	{ "ELAN0501", 0 },
 	{ "ELAN0600", 0 },
 	{ "ELAN0602", 0 },
 	{ "ELAN0605", 0 },
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 2d95e8d..9fe075c 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1767,6 +1767,18 @@
 module_param_named(elantech_smbus, elantech_smbus, int, 0644);
 MODULE_PARM_DESC(elantech_smbus, "Use a secondary bus for the Elantech device.");
 
+static const char * const i2c_blacklist_pnp_ids[] = {
+	/*
+	 * These are known not to work properly, as bits are missing
+	 * in elan_i2c.
+	 */
+	"LEN2131", /* ThinkPad P52 w/ NFC */
+	"LEN2132", /* ThinkPad P52 */
+	"LEN2133", /* ThinkPad P72 w/ NFC */
+	"LEN2134", /* ThinkPad P72 */
+	NULL
+};
+
 static int elantech_create_smbus(struct psmouse *psmouse,
 				 struct elantech_device_info *info,
 				 bool leave_breadcrumbs)
@@ -1802,10 +1814,12 @@
 
 	if (elantech_smbus == ELANTECH_SMBUS_NOT_SET) {
 		/*
-		 * New ICs are enabled by default.
+		 * New ICs are enabled by default, unless mentioned in
+		 * i2c_blacklist_pnp_ids.
 		 * Old ICs are up to the user to decide.
 		 */
-		if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
+		if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version) ||
+		    psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids))
 			return -ENXIO;
 	}
 
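
psmouse_matches_pnp_id() walks a NULL-terminated id table like the blacklist added above. A simplified sketch of that lookup shape (the real helper matches against the device's firmware-reported id string):

#include <linux/string.h>
#include <linux/types.h>

static bool id_in_table(const char *id, const char * const table[])
{
	int i;

	for (i = 0; table[i]; i++)	/* the table ends with a NULL entry */
		if (!strcmp(id, table[i]))
			return true;

	return false;
}
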
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 2bd5bb1..b6da0c1 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -171,6 +171,7 @@
 	"LEN0046", /* X250 */
 	"LEN004a", /* W541 */
 	"LEN005b", /* P50 */
+	"LEN005e", /* T560 */
 	"LEN0071", /* T480 */
 	"LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
 	"LEN0073", /* X1 Carbon G5 (Elantech) */
@@ -178,6 +179,7 @@
 	"LEN0096", /* X280 */
 	"LEN0097", /* X280 -> ALPS trackpoint */
 	"LEN200f", /* T450s */
+	"SYN3052", /* HP EliteBook 840 G4 */
 	"SYN3221", /* HP 15-ay000 */
 	NULL
 };
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 3232af5..a7ace07 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -1586,10 +1586,10 @@
 	/* T7 config may have changed */
 	mxt_init_t7_power_cfg(data);
 
-release_raw:
-	kfree(cfg.raw);
 release_mem:
 	kfree(cfg.mem);
+release_raw:
+	kfree(cfg.raw);
 	return ret;
 }
 
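
The label swap above restores the usual unwind idiom: cleanup labels sit in reverse allocation order, so each goto frees the failing stage and falls through to everything allocated before it. A sketch with illustrative names (not the atmel_mxt_ts structures):

#include <linux/errno.h>
#include <linux/slab.h>

static int setup_buffers(size_t n)
{
	u8 *raw, *mem;
	int ret;

	raw = kmalloc(n, GFP_KERNEL);
	if (!raw)
		return -ENOMEM;

	mem = kmalloc(n, GFP_KERNEL);
	if (!mem) {
		ret = -ENOMEM;
		goto release_raw;	/* only 'raw' exists at this point */
	}

	ret = apply_cfg(raw, mem);	/* hypothetical work */
	if (ret)
		goto release_mem;	/* frees 'mem', falls through to 'raw' */

	ret = commit_cfg(mem);		/* hypothetical work */

	/* Both buffers are temporary: success unwinds through the labels too. */
release_mem:
	kfree(mem);
release_raw:
	kfree(raw);
	return ret;
}
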
diff --git a/drivers/input/touchscreen/st/fts.c b/drivers/input/touchscreen/st/fts.c
index 775e92d..1afb997 100644
--- a/drivers/input/touchscreen/st/fts.c
+++ b/drivers/input/touchscreen/st/fts.c
@@ -3526,14 +3526,12 @@
 	hrtimer_start(&info->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
 #else
 	logError(0, "%s Interrupt Mode\n", tag);
-	if (request_irq(info->client->irq, fts_interrupt_handler,
-		IRQF_TRIGGER_LOW, info->client->name, info)) {
+	if (request_threaded_irq(info->client->irq, NULL, fts_interrupt_handler,
+		IRQF_TRIGGER_LOW | IRQF_ONESHOT, info->client->name, info)) {
 		logError(1, "%s Request irq failed\n", tag);
 		kfree(info->event_dispatch_table);
 		error = -EBUSY;
-	} /*else {*/
-	/*error = fts_enableInterrupt();*/
-	/*}*/
+	}
 #endif
 	return error;
 }
@@ -3558,6 +3556,21 @@
 #else
 	enable_irq(info->client->irq);
 #endif
+	/* enable the touch IC irq */
+	fts_enableInterrupt();
+}
+
+static void fts_interrupt_disable(struct fts_ts_info *info)
+{
+	/* disable the touch IC irq */
+	fts_disableInterrupt();
+
+#ifdef FTS_USE_POLLING_MODE
+	hrtimer_cancel(&info->timer);
+#else
+	disable_irq(info->client->irq);
+#endif
 }
 
 static int fts_init(struct fts_ts_info *info)
@@ -4041,11 +4054,6 @@
 
 	__pm_wakeup_event(&info->wakeup_source, HZ);
 
-	if (fts_enable_reg(info, true) < 0) {
-		logError(1, "%s %s: ERROR Failed to enable regulators\n",
-			tag, __func__);
-	}
-
 	if (info->ts_pinctrl) {
 		/*
 		 * Pinctrl handle is optional. If pinctrl handle is found
@@ -4060,10 +4068,11 @@
 	}
 
 	info->resume_bit = 1;
+
+	fts_system_reset();
 #ifdef USE_NOISE_PARAM
 	readNoiseParameters(noise_params);
 #endif
-	fts_system_reset();
 
 #ifdef USE_NOISE_PARAM
 	writeNoiseParameters(noise_params);
@@ -4075,7 +4084,7 @@
 
 	info->sensor_sleep = false;
 
-	fts_enableInterrupt();
+	fts_interrupt_enable(info);
 }
 
 
@@ -4091,11 +4100,10 @@
 
 	fts_mode_handler(info, 0);
 
+	fts_interrupt_disable(info);
 	release_all_touches(info);
 	info->sensor_sleep = true;
 
-	fts_enableInterrupt();
-
 	if (info->ts_pinctrl) {
 		/*
 		 * Pinctrl handle is optional. If pinctrl handle is found
@@ -4109,7 +4117,6 @@
 		}
 	}
 
-	fts_enable_reg(info, false);
 }
 
 
diff --git a/drivers/input/touchscreen/st/fts.h b/drivers/input/touchscreen/st/fts.h
index 7575590..84553fe 100644
--- a/drivers/input/touchscreen/st/fts.h
+++ b/drivers/input/touchscreen/st/fts.h
@@ -66,7 +66,7 @@
 
 
 /**** FEATURES USED IN THE IC ***/
-#define PHONE_KEY /*enable the keys*/
+/* #define PHONE_KEY enable the keys */
 
 #define PHONE_GESTURE /*allow to use the gestures*/
 #ifdef PHONE_GESTURE
diff --git a/drivers/input/touchscreen/st/fts_gui.c b/drivers/input/touchscreen/st/fts_gui.c
index a6aa89c..a2bfe1a 100644
--- a/drivers/input/touchscreen/st/fts_gui.c
+++ b/drivers/input/touchscreen/st/fts_gui.c
@@ -122,8 +122,7 @@
 	ret = sscanf(buf, "%x %x %x %x %x %x %x %x %x ",
 		(data + 8), (data), (data + 1), (data + 2), (data + 3),
 		(data + 4), (data + 5), (data + 6), (data + 7));
-	if (ret != 9)
-		return -EINVAL;
+
 	byte_count = data[8];
 
 	/**
@@ -251,8 +250,7 @@
 	ret = sscanf(buf, "%x %x %x %x %x %x %x %x %x ",
 		(data + 8), (data), (data + 1), (data + 2), (data + 3),
 		(data + 4), (data + 5), (data + 6), (data + 7));
-	if (ret != 9)
-		return -EINVAL;
+
 	byte_count = data[8];
 
 	if (byte_count > 8) {
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 33a1acb..7ca65c5 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -405,6 +405,16 @@
 
 	  If unsure, say N here.
 
+config IOMMU_TLBSYNC_DEBUG
+	bool "TLB sync timeout debug"
+	depends on ARM_SMMU
+	help
+	  Collects the SMMU system state information right after the
+	  first TLB sync timeout by calling BUG(). Use this only on
+	  debug builds.
+
+	  If unsure, say N here.
+
 config QCOM_LAZY_MAPPING
 	bool "Reference counted iommu-mapping support"
 	depends on ION
diff --git a/drivers/iommu/arm-smmu-regs.h b/drivers/iommu/arm-smmu-regs.h
index 4695c25..a0e15d5 100644
--- a/drivers/iommu/arm-smmu-regs.h
+++ b/drivers/iommu/arm-smmu-regs.h
@@ -191,6 +191,8 @@
 #define TLBSTATUS_SACTIVE		(1 << 0)
 #define ARM_SMMU_CB_ATS1PR		0x800
 #define ARM_SMMU_CB_ATSR		0x8f0
+#define ARM_SMMU_STATS_SYNC_INV_TBU_ACK 0x25dc
+#define ARM_SMMU_TBU_PWR_STATUS         0x2204
 
 #define SCTLR_MEM_ATTR_SHIFT		16
 #define SCTLR_SHCFG_SHIFT		22
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index fc9fa1d..ee76a88 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -837,7 +837,13 @@
 			cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_SEV);
 		cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSH, ARM_SMMU_SH_ISH);
 		cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIATTR, ARM_SMMU_MEMATTR_OIWB);
-		cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIDATA, ent->sync.msidata);
+		/*
+		 * Commands are written little-endian, but we want the SMMU to
+		 * receive MSIData, and thus write it back to memory, in CPU
+		 * byte order, so big-endian needs an extra byteswap here.
+		 */
+		cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIDATA,
+				     cpu_to_le32(ent->sync.msidata));
 		cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK;
 		break;
 	default:
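
The comment in the hunk above is the key point: command words are stored little-endian, so a field that must land in memory in CPU byte order needs one extra swap on big-endian. A sketch of the round-trip, with illustrative names rather than the real SMMUv3 command encoding:

#include <asm/byteorder.h>
#include <linux/types.h>

static void write_sync_word(__le64 *slot, u64 cmd, u32 msidata)
{
	/*
	 * cpu_to_le32() is a no-op on little-endian; on big-endian it
	 * pre-swaps msidata so that the cpu_to_le64() below swaps it
	 * back, leaving those bytes in native order in memory.
	 */
	cmd |= (u64)cpu_to_le32(msidata);
	*slot = cpu_to_le64(cmd);
}
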
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index cf0f4d8..f0a7648 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -250,6 +250,7 @@
 #define ARM_SMMU_OPT_NO_ASID_RETENTION	(1 << 5)
 #define ARM_SMMU_OPT_STATIC_CB		(1 << 6)
 #define ARM_SMMU_OPT_DISABLE_ATOS	(1 << 7)
+#define ARM_SMMU_OPT_NO_DYNAMIC_ASID	(1 << 8)
 	u32				options;
 	enum arm_smmu_arch_version	version;
 	enum arm_smmu_implementation	model;
@@ -387,6 +388,7 @@
 	{ ARM_SMMU_OPT_NO_ASID_RETENTION, "qcom,no-asid-retention" },
 	{ ARM_SMMU_OPT_STATIC_CB, "qcom,enable-static-cb"},
 	{ ARM_SMMU_OPT_DISABLE_ATOS, "qcom,disable-atos" },
+	{ ARM_SMMU_OPT_NO_DYNAMIC_ASID, "qcom,no-dynamic-asid" },
 	{ 0, NULL},
 };
 
@@ -693,6 +695,20 @@
 	return smmu->arch_ops->device_group(dev, group);
 }
 
+static void arm_smmu_arch_write_sync(struct arm_smmu_device *smmu)
+{
+	u32 id;
+
+	if (!smmu)
+		return;
+
+	/* Read to complete prior write transactions */
+	id = readl_relaxed(ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_ID0);
+
+	/* Wait for the read to complete before powering off */
+	rmb();
+}
+
 static struct device_node *dev_get_dev_node(struct device *dev)
 {
 	if (dev_is_pci(dev)) {
@@ -940,6 +956,9 @@
 static void arm_smmu_power_off_atomic(struct arm_smmu_power_resources *pwr)
 {
 	unsigned long flags;
+	struct arm_smmu_device *smmu = pwr->dev->driver_data;
+
+	arm_smmu_arch_write_sync(smmu);
 
 	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
 	if (pwr->clock_refs_count == 0) {
@@ -1080,6 +1099,7 @@
 				void __iomem *sync, void __iomem *status)
 {
 	unsigned int spin_cnt, delay;
+	u32 sync_inv_ack, tbu_pwr_status;
 
 	writel_relaxed(0, sync);
 	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
@@ -1090,9 +1110,15 @@
 		}
 		udelay(delay);
 	}
+	sync_inv_ack = scm_io_read((unsigned long)(smmu->phys_addr +
+				     ARM_SMMU_STATS_SYNC_INV_TBU_ACK));
+	tbu_pwr_status = scm_io_read((unsigned long)(smmu->phys_addr +
+				     ARM_SMMU_TBU_PWR_STATUS));
 	trace_tlbsync_timeout(smmu->dev, 0);
 	dev_err_ratelimited(smmu->dev,
-			    "TLB sync timed out -- SMMU may be deadlocked\n");
+			    "TLB sync timed out -- SMMU may be deadlocked ack 0x%x pwr 0x%x\n",
+			    sync_inv_ack, tbu_pwr_status);
+	BUG_ON(IS_ENABLED(CONFIG_IOMMU_TLBSYNC_DEBUG));
 	return -EINVAL;
 }
 
@@ -1376,6 +1402,62 @@
 	.free_pages_exact = arm_smmu_free_pages_exact,
 };
 
+static void print_ctx_regs(struct arm_smmu_device *smmu, struct arm_smmu_cfg
+			   *cfg, unsigned int fsr)
+{
+	u32 fsynr0;
+	void __iomem *cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
+	void __iomem *gr1_base = ARM_SMMU_GR1(smmu);
+	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
+
+	fsynr0 = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
+
+	dev_err(smmu->dev, "FAR    = 0x%016llx\n",
+		readq_relaxed(cb_base + ARM_SMMU_CB_FAR));
+	dev_err(smmu->dev, "PAR    = 0x%pK\n",
+		readq_relaxed(cb_base + ARM_SMMU_CB_PAR));
+
+	dev_err(smmu->dev,
+		"FSR    = 0x%08x [%s%s%s%s%s%s%s%s%s%s]\n",
+		fsr,
+		(fsr & 0x02) ?  (fsynr0 & 0x10 ?
+				 "TF W " : "TF R ") : "",
+		(fsr & 0x04) ? "AFF " : "",
+		(fsr & 0x08) ? (fsynr0 & 0x10 ?
+				"PF W " : "PF R ") : "",
+		(fsr & 0x10) ? "EF " : "",
+		(fsr & 0x20) ? "TLBMCF " : "",
+		(fsr & 0x40) ? "TLBLKF " : "",
+		(fsr & 0x80) ? "MHF " : "",
+		(fsr & 0x100) ? "UUT " : "",
+		(fsr & 0x40000000) ? "SS " : "",
+		(fsr & 0x80000000) ? "MULTI " : "");
+
+	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
+		dev_err(smmu->dev, "TTBR0  = 0x%pK\n",
+			readl_relaxed(cb_base + ARM_SMMU_CB_TTBR0));
+		dev_err(smmu->dev, "TTBR1  = 0x%pK\n",
+			readl_relaxed(cb_base + ARM_SMMU_CB_TTBR1));
+	} else {
+		dev_err(smmu->dev, "TTBR0  = 0x%pK\n",
+			readq_relaxed(cb_base + ARM_SMMU_CB_TTBR0));
+		if (stage1)
+			dev_err(smmu->dev, "TTBR1  = 0x%pK\n",
+				readq_relaxed(cb_base + ARM_SMMU_CB_TTBR1));
+	}
+
+	dev_err(smmu->dev, "SCTLR  = 0x%08x ACTLR  = 0x%08x\n",
+	       readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR),
+	       readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR));
+	dev_err(smmu->dev, "CBAR  = 0x%08x\n",
+	       readl_relaxed(gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx)));
+	dev_err(smmu->dev, "MAIR0   = 0x%08x MAIR1   = 0x%08x\n",
+	       readl_relaxed(cb_base + ARM_SMMU_CB_S1_MAIR0),
+	       readl_relaxed(cb_base + ARM_SMMU_CB_S1_MAIR1));
+}
+
 static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
 					 dma_addr_t iova, u32 fsr)
 {
@@ -1463,29 +1545,17 @@
 		ret = IRQ_HANDLED;
 		resume = RESUME_TERMINATE;
 	} else {
-		phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
-							      fsr);
 		if (__ratelimit(&_rs)) {
+			phys_addr_t phys_atos = arm_smmu_verify_fault(domain,
+								      iova,
+								      fsr);
+
 			dev_err(smmu->dev,
 				"Unhandled context fault: iova=0x%08lx, cb=%d, fsr=0x%x, fsynr0=0x%x, fsynr1=0x%x\n",
 				iova, cfg->cbndx, fsr, fsynr0, fsynr1);
-			dev_err(smmu->dev, "FAR    = %016lx\n",
-				(unsigned long)iova);
-			dev_err(smmu->dev,
-				"FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n",
-				fsr,
-				(fsr & 0x02) ?  (fsynr0 & 0x10 ?
-						"TF W " : "TF R ") : "",
-				(fsr & 0x04) ? "AFF " : "",
-				(fsr & 0x08) ? (fsynr0 & 0x10 ?
-						"PF W " : "PF R ") : "",
-				(fsr & 0x10) ? "EF " : "",
-				(fsr & 0x20) ? "TLBMCF " : "",
-				(fsr & 0x40) ? "TLBLKF " : "",
-				(fsr & 0x80) ? "MHF " : "",
-				(fsr & 0x100) ? "UUT " : "",
-				(fsr & 0x40000000) ? "SS " : "",
-				(fsr & 0x80000000) ? "MULTI " : "");
+
+			print_ctx_regs(smmu, cfg, fsr);
+
 			dev_err(smmu->dev,
 				"soft iova-to-phys=%pa\n", &phys_soft);
 			if (!phys_soft)
@@ -1784,7 +1854,7 @@
 	bool dynamic = is_dynamic_domain(domain);
 	int ret;
 
-	if (!dynamic) {
+	if (!dynamic || (smmu->options & ARM_SMMU_OPT_NO_DYNAMIC_ASID)) {
 		cfg->asid = cfg->cbndx + 1;
 	} else {
 		mutex_lock(&smmu->idr_mutex);
@@ -2426,7 +2496,10 @@
 
 	mutex_lock(&smmu->stream_map_mutex);
 	for_each_cfg_sme(fwspec, i, idx) {
-		WARN_ON(s2cr[idx].attach_count == 0);
+		if (WARN_ON(s2cr[idx].attach_count == 0)) {
+			mutex_unlock(&smmu->stream_map_mutex);
+			return;
+		}
 		s2cr[idx].attach_count -= 1;
 
 		if (s2cr[idx].attach_count > 0)
@@ -4324,7 +4397,7 @@
 	pwr->bus_client = msm_bus_scale_register_client(pwr->bus_dt_data);
 	if (!pwr->bus_client) {
 		dev_err(dev, "Bus client registration failed\n");
-		return -EINVAL;
+		return -EPROBE_DEFER;
 	}
 
 	return 0;
@@ -5018,6 +5091,11 @@
 #define DEBUG_PAR_PA_SHIFT		12
 #define DEBUG_PAR_FAULT_VAL		0x1
 
+#define DEBUG_AXUSER_REG		0x30
+#define DEBUG_AXUSER_CDMID_MASK         0xff
+#define DEBUG_AXUSER_CDMID_SHIFT        36
+#define DEBUG_AXUSER_CDMID_VAL          255
+
 #define TBU_DBG_TIMEOUT_US		100
 
 struct actlr_setting {
@@ -5277,9 +5355,13 @@
 redo:
 	/* Set address and stream-id */
 	val = readq_relaxed(tbu->base + DEBUG_SID_HALT_REG);
+	val &= ~DEBUG_SID_HALT_SID_MASK;
 	val |= sid & DEBUG_SID_HALT_SID_MASK;
 	writeq_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
 	writeq_relaxed(iova, tbu->base + DEBUG_VA_ADDR_REG);
+	val = (u64)(DEBUG_AXUSER_CDMID_VAL & DEBUG_AXUSER_CDMID_MASK) <<
+		DEBUG_AXUSER_CDMID_SHIFT;
+	writeq_relaxed(val, tbu->base + DEBUG_AXUSER_REG);
 
 	/*
 	 * Write-back Read and Write-Allocate
@@ -5336,6 +5418,9 @@
 	/* Reset hardware */
 	writeq_relaxed(0, tbu->base + DEBUG_TXN_TRIGG_REG);
 	writeq_relaxed(0, tbu->base + DEBUG_VA_ADDR_REG);
+	val = readl_relaxed(tbu->base + DEBUG_SID_HALT_REG);
+	val &= ~DEBUG_SID_HALT_SID_MASK;
+	writel_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
 
 	/*
 	 * After a failed translation, the next successful translation will
@@ -5351,6 +5436,12 @@
 	qsmmuv500_tbu_resume(tbu);
 
 out_power_off:
+	/* Read to complete prior write transactions */
+	val = readl_relaxed(tbu->base + DEBUG_SR_HALT_ACK_REG);
+
+	/* Wait for the read to complete before powering off */
+	rmb();
+
 	arm_smmu_power_off(tbu->pwr);
 
 	return phys;
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index e7994ba..27523fc 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/dma-contiguous.h>
@@ -540,12 +540,22 @@
 	void *addr;
 	unsigned long flags;
 	struct sg_mapping_iter miter;
-	unsigned int count = ALIGN(size, SZ_4K) >> PAGE_SHIFT;
+	size_t count = ALIGN(size, SZ_4K) >> PAGE_SHIFT;
 	bool is_coherent = is_dma_coherent(dev, attrs);
 	int prot = dma_info_to_prot(DMA_BIDIRECTIONAL, is_coherent, attrs);
 	pgprot_t remap_prot = __get_dma_pgprot(attrs, PAGE_KERNEL, is_coherent);
 	struct page **pages;
 
+	/*
+	 * sg_alloc_table_from_pages() takes an unsigned int for the
+	 * count, so check that count does not exceed UINT_MAX.
+	 */
+	if (count > UINT_MAX) {
+		dev_err(dev, "count: %zx exceeds UINT_MAX\n", count);
+		return NULL;
+	}
+
 	*handle = DMA_ERROR_CODE;
 
 	pages = __fast_smmu_alloc_pages(count, gfp);
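
The guard above exists because count was widened to size_t while sg_alloc_table_from_pages() still takes an unsigned int; without the check the argument would silently truncate. The check in isolation, as an illustrative helper:

#include <linux/errno.h>
#include <linux/kernel.h>

static int check_sg_count(size_t count)
{
	if (count > UINT_MAX)
		return -EINVAL;	/* would truncate in the unsigned int arg */

	return 0;
}
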
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a76c47f..2b8f5eb 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2069,7 +2069,7 @@
 	 * than default.  Unnecessary for PT mode.
 	 */
 	if (translation != CONTEXT_TT_PASS_THROUGH) {
-		for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
+		for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
 			ret = -ENOMEM;
 			pgd = phys_to_virt(dma_pte_addr(pgd));
 			if (!dma_pte_present(pgd))
@@ -2083,7 +2083,7 @@
 			translation = CONTEXT_TT_MULTI_LEVEL;
 
 		context_set_address_root(context, virt_to_phys(pgd));
-		context_set_address_width(context, iommu->agaw);
+		context_set_address_width(context, agaw);
 	} else {
 		/*
 		 * In pass through mode, AW must be programmed to
@@ -5230,7 +5230,7 @@
 	struct iommu_resv_region *entry, *next;
 
 	list_for_each_entry_safe(entry, next, head, list) {
-		if (entry->type == IOMMU_RESV_RESERVED)
+		if (entry->type == IOMMU_RESV_MSI)
 			kfree(entry);
 	}
 }
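
The agaw hunk above fixes a decrementing loop: with "!=" as the bound, a domain narrower than the IOMMU (domain->agaw < iommu->agaw) never reaches the terminator and the counter runs past it. A sketch of the shape of the fix, with the page-table walk elided:

static int pick_agaw(int domain_agaw, int iommu_agaw)
{
	int agaw;

	/*
	 * ">" stops immediately when the domain is already narrow
	 * enough; "!=" would keep decrementing past iommu_agaw.
	 */
	for (agaw = domain_agaw; agaw > iommu_agaw; agaw--)
		;	/* descend one page-directory level per iteration */

	/* Program the width actually reached, not iommu->agaw blindly. */
	return agaw;
}
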
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index d137257..2b165b3 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -289,20 +289,20 @@
 	bool coherent;
 
 	if (ddev->domain) {
-		dev_err(dev, "Already attached.\n");
+		dev_err_ratelimited(dev, "Already attached.\n");
 		return -EBUSY;
 	}
 
 	iommu = of_iommu_configure(dev, dev->of_node);
 	if (!iommu) {
-		dev_err(dev, "Is not associated with an iommu\n");
+		dev_err_ratelimited(dev, "Is not associated with an iommu\n");
 		return -EINVAL;
 	}
 
 	coherent = of_dma_is_coherent(dev->of_node);
 
 	if (!dev->iommu_group) {
-		dev_err(dev, "Does not have an iommu group\n");
+		dev_err_ratelimited(dev, "Does not have an iommu group\n");
 		return -EINVAL;
 	}
 
@@ -310,7 +310,7 @@
 	domain = iommu_get_domain_for_dev(dev);
 	if (domain) {
 		if (domain->type != IOMMU_DOMAIN_DMA) {
-			dev_err(dev, "Attached, but its not a default domain?\n");
+			dev_err_ratelimited(dev, "Attached, but it's not a default domain?\n");
 			return -EINVAL;
 		}
 		iommu_detach_group(domain, dev->iommu_group);
@@ -318,19 +318,19 @@
 
 	domain = iommu_domain_alloc(dev->bus);
 	if (!domain) {
-		dev_err(dev, "Allocating iommu domain failed\n");
+		dev_err_ratelimited(dev, "Allocating iommu domain failed\n");
 		return -EINVAL;
 	}
 
 	domain->is_debug_domain = true;
 
 	if (iommu_debug_set_attrs(ddev, domain, attrs)) {
-		dev_err(dev, "Setting attrs failed\n");
+		dev_err_ratelimited(dev, "Setting attrs failed\n");
 		goto out_free_domain;
 	}
 
 	if (iommu_attach_group(domain, dev->iommu_group)) {
-		dev_err(dev, "attach group failed\n");
+		dev_err_ratelimited(dev, "attach group failed\n");
 		goto out_free_domain;
 	}
 
@@ -341,7 +341,7 @@
 	set_dma_ops(dev, NULL);
 	arch_setup_dma_ops(dev, dma_base, size, iommu, coherent);
 	if (!get_dma_ops(dev)) {
-		dev_err(dev, "arch_setup_dma_ops failed, dma ops are null.\n");
+		dev_err_ratelimited(dev, "arch_setup_dma_ops failed, dma ops are null.\n");
 		goto out_detach_group;
 	}
 
@@ -362,13 +362,13 @@
 	struct device *dev = ddev->dev;
 
 	if (!dev->iommu_group) {
-		dev_err(dev, "Does not have an iommu group\n");
+		dev_err_ratelimited(dev, "Does not have an iommu group\n");
 		return;
 	}
 
 	domain = ddev->domain;
 	if (!domain) {
-		dev_err(dev, "Is not attached\n");
+		dev_err_ratelimited(dev, "Is not attached\n");
 		return;
 	}
 
@@ -770,14 +770,14 @@
 	for (iova = 0, i = 0; iova < max; iova += SZ_8K, ++i) {
 		dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
 		if (dma_addr == DMA_ERROR_CODE) {
-			dev_err(dev, "Failed map on iter %d\n", i);
+			dev_err_ratelimited(dev, "Failed map on iter %d\n", i);
 			ret = -EINVAL;
 			goto out;
 		}
 	}
 
 	if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
-		dev_err(dev,
+		dev_err_ratelimited(dev,
 			"dma_map_single unexpectedly (VA should have been exhausted)\n");
 		ret = -EINVAL;
 		goto out;
@@ -797,7 +797,7 @@
 	if (dma_addr != SZ_8K) {
 		dma_addr_t expected = SZ_8K;
 
-		dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
+		dev_err_ratelimited(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
 			&dma_addr, &expected);
 		ret = -EINVAL;
 		goto out;
@@ -812,14 +812,14 @@
 	if (dma_addr != 0) {
 		dma_addr_t expected = 0;
 
-		dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
+		dev_err_ratelimited(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
 			&dma_addr, &expected);
 		ret = -EINVAL;
 		goto out;
 	}
 
 	if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
-		dev_err(dev,
+		dev_err_ratelimited(dev,
 			"dma_map_single unexpectedly after remaps (VA should have been exhausted)\n");
 		ret = -EINVAL;
 		goto out;
@@ -869,7 +869,7 @@
 	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
 	if (!virt) {
 		if (size > SZ_8K) {
-			dev_err(dev,
+			dev_err_ratelimited(dev,
 				"Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
 				_size_to_string(size));
 			return 0;
@@ -881,7 +881,7 @@
 	for (iova = 0, i = 0; iova < max; iova += size, ++i) {
 		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
 		if (dma_addr == DMA_ERROR_CODE) {
-			dev_err(dev, "Failed map on iter %d\n", i);
+			dev_err_ratelimited(dev, "Failed map on iter %d\n", i);
 			ret = -EINVAL;
 			goto out;
 		}
@@ -914,7 +914,7 @@
 	}
 
 	if (unmapped != remapped) {
-		dev_err(dev,
+		dev_err_ratelimited(dev,
 			"Unexpected random remap count! Unmapped %d but remapped %d\n",
 			unmapped, remapped);
 		ret = -EINVAL;
@@ -959,7 +959,7 @@
 	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
 	if (!virt) {
 		if (size > SZ_8K) {
-			dev_err(dev,
+			dev_err_ratelimited(dev,
 				"Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
 				_size_to_string(size));
 			return 0;
@@ -988,7 +988,7 @@
 			phys_addr_t expected = phys;
 
 			if (__check_mapping(dev, domain, iova, expected)) {
-				dev_err(dev, "iter: %d\n", i);
+				dev_err_ratelimited(dev, "iter: %d\n", i);
 				ret = -EINVAL;
 				goto out;
 			}
@@ -999,7 +999,7 @@
 			unsigned long theiova = ((SZ_1G * 4ULL) - size) - iova;
 
 			if (__check_mapping(dev, domain, theiova, expected)) {
-				dev_err(dev, "iter: %d\n", i);
+				dev_err_ratelimited(dev, "iter: %d\n", i);
 				ret = -EINVAL;
 				goto out;
 			}
@@ -1158,7 +1158,7 @@
 		pa = iommu_iova_to_phys(domain, iova);
 		pa2 = iommu_iova_to_phys_hard(domain, iova);
 		if (pa != pa2) {
-			dev_err(dev,
+			dev_err_ratelimited(dev,
 				"iova_to_phys doesn't match iova_to_phys_hard: %pa != %pa\n",
 				&pa, &pa2);
 			ret = -EINVAL;
@@ -1166,7 +1166,7 @@
 		}
 		pa2 = virt_to_phys(data);
 		if (pa != pa2) {
-			dev_err(dev,
+			dev_err_ratelimited(dev,
 				"iova_to_phys doesn't match virt_to_phys: %pa != %pa\n",
 				&pa, &pa2);
 			ret = -EINVAL;
@@ -1175,7 +1175,8 @@
 		dma_unmap_single(dev, iova, size, DMA_TO_DEVICE);
 		for (j = 0; j < size; ++j) {
 			if (data[j] != 0xa5) {
-				dev_err(dev, "data[%d] != 0xa5\n", data[j]);
+				dev_err_ratelimited(dev,
+					       "data[%d] != 0xa5\n", data[j]);
 				ret = -EINVAL;
 				goto out;
 			}
@@ -1230,7 +1231,7 @@
 			pa = iommu_iova_to_phys(domain, iova);
 			pa2 = iommu_iova_to_phys_hard(domain, iova);
 			if (pa != pa2) {
-				dev_err(dev,
+				dev_err_ratelimited(dev,
 					"iova_to_phys doesn't match iova_to_phys_hard: %pa != %pa\n",
 					&pa, &pa2);
 				ret = -EINVAL;
@@ -1241,7 +1242,7 @@
 			pa = iommu_iova_to_phys(domain, iova);
 			pa2 = iommu_iova_to_phys_hard(domain, iova);
 			if (pa != pa2) {
-				dev_err(dev,
+				dev_err_ratelimited(dev,
 					"iova_to_phys doesn't match iova_to_phys_hard: %pa != %pa\n",
 					&pa, &pa2);
 				ret = -EINVAL;
@@ -1289,7 +1290,7 @@
 		goto out_release_mapping;
 	}
 
-	dev_err(dev, "testing with pgtables at %pa\n", &pt_phys);
+	dev_err_ratelimited(dev, "testing with pgtables at %pa\n", &pt_phys);
 	if (iommu_enable_config_clocks(domain)) {
 		ds_printf(dev, s, "Couldn't enable clocks\n");
 		goto out_release_mapping;
@@ -1378,7 +1379,7 @@
 	int val, ret;
 
 	if (kstrtoint_from_user(ubuf, count, 0, &val)) {
-		pr_err("Invalid format. Expected a hex or decimal integer");
+		pr_err_ratelimited("Invalid format. Expected a hex or decimal integer");
 		return -EFAULT;
 	}
 
@@ -1386,10 +1387,10 @@
 	if (val) {
 		ret = iommu_debug_dma_reconfigure(ddev, attrs, 0, SZ_1G * 4ULL);
 		if (!ret)
-			pr_err("Attached\n");
+			pr_err_ratelimited("Attached\n");
 	} else {
 		iommu_debug_dma_deconfigure(ddev);
-		pr_err("Detached\n");
+		pr_err_ratelimited("Detached\n");
 	}
 	mutex_unlock(&ddev->state_lock);
 	retval = count;
@@ -1424,7 +1425,7 @@
 	c[0] = ddev->domain ? '1' : '0';
 	c[1] = '\n';
 	if (copy_to_user(ubuf, &c, 2)) {
-		pr_err("copy_to_user failed\n");
+		pr_err_ratelimited("copy_to_user failed\n");
 		return -EFAULT;
 	}
 	*offset = 1;		/* non-zero means we're done */
@@ -1459,7 +1460,7 @@
 
 	buflen = strlen(buf);
 	if (copy_to_user(ubuf, buf, buflen)) {
-		pr_err("Couldn't copy_to_user\n");
+		pr_err_ratelimited("Couldn't copy_to_user\n");
 		retval = -EFAULT;
 	} else {
 		*offset = 1;	/* non-zero means we're done */
@@ -1494,13 +1495,13 @@
 	dma_addr_t iova;
 
 	if (kstrtox_from_user(ubuf, count, 0, &iova)) {
-		pr_err("Invalid format for iova\n");
+		pr_err_ratelimited("Invalid format for iova\n");
 		ddev->iova = 0;
 		return -EINVAL;
 	}
 
 	ddev->iova = iova;
-	pr_err("Saved iova=%pa for future PTE commands\n", &iova);
+	pr_err_ratelimited("Saved iova=%pa for future PTE commands\n", &iova);
 	return count;
 }
 
@@ -1515,7 +1516,7 @@
 	size_t buflen;
 
 	if (kptr_restrict != 0) {
-		pr_err("kptr_restrict needs to be disabled.\n");
+		pr_err_ratelimited("kptr_restrict needs to be disabled.\n");
 		return -EPERM;
 	}
 
@@ -1524,7 +1525,7 @@
 
 	mutex_lock(&ddev->state_lock);
 	if (!ddev->domain) {
-		pr_err("No domain. Did you already attach?\n");
+		pr_err_ratelimited("No domain. Did you already attach?\n");
 		mutex_unlock(&ddev->state_lock);
 		return -EINVAL;
 	}
@@ -1540,7 +1541,7 @@
 
 	buflen = strlen(buf);
 	if (copy_to_user(ubuf, buf, buflen)) {
-		pr_err("Couldn't copy_to_user\n");
+		pr_err_ratelimited("Couldn't copy_to_user\n");
 		retval = -EFAULT;
 	} else {
 		*offset = 1;	/* non-zero means we're done */
@@ -1565,13 +1566,13 @@
 	dma_addr_t iova;
 
 	if (kstrtox_from_user(ubuf, count, 0, &iova)) {
-		pr_err("Invalid format for iova\n");
+		pr_err_ratelimited("Invalid format for iova\n");
 		ddev->iova = 0;
 		return -EINVAL;
 	}
 
 	ddev->iova = iova;
-	pr_err("Saved iova=%pa for future ATOS commands\n", &iova);
+	pr_err_ratelimited("Saved iova=%pa for future ATOS commands\n", &iova);
 	return count;
 }
 
@@ -1585,7 +1586,7 @@
 	size_t buflen;
 
 	if (kptr_restrict != 0) {
-		pr_err("kptr_restrict needs to be disabled.\n");
+		pr_err_ratelimited("kptr_restrict needs to be disabled.\n");
 		return -EPERM;
 	}
 
@@ -1594,7 +1595,7 @@
 
 	mutex_lock(&ddev->state_lock);
 	if (!ddev->domain) {
-		pr_err("No domain. Did you already attach?\n");
+		pr_err_ratelimited("No domain. Did you already attach?\n");
 		mutex_unlock(&ddev->state_lock);
 		return -EINVAL;
 	}
@@ -1605,7 +1606,7 @@
 	if (!phys) {
 		strlcpy(buf, "FAIL\n", 100);
 		phys = iommu_iova_to_phys(ddev->domain, ddev->iova);
-		dev_err(ddev->dev, "ATOS for %pa failed. Software walk returned: %pa\n",
+		dev_err_ratelimited(ddev->dev, "ATOS for %pa failed. Software walk returned: %pa\n",
 			&ddev->iova, &phys);
 	} else {
 		snprintf(buf, 100, "%pa\n", &phys);
@@ -1613,7 +1614,7 @@
 
 	buflen = strlen(buf);
 	if (copy_to_user(ubuf, buf, buflen)) {
-		pr_err("Couldn't copy_to_user\n");
+		pr_err_ratelimited("Couldn't copy_to_user\n");
 		retval = -EFAULT;
 	} else {
 		*offset = 1;	/* non-zero means we're done */
@@ -1640,7 +1641,7 @@
 	size_t buflen;
 
 	if (kptr_restrict != 0) {
-		pr_err("kptr_restrict needs to be disabled.\n");
+		pr_err_ratelimited("kptr_restrict needs to be disabled.\n");
 		return -EPERM;
 	}
 	if (*offset)
@@ -1648,7 +1649,7 @@
 
 	mutex_lock(&ddev->state_lock);
 	if (!ddev->domain) {
-		pr_err("No domain. Did you already attach?\n");
+		pr_err_ratelimited("No domain. Did you already attach?\n");
 		mutex_unlock(&ddev->state_lock);
 		return -EINVAL;
 	}
@@ -1664,7 +1665,7 @@
 
 	buflen = strlen(buf);
 	if (copy_to_user(ubuf, buf, buflen)) {
-		pr_err("Couldn't copy_to_user\n");
+		pr_err_ratelimited("Couldn't copy_to_user\n");
 		retval = -EFAULT;
 	} else {
 		*offset = 1;	/* non-zero means we're done */
@@ -1695,14 +1696,14 @@
 	struct iommu_debug_device *ddev = file->private_data;
 
 	if (count >= 100) {
-		pr_err("Value too large\n");
+		pr_err_ratelimited("Value too large\n");
 		return -EINVAL;
 	}
 
 	memset(buf, 0, 100);
 
 	if (copy_from_user(buf, ubuf, count)) {
-		pr_err("Couldn't copy from user\n");
+		pr_err_ratelimited("Couldn't copy from user\n");
 		retval = -EFAULT;
 	}
 
@@ -1735,27 +1736,27 @@
 
 	mutex_lock(&ddev->state_lock);
 	if (!ddev->domain) {
-		pr_err("No domain. Did you already attach?\n");
+		pr_err_ratelimited("No domain. Did you already attach?\n");
 		mutex_unlock(&ddev->state_lock);
 		return -EINVAL;
 	}
 
 	ret = iommu_map(ddev->domain, iova, phys, size, prot);
 	if (ret) {
-		pr_err("iommu_map failed with %d\n", ret);
+		pr_err_ratelimited("iommu_map failed with %d\n", ret);
 		retval = -EIO;
 		goto out;
 	}
 
 	retval = count;
-	pr_err("Mapped %pa to %pa (len=0x%zx, prot=0x%x)\n",
+	pr_err_ratelimited("Mapped %pa to %pa (len=0x%zx, prot=0x%x)\n",
 	       &iova, &phys, size, prot);
 out:
 	mutex_unlock(&ddev->state_lock);
 	return retval;
 
 invalid_format:
-	pr_err("Invalid format. Expected: iova,phys,len,prot where `prot' is the bitwise OR of IOMMU_READ, IOMMU_WRITE, etc.\n");
+	pr_err_ratelimited("Invalid format. Expected: iova,phys,len,prot where `prot' is the bitwise OR of IOMMU_READ, IOMMU_WRITE, etc.\n");
 	return -EINVAL;
 }
 
@@ -1789,14 +1790,14 @@
 	struct device *dev = ddev->dev;
 
 	if (count >= sizeof(buf)) {
-		pr_err("Value too large\n");
+		pr_err_ratelimited("Value too large\n");
 		return -EINVAL;
 	}
 
 	memset(buf, 0, sizeof(buf));
 
 	if (copy_from_user(buf, ubuf, count)) {
-		pr_err("Couldn't copy from user\n");
+		pr_err_ratelimited("Couldn't copy from user\n");
 		return -EFAULT;
 	}
 
@@ -1836,7 +1837,7 @@
 
 	mutex_lock(&ddev->state_lock);
 	if (!ddev->domain) {
-		pr_err("No domain. Did you already attach?\n");
+		pr_err_ratelimited("No domain. Did you already attach?\n");
 		mutex_unlock(&ddev->state_lock);
 		return -EINVAL;
 	}
@@ -1845,26 +1846,27 @@
 					DMA_TO_DEVICE, dma_attrs);
 
 	if (dma_mapping_error(dev, iova)) {
-		pr_err("Failed to perform dma_map_single\n");
+		pr_err_ratelimited("Failed to perform dma_map_single\n");
 		ret = -EINVAL;
 		goto out;
 	}
 
 	retval = count;
-	pr_err("Mapped 0x%p to %pa (len=0x%zx)\n",
+	pr_err_ratelimited("Mapped 0x%p to %pa (len=0x%zx)\n",
 			v_addr, &iova, size);
 	ddev->iova = iova;
-		pr_err("Saved iova=%pa for future PTE commands\n", &iova);
+		pr_err_ratelimited("Saved iova=%pa for future PTE commands\n",
+				&iova);
 out:
 	mutex_unlock(&ddev->state_lock);
 	return retval;
 
 invalid_format:
-	pr_err("Invalid format. Expected: addr,len,dma attr where 'dma attr' is\n0: normal mapping\n1: force coherent\n2: force non-cohernet\n3: use system cache\n");
+	pr_err_ratelimited("Invalid format. Expected: addr,len,dma attr where 'dma attr' is\n0: normal mapping\n1: force coherent\n2: force non-coherent\n3: use system cache\n");
 	return retval;
 
 invalid_addr:
-	pr_err("Invalid addr given! Address should be within 1MB size from start addr returned by doing 'cat test_virt_addr'.\n");
+	pr_err_ratelimited("Invalid addr given! Address should be within 1MB size from start addr returned by doing 'cat test_virt_addr'.\n");
 	return retval;
 }
 
@@ -1887,7 +1889,7 @@
 
 	buflen = strlen(buf);
 	if (copy_to_user(ubuf, buf, buflen)) {
-		pr_err("Couldn't copy_to_user\n");
+		pr_err_ratelimited("Couldn't copy_to_user\n");
 		retval = -EFAULT;
 	} else {
 		*offset = 1;	/* non-zero means we're done */
@@ -1916,19 +1918,19 @@
 	struct iommu_debug_device *ddev = file->private_data;
 
 	if (count >= 100) {
-		pr_err("Value too large\n");
+		pr_err_ratelimited("Value too large\n");
 		return -EINVAL;
 	}
 
 	if (!ddev->domain) {
-		pr_err("No domain. Did you already attach?\n");
+		pr_err_ratelimited("No domain. Did you already attach?\n");
 		return -EINVAL;
 	}
 
 	memset(buf, 0, 100);
 
 	if (copy_from_user(buf, ubuf, count)) {
-		pr_err("Couldn't copy from user\n");
+		pr_err_ratelimited("Couldn't copy from user\n");
 		retval = -EFAULT;
 		goto out;
 	}
@@ -1948,27 +1950,27 @@
 
 	mutex_lock(&ddev->state_lock);
 	if (!ddev->domain) {
-		pr_err("No domain. Did you already attach?\n");
+		pr_err_ratelimited("No domain. Did you already attach?\n");
 		mutex_unlock(&ddev->state_lock);
 		return -EINVAL;
 	}
 
 	unmapped = iommu_unmap(ddev->domain, iova, size);
 	if (unmapped != size) {
-		pr_err("iommu_unmap failed. Expected to unmap: 0x%zx, unmapped: 0x%zx",
+		pr_err_ratelimited("iommu_unmap failed. Expected to unmap: 0x%zx, unmapped: 0x%zx",
 		       size, unmapped);
 		retval = -EIO;
 		goto out;
 	}
 
 	retval = count;
-	pr_err("Unmapped %pa (len=0x%zx)\n", &iova, size);
+	pr_err_ratelimited("Unmapped %pa (len=0x%zx)\n", &iova, size);
 out:
 	mutex_unlock(&ddev->state_lock);
 	return retval;
 
 invalid_format:
-	pr_err("Invalid format. Expected: iova,len\n");
+	pr_err_ratelimited("Invalid format. Expected: iova,len\n");
 	return -EINVAL;
 }
 
@@ -1992,14 +1994,14 @@
 	struct device *dev = ddev->dev;
 
 	if (count >= sizeof(buf)) {
-		pr_err("Value too large\n");
+		pr_err_ratelimited("Value too large\n");
 		return -EINVAL;
 	}
 
 	memset(buf, 0, sizeof(buf));
 
 	if (copy_from_user(buf, ubuf, count)) {
-		pr_err("Couldn't copy from user\n");
+		pr_err_ratelimited("Couldn't copy from user\n");
 		retval = -EFAULT;
 		goto out;
 	}
@@ -2036,20 +2038,20 @@
 
 	mutex_lock(&ddev->state_lock);
 	if (!ddev->domain) {
-		pr_err("No domain. Did you already attach?\n");
+		pr_err_ratelimited("No domain. Did you already attach?\n");
 		mutex_unlock(&ddev->state_lock);
 		return -EINVAL;
 	}
 	dma_unmap_single_attrs(dev, iova, size, DMA_TO_DEVICE, dma_attrs);
 
 	retval = count;
-	pr_err("Unmapped %pa (len=0x%zx)\n", &iova, size);
+	pr_err_ratelimited("Unmapped %pa (len=0x%zx)\n", &iova, size);
 out:
 	mutex_unlock(&ddev->state_lock);
 	return retval;
 
 invalid_format:
-	pr_err("Invalid format. Expected: iova,len, dma attr\n");
+	pr_err_ratelimited("Invalid format. Expected: iova,len, dma attr\n");
 	return retval;
 }
 
@@ -2068,17 +2070,17 @@
 
 	/* we're expecting a single character plus (optionally) a newline */
 	if (count > 2) {
-		dev_err(dev, "Invalid value\n");
+		dev_err_ratelimited(dev, "Invalid value\n");
 		return -EINVAL;
 	}
 
 	if (!ddev->domain) {
-		dev_err(dev, "No domain. Did you already attach?\n");
+		dev_err_ratelimited(dev, "No domain. Did you already attach?\n");
 		return -EINVAL;
 	}
 
 	if (copy_from_user(&buf, ubuf, 1)) {
-		dev_err(dev, "Couldn't copy from user\n");
+		dev_err_ratelimited(dev, "Couldn't copy from user\n");
 		return -EFAULT;
 	}
 
@@ -2086,26 +2088,26 @@
 	switch (buf) {
 	case '0':
 		if (ddev->clk_count == 0) {
-			dev_err(dev, "Config clocks already disabled\n");
+			dev_err_ratelimited(dev, "Config clocks already disabled\n");
 			break;
 		}
 
 		if (--ddev->clk_count > 0)
 			break;
 
-		dev_err(dev, "Disabling config clocks\n");
+		dev_err_ratelimited(dev, "Disabling config clocks\n");
 		iommu_disable_config_clocks(ddev->domain);
 		break;
 	case '1':
 		if (ddev->clk_count++ > 0)
 			break;
 
-		dev_err(dev, "Enabling config clocks\n");
+		dev_err_ratelimited(dev, "Enabling config clocks\n");
 		if (iommu_enable_config_clocks(ddev->domain))
-			dev_err(dev, "Failed!\n");
+			dev_err_ratelimited(dev, "Failed!\n");
 		break;
 	default:
-		dev_err(dev, "Invalid value. Should be 0 or 1.\n");
+		dev_err_ratelimited(dev, "Invalid value. Should be 0 or 1.\n");
 		mutex_unlock(&ddev->clk_lock);
 		return -EINVAL;
 	}
@@ -2127,13 +2129,13 @@
 	unsigned long flags;
 
 	if (kstrtoul_from_user(ubuf, count, 0, &flags)) {
-		pr_err("Invalid flags format\n");
+		pr_err_ratelimited("Invalid flags format\n");
 		return -EFAULT;
 	}
 
 	mutex_lock(&ddev->state_lock);
 	if (!ddev->domain) {
-		pr_err("No domain. Did you already attach?\n");
+		pr_err_ratelimited("No domain. Did you already attach?\n");
 		mutex_unlock(&ddev->state_lock);
 		return -EINVAL;
 	}
@@ -2177,147 +2179,147 @@
 	ddev->dev = dev;
 	dir = debugfs_create_dir(dev_name(dev), debugfs_tests_dir);
 	if (!dir) {
-		pr_err("Couldn't create iommu/devices/%s debugfs dir\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s debugfs dir\n",
 		       dev_name(dev));
 		goto err;
 	}
 
 	if (!debugfs_create_file("nr_iters", 0400, dir, &iters_per_op,
 				&iommu_debug_nr_iters_ops)) {
-		pr_err("Couldn't create iommu/devices/%s/nr_iters debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/nr_iters debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("test_virt_addr", 0400, dir, ddev,
 				&iommu_debug_test_virt_addr_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/test_virt_addr debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/test_virt_addr debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("profiling", 0400, dir, ddev,
 				 &iommu_debug_profiling_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/profiling debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/profiling debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("secure_profiling", 0400, dir, ddev,
 				 &iommu_debug_secure_profiling_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/secure_profiling debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/secure_profiling debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("profiling_fast", 0400, dir, ddev,
 				 &iommu_debug_profiling_fast_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/profiling_fast debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/profiling_fast debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("profiling_fast_dma_api", 0400, dir, ddev,
 				 &iommu_debug_profiling_fast_dma_api_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/profiling_fast_dma_api debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/profiling_fast_dma_api debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("functional_fast_dma_api", 0400, dir, ddev,
 				 &iommu_debug_functional_fast_dma_api_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/functional_fast_dma_api debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/functional_fast_dma_api debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("functional_arm_dma_api", 0400, dir, ddev,
 				 &iommu_debug_functional_arm_dma_api_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/functional_arm_dma_api debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/functional_arm_dma_api debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("dma_attach", 0600, dir, ddev,
 				 &iommu_debug_dma_attach_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/dma_attach debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/dma_attach debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("attach", 0400, dir, ddev,
 				 &iommu_debug_attach_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/attach debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/attach debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("secure_attach", 0400, dir, ddev,
 				 &iommu_debug_secure_attach_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/secure_attach debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/secure_attach debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("atos", 0200, dir, ddev,
 				 &iommu_debug_atos_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/atos debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/atos debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("dma_atos", 0600, dir, ddev,
 				 &iommu_debug_dma_atos_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/dma_atos debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/dma_atos debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("map", 0200, dir, ddev,
 				 &iommu_debug_map_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/map debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/map debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("dma_map", 0600, dir, ddev,
 					 &iommu_debug_dma_map_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/dma_map debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/dma_map debugfs file\n",
 		       dev_name(dev));
 			goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("unmap", 0200, dir, ddev,
 				 &iommu_debug_unmap_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/unmap debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/unmap debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("dma_unmap", 0200, dir, ddev,
 					 &iommu_debug_dma_unmap_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/dma_unmap debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/dma_unmap debugfs file\n",
 		       dev_name(dev));
 			goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("pte", 0600, dir, ddev,
 			&iommu_debug_pte_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/pte debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/pte debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("config_clocks", 0200, dir, ddev,
 				 &iommu_debug_config_clocks_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/config_clocks debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/config_clocks debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
 
 	if (!debugfs_create_file("trigger-fault", 0200, dir, ddev,
 				 &iommu_debug_trigger_fault_fops)) {
-		pr_err("Couldn't create iommu/devices/%s/trigger-fault debugfs file\n",
+		pr_err_ratelimited("Couldn't create iommu/devices/%s/trigger-fault debugfs file\n",
 		       dev_name(dev));
 		goto err_rmdir;
 	}
@@ -2337,7 +2339,7 @@
 	debugfs_tests_dir = debugfs_create_dir("tests",
 					       iommu_debugfs_top);
 	if (!debugfs_tests_dir) {
-		pr_err("Couldn't create iommu/tests debugfs directory\n");
+		pr_err_ratelimited("Couldn't create iommu/tests debugfs directory\n");
 		return -ENODEV;
 	}
 
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index f7787e7..0e0e88e 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -103,6 +103,14 @@
 	int err;
 
 	ops = iommu_ops_from_fwnode(fwnode);
+	/*
+	 * Return -EPROBE_DEFER for platform devices that depend on the SMMU
+	 * driver having registered. Deferring from here ensures the clients
+	 * are later added to the proper IOMMU groups.
+	 */
+	if (!dev_is_pci(dev) && of_device_is_available(iommu_spec->np) && !ops)
+		return -EPROBE_DEFER;
+
 	if ((ops && !ops->of_xlate) ||
 	    !of_device_is_available(iommu_spec->np))
 		return NO_IOMMU;
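
The hunk above leans on the driver core's probe-deferral machinery: returning
-EPROBE_DEFER makes the client device's probe fail softly and be retried once
more drivers (here, the SMMU driver) have registered. A minimal sketch of how a
consumer typically propagates the error; example_acquire_iommu and
example_client_probe are hypothetical names, not part of this patch:

    #include <linux/errno.h>
    #include <linux/platform_device.h>

    /* hypothetical helper: fails with -EPROBE_DEFER until the SMMU
     * driver has registered its ops for this device's fwnode */
    extern int example_acquire_iommu(struct device *dev);

    static int example_client_probe(struct platform_device *pdev)
    {
            int ret = example_acquire_iommu(&pdev->dev);

            if (ret == -EPROBE_DEFER)
                    return ret;     /* driver core re-queues this probe */
            if (ret)
                    return ret;     /* any other error is fatal here */
            return 0;
    }
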
diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h
index d097373..3a97fe6 100644
--- a/drivers/irqchip/irq-gic-common.h
+++ b/drivers/irqchip/irq-gic-common.h
@@ -27,6 +27,7 @@
 	u32 iidr;
 	u32 mask;
 };
+extern bool from_suspend;
 
 #ifdef CONFIG_QCOM_SHOW_RESUME_IRQ
 extern int msm_show_resume_irq_mask;
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index c2df341..cf3abb8 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -2267,13 +2267,14 @@
 	kfree(its_dev);
 }
 
-static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
+static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
 {
 	int idx;
 
-	idx = find_first_zero_bit(dev->event_map.lpi_map,
-				  dev->event_map.nr_lpis);
-	if (idx == dev->event_map.nr_lpis)
+	idx = bitmap_find_free_region(dev->event_map.lpi_map,
+				      dev->event_map.nr_lpis,
+				      get_count_order(nvecs));
+	if (idx < 0)
 		return -ENOSPC;
 
 	*hwirq = dev->event_map.lpi_base + idx;
@@ -2369,21 +2370,21 @@
 	int err;
 	int i;
 
-	for (i = 0; i < nr_irqs; i++) {
-		err = its_alloc_device_irq(its_dev, &hwirq);
-		if (err)
-			return err;
+	err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
+	if (err)
+		return err;
 
-		err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
+	for (i = 0; i < nr_irqs; i++) {
+		err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
 		if (err)
 			return err;
 
 		irq_domain_set_hwirq_and_chip(domain, virq + i,
-					      hwirq, &its_irq_chip, its_dev);
+					      hwirq + i, &its_irq_chip, its_dev);
 		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
 		pr_debug("ID:%d pID:%d vID:%d\n",
-			 (int)(hwirq - its_dev->event_map.lpi_base),
-			 (int) hwirq, virq + i);
+			 (int)(hwirq + i - its_dev->event_map.lpi_base),
+			 (int)(hwirq + i), virq + i);
 	}
 
 	return 0;
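
The switch from a per-vector find_first_zero_bit() loop to a single
bitmap_find_free_region() call matters for multi-MSI, which requires the event
IDs to be contiguous and power-of-two aligned. A small sketch of the
primitive's semantics, with illustrative sizes (not ITS code); on success the
run is claimed (bits set) and its first index returned, and callers must
serialize access to the bitmap themselves:

    #include <linux/bitmap.h>
    #include <linux/errno.h>
    #include <linux/log2.h>

    /* reserve a contiguous, power-of-two-aligned run of 4 events out of
     * a 32-entry map */
    static int demo_alloc_events(unsigned long *lpi_map)
    {
            int first = bitmap_find_free_region(lpi_map, 32,
                                                get_count_order(4));
            if (first < 0)
                    return -ENOSPC; /* no aligned free run of 4 */
            return first;           /* events first..first+3 reserved */
    }
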
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 0e5c34c..16c7637 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -894,6 +894,9 @@
 static int gic_cpu_pm_notifier(struct notifier_block *self,
 			       unsigned long cmd, void *v)
 {
+	if (from_suspend)
+		return NOTIFY_OK;
+
 	if (cmd == CPU_PM_EXIT) {
 		if (gic_dist_security_disabled())
 			gic_enable_redist(true);
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 0ff517d..a4ceb61 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -852,7 +852,7 @@
 	u16 ret;
 
 	if (contr == 0) {
-		strlcpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN);
+		strncpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN);
 		return CAPI_NOERROR;
 	}
 
@@ -860,7 +860,7 @@
 
 	ctr = get_capi_ctr_by_nr(contr);
 	if (ctr && ctr->state == CAPI_CTR_RUNNING) {
-		strlcpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN);
+		strncpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN);
 		ret = CAPI_NOERROR;
 	} else
 		ret = CAPI_REGNOTINSTALLED;
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index df80c89..5d3faae 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -100,8 +100,9 @@
 		led_data->pwm = devm_pwm_get(dev, led->name);
 	if (IS_ERR(led_data->pwm)) {
 		ret = PTR_ERR(led_data->pwm);
-		dev_err(dev, "unable to request PWM for %s: %d\n",
-			led->name, ret);
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "unable to request PWM for %s: %d\n",
+				led->name, ret);
 		return ret;
 	}
 
diff --git a/drivers/leds/leds-qti-tri-led.c b/drivers/leds/leds-qti-tri-led.c
index ad996da..db5d132 100644
--- a/drivers/leds/leds-qti-tri-led.c
+++ b/drivers/leds/leds-qti-tri-led.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/bitops.h>
@@ -371,6 +371,8 @@
 	if (rc < 0)
 		return rc;
 
+	cancel_work_sync(&led_cdev->set_brightness_work);
+
 	mutex_lock(&led->lock);
 	if (led->breathing == breath)
 		goto unlock;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 909ecad..5ddbb81 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -49,7 +49,7 @@
 	struct bio *bio_out;
 	struct bvec_iter iter_in;
 	struct bvec_iter iter_out;
-	sector_t cc_sector;
+	u64 cc_sector;
 	atomic_t cc_pending;
 	union {
 		struct skcipher_request *req;
@@ -81,7 +81,7 @@
 	struct convert_context *ctx;
 	struct scatterlist sg_in[4];
 	struct scatterlist sg_out[4];
-	sector_t iv_sector;
+	u64 iv_sector;
 };
 
 struct crypt_config;
@@ -160,7 +160,7 @@
 		struct iv_lmk_private lmk;
 		struct iv_tcw_private tcw;
 	} iv_gen_private;
-	sector_t iv_offset;
+	u64 iv_offset;
 	unsigned int iv_size;
 	unsigned short int sector_size;
 	unsigned char sector_shift;
@@ -2414,9 +2414,21 @@
 	 * capi:cipher_api_spec-iv:ivopts
 	 */
 	tmp = &cipher_in[strlen("capi:")];
-	cipher_api = strsep(&tmp, "-");
-	*ivmode = strsep(&tmp, ":");
-	*ivopts = tmp;
+
+	/* Separate IV options if present; the hash name may contain another '-' */
+	*ivopts = strrchr(tmp, ':');
+	if (*ivopts) {
+		**ivopts = '\0';
+		(*ivopts)++;
+	}
+	/* Parse IV mode */
+	*ivmode = strrchr(tmp, '-');
+	if (*ivmode) {
+		**ivmode = '\0';
+		(*ivmode)++;
+	}
+	/* The rest is crypto API spec */
+	cipher_api = tmp;
 
 	if (*ivmode && !strcmp(*ivmode, "lmk"))
 		cc->tfms_count = 64;
@@ -2486,11 +2498,8 @@
 		goto bad_mem;
 
 	chainmode = strsep(&tmp, "-");
-	*ivopts = strsep(&tmp, "-");
-	*ivmode = strsep(&*ivopts, ":");
-
-	if (tmp)
-		DMWARN("Ignoring unexpected additional cipher options");
+	*ivmode = strsep(&tmp, ":");
+	*ivopts = tmp;
 
 	/*
 	 * For compatibility with the original dm-crypt mapping format, if
@@ -2789,7 +2798,7 @@
 	}
 
 	ret = -EINVAL;
-	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
+	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
 		ti->error = "Invalid device sector";
 		goto bad;
 	}
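
The reworked parser above splits from the right because the crypto API name in
a capi: spec can itself contain '-' (and an ESSIV hash can follow the ':'),
which the old left-to-right strsep() parsing cut apart at the wrong place. A
standalone userspace illustration of the same split (not dm-crypt code; the
sample string is illustrative):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* e.g. the part after the "capi:" prefix */
            char buf[] = "authenc(hmac(sha256),cbc(aes))-essiv:sha256";
            char *ivopts = strrchr(buf, ':');
            char *ivmode;

            if (ivopts)
                    *ivopts++ = '\0';       /* -> "sha256" */
            ivmode = strrchr(buf, '-');
            if (ivmode)
                    *ivmode++ = '\0';       /* -> "essiv" */

            /* prints: api=authenc(hmac(sha256),cbc(aes)) ivmode=essiv ivopts=sha256 */
            printf("api=%s ivmode=%s ivopts=%s\n", buf,
                   ivmode ? ivmode : "-", ivopts ? ivopts : "-");
            return 0;
    }
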
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 2fb7bb4..fddffe2 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -141,7 +141,7 @@
 	unsigned long long tmpll;
 	char dummy;
 
-	if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1) {
+	if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
 		ti->error = "Invalid device sector";
 		return -EINVAL;
 	}
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 32aabe2..b86d243 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -213,7 +213,7 @@
 	devname = dm_shift_arg(&as);
 
 	r = -EINVAL;
-	if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1) {
+	if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
 		ti->error = "Invalid device sector";
 		goto bad;
 	}
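
The added "tmpll != (sector_t)tmpll" comparisons here and in the neighbouring
targets are a round-trip truncation test: on kernels where sector_t is only 32
bits wide, a 64-bit offset that does not survive the cast is rejected instead
of silently wrapping. A standalone illustration with a 32-bit stand-in type:

    #include <stdint.h>
    #include <stdio.h>

    /* demo_sector_t stands in for a kernel built with 32-bit sector_t */
    typedef uint32_t demo_sector_t;

    static int sector_fits(unsigned long long v)
    {
            return v == (demo_sector_t)v;   /* false once the cast truncates */
    }

    int main(void)
    {
            printf("%d %d\n", sector_fits(0xffffffffULL),
                   sector_fits(0x100000000ULL));    /* prints "1 0" */
            return 0;
    }
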
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 2fc4213..671c243 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -56,15 +56,17 @@
 	atomic_t nr_jobs;
 
 /*
- * We maintain three lists of jobs:
+ * We maintain four lists of jobs:
  *
  * i)   jobs waiting for pages
  * ii)  jobs that have pages, and are waiting for the io to be issued.
- * iii) jobs that have completed.
+ * iii) jobs that don't need to do any IO and just run a callback
+ * iv) jobs that have completed.
  *
- * All three of these are protected by job_lock.
+ * All four of these are protected by job_lock.
  */
 	spinlock_t job_lock;
+	struct list_head callback_jobs;
 	struct list_head complete_jobs;
 	struct list_head io_jobs;
 	struct list_head pages_jobs;
@@ -625,6 +627,7 @@
 	struct dm_kcopyd_client *kc = container_of(work,
 					struct dm_kcopyd_client, kcopyd_work);
 	struct blk_plug plug;
+	unsigned long flags;
 
 	/*
 	 * The order that these are called is *very* important.
@@ -633,6 +636,10 @@
 	 * list.  io jobs call wake when they complete and it all
 	 * starts again.
 	 */
+	spin_lock_irqsave(&kc->job_lock, flags);
+	list_splice_tail_init(&kc->callback_jobs, &kc->complete_jobs);
+	spin_unlock_irqrestore(&kc->job_lock, flags);
+
 	blk_start_plug(&plug);
 	process_jobs(&kc->complete_jobs, kc, run_complete_job);
 	process_jobs(&kc->pages_jobs, kc, run_pages_job);
@@ -650,7 +657,7 @@
 	struct dm_kcopyd_client *kc = job->kc;
 	atomic_inc(&kc->nr_jobs);
 	if (unlikely(!job->source.count))
-		push(&kc->complete_jobs, job);
+		push(&kc->callback_jobs, job);
 	else if (job->pages == &zero_page_list)
 		push(&kc->io_jobs, job);
 	else
@@ -858,7 +865,7 @@
 	job->read_err = read_err;
 	job->write_err = write_err;
 
-	push(&kc->complete_jobs, job);
+	push(&kc->callback_jobs, job);
 	wake(kc);
 }
 EXPORT_SYMBOL(dm_kcopyd_do_callback);
@@ -888,6 +895,7 @@
 		return ERR_PTR(-ENOMEM);
 
 	spin_lock_init(&kc->job_lock);
+	INIT_LIST_HEAD(&kc->callback_jobs);
 	INIT_LIST_HEAD(&kc->complete_jobs);
 	INIT_LIST_HEAD(&kc->io_jobs);
 	INIT_LIST_HEAD(&kc->pages_jobs);
@@ -939,6 +947,7 @@
 	/* Wait for completion of all jobs submitted by this client. */
 	wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));
 
+	BUG_ON(!list_empty(&kc->callback_jobs));
 	BUG_ON(!list_empty(&kc->complete_jobs));
 	BUG_ON(!list_empty(&kc->io_jobs));
 	BUG_ON(!list_empty(&kc->pages_jobs));
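
The new callback_jobs list acts as a staging list: submitters only ever append
to it, and the single worker splices it into complete_jobs under job_lock
before draining, so a zero-I/O job's callback can never overtake work queued
ahead of it. A minimal sketch of the idiom with illustrative names (not the
kcopyd structures):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct demo_worker {
            spinlock_t lock;
            struct list_head staged;        /* filled by submitters */
            struct list_head runnable;      /* drained only by the worker */
    };

    static void demo_drain(struct demo_worker *w)
    {
            unsigned long flags;

            /* move everything staged so far onto the run list, in order */
            spin_lock_irqsave(&w->lock, flags);
            list_splice_tail_init(&w->staged, &w->runnable);
            spin_unlock_irqrestore(&w->lock, flags);

            /* ... process w->runnable without holding the lock ... */
    }
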
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 391537b..f0b088a 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -45,7 +45,7 @@
 	}
 
 	ret = -EINVAL;
-	if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1) {
+	if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) {
 		ti->error = "Invalid device sector";
 		goto bad;
 	}
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 79eab10..5a51151 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -943,7 +943,8 @@
 	char dummy;
 	int ret;
 
-	if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1) {
+	if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1 ||
+	    offset != (sector_t)offset) {
 		ti->error = "Invalid offset";
 		return -EINVAL;
 	}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index ae4b33d..36805b12 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -19,6 +19,7 @@
 #include <linux/vmalloc.h>
 #include <linux/log2.h>
 #include <linux/dm-kcopyd.h>
+#include <linux/semaphore.h>
 
 #include "dm.h"
 
@@ -105,6 +106,9 @@
 	/* The on disk metadata handler */
 	struct dm_exception_store *store;
 
+	/* Maximum number of in-flight COW jobs. */
+	struct semaphore cow_count;
+
 	struct dm_kcopyd_client *kcopyd_client;
 
 	/* Wait for events based on state_bits */
@@ -145,6 +149,19 @@
 #define RUNNING_MERGE          0
 #define SHUTDOWN_MERGE         1
 
+/*
+ * Maximum number of chunks being copied on write.
+ *
+ * The value was decided experimentally as a trade-off between memory
+ * consumption, stalling the kernel's workqueues and maintaining a high enough
+ * throughput.
+ */
+#define DEFAULT_COW_THRESHOLD 2048
+
+static int cow_threshold = DEFAULT_COW_THRESHOLD;
+module_param_named(snapshot_cow_threshold, cow_threshold, int, 0644);
+MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
+
 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
 		"A percentage of time allocated for copy on write");
 
@@ -1190,6 +1207,8 @@
 		goto bad_hash_tables;
 	}
 
+	sema_init(&s->cow_count, (cow_threshold > 0) ? cow_threshold : INT_MAX);
+
 	s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
 	if (IS_ERR(s->kcopyd_client)) {
 		r = PTR_ERR(s->kcopyd_client);
@@ -1575,6 +1594,7 @@
 		rb_link_node(&pe->out_of_order_node, parent, p);
 		rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree);
 	}
+	up(&s->cow_count);
 }
 
 /*
@@ -1598,6 +1618,7 @@
 	dest.count = src.count;
 
 	/* Hand over to kcopyd */
+	down(&s->cow_count);
 	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
 }
 
@@ -1617,6 +1638,7 @@
 	pe->full_bio = bio;
 	pe->full_bio_end_io = bio->bi_end_io;
 
+	down(&s->cow_count);
 	callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
 						   copy_callback, pe);
 
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 485626d..b065df3 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1938,6 +1938,9 @@
 	 */
 	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
 		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
+
+	/* io_pages is used for readahead */
+	q->backing_dev_info->io_pages = limits->max_sectors >> (PAGE_SHIFT - 9);
 }
 
 unsigned int dm_table_get_num_targets(struct dm_table *t)
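
As a unit check on the shift above: max_sectors is expressed in 512-byte
sectors, so assuming 4 KiB pages (PAGE_SHIFT = 12) a limit of 2048 sectors
(1 MiB) yields io_pages = 2048 >> (12 - 9) = 256 pages, which is again 1 MiB of
readahead.
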
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 20b0776..ed3cace 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1678,7 +1678,7 @@
 	return r;
 }
 
-int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
+int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
 {
 	int r;
 	uint32_t ref_count;
@@ -1686,7 +1686,7 @@
 	down_read(&pmd->root_lock);
 	r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
 	if (!r)
-		*result = (ref_count != 0);
+		*result = (ref_count > 1);
 	up_read(&pmd->root_lock);
 
 	return r;
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index 35e954e..f6be0d7 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -195,7 +195,7 @@
 
 int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
 
-int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
+int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
 
 int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
 int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 1f225a1..c30a785 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1048,7 +1048,7 @@
 	 * passdown we have to check that these blocks are now unused.
 	 */
 	int r = 0;
-	bool used = true;
+	bool shared = true;
 	struct thin_c *tc = m->tc;
 	struct pool *pool = tc->pool;
 	dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
@@ -1058,11 +1058,11 @@
 	while (b != end) {
 		/* find start of unmapped run */
 		for (; b < end; b++) {
-			r = dm_pool_block_is_used(pool->pmd, b, &used);
+			r = dm_pool_block_is_shared(pool->pmd, b, &shared);
 			if (r)
 				goto out;
 
-			if (!used)
+			if (!shared)
 				break;
 		}
 
@@ -1071,11 +1071,11 @@
 
 		/* find end of run */
 		for (e = b + 1; e != end; e++) {
-			r = dm_pool_block_is_used(pool->pmd, e, &used);
+			r = dm_pool_block_is_shared(pool->pmd, e, &shared);
 			if (r)
 				goto out;
 
-			if (used)
+			if (shared)
 				break;
 		}
 
diff --git a/drivers/md/dm-unstripe.c b/drivers/md/dm-unstripe.c
index 954b7ab..e673dac 100644
--- a/drivers/md/dm-unstripe.c
+++ b/drivers/md/dm-unstripe.c
@@ -78,7 +78,7 @@
 		goto err;
 	}
 
-	if (sscanf(argv[4], "%llu%c", &start, &dummy) != 1) {
+	if (sscanf(argv[4], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
 		ti->error = "Invalid striped device offset";
 		goto err;
 	}
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index e6e925a..6518b01 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1935,12 +1935,14 @@
 }
 
 static struct stripe_head *
-r5c_recovery_alloc_stripe(struct r5conf *conf,
-			  sector_t stripe_sect)
+r5c_recovery_alloc_stripe(
+		struct r5conf *conf,
+		sector_t stripe_sect,
+		int noblock)
 {
 	struct stripe_head *sh;
 
-	sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0);
+	sh = raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0);
 	if (!sh)
 		return NULL;  /* no more stripe available */
 
@@ -2150,7 +2152,7 @@
 						stripe_sect);
 
 		if (!sh) {
-			sh = r5c_recovery_alloc_stripe(conf, stripe_sect);
+			sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1);
 			/*
 			 * cannot get stripe from raid5_get_active_stripe
 			 * try replaying some stripes
@@ -2159,20 +2161,29 @@
 				r5c_recovery_replay_stripes(
 					cached_stripe_list, ctx);
 				sh = r5c_recovery_alloc_stripe(
-					conf, stripe_sect);
+					conf, stripe_sect, 1);
 			}
 			if (!sh) {
+				int new_size = conf->min_nr_stripes * 2;
 				pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n",
 					mdname(mddev),
-					conf->min_nr_stripes * 2);
-				raid5_set_cache_size(mddev,
-						     conf->min_nr_stripes * 2);
-				sh = r5c_recovery_alloc_stripe(conf,
-							       stripe_sect);
+					new_size);
+				ret = raid5_set_cache_size(mddev, new_size);
+				if (conf->min_nr_stripes <= new_size / 2) {
+					pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n",
+						mdname(mddev),
+						ret,
+						new_size,
+						conf->min_nr_stripes,
+						conf->max_nr_stripes);
+					return -ENOMEM;
+				}
+				sh = r5c_recovery_alloc_stripe(
+					conf, stripe_sect, 0);
 			}
 			if (!sh) {
 				pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
-				       mdname(mddev));
+					mdname(mddev));
 				return -ENOMEM;
 			}
 			list_add_tail(&sh->lru, cached_stripe_list);
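
The recovery flow above now escalates in stages instead of assuming the stripe
cache can always grow. A hedged sketch of the ordering; every helper and the
pool_size variable below are hypothetical stand-ins, not raid5 code:

    #include <linux/errno.h>

    struct stripe;
    extern struct stripe *try_alloc_nonblock(void);
    extern struct stripe *try_alloc_block(void);
    extern void replay_cached_stripes(void);
    extern void grow_pool(int new_size);
    extern int pool_size;           /* observed size after growing */

    static int demo_get_stripe(struct stripe **out)
    {
            struct stripe *sh = try_alloc_nonblock();   /* cheap attempt */

            if (!sh) {
                    replay_cached_stripes();            /* free some, retry */
                    sh = try_alloc_nonblock();
            }
            if (!sh) {
                    int new_size = pool_size * 2;

                    grow_pool(new_size);                /* may partially fail */
                    if (pool_size <= new_size / 2)      /* growth didn't stick */
                            return -ENOMEM;
                    sh = try_alloc_block();             /* last try may wait */
            }
            if (!sh)
                    return -ENOMEM;
            *out = sh;
            return 0;
    }
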
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e4e98f4..45a3551 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6357,6 +6357,7 @@
 int
 raid5_set_cache_size(struct mddev *mddev, int size)
 {
+	int result = 0;
 	struct r5conf *conf = mddev->private;
 
 	if (size <= 16 || size > 32768)
@@ -6373,11 +6374,14 @@
 
 	mutex_lock(&conf->cache_size_mutex);
 	while (size > conf->max_nr_stripes)
-		if (!grow_one_stripe(conf, GFP_KERNEL))
+		if (!grow_one_stripe(conf, GFP_KERNEL)) {
+			conf->min_nr_stripes = conf->max_nr_stripes;
+			result = -ENOMEM;
 			break;
+		}
 	mutex_unlock(&conf->cache_size_mutex);
 
-	return 0;
+	return result;
 }
 EXPORT_SYMBOL(raid5_set_cache_size);
 
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index a537e51..a7ea27d 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -442,7 +442,7 @@
 				(adap->needs_hpd &&
 				 (!adap->is_configured && !adap->is_configuring)) ||
 				kthread_should_stop() ||
-				(!adap->transmitting &&
+				(!adap->transmit_in_progress &&
 				 !list_empty(&adap->transmit_queue)),
 				msecs_to_jiffies(CEC_XFER_TIMEOUT_MS));
 			timeout = err == 0;
@@ -450,7 +450,7 @@
 			/* Otherwise we just wait for something to happen. */
 			wait_event_interruptible(adap->kthread_waitq,
 				kthread_should_stop() ||
-				(!adap->transmitting &&
+				(!adap->transmit_in_progress &&
 				 !list_empty(&adap->transmit_queue)));
 		}
 
@@ -475,6 +475,7 @@
 			pr_warn("cec-%s: message %*ph timed out\n", adap->name,
 				adap->transmitting->msg.len,
 				adap->transmitting->msg.msg);
+			adap->transmit_in_progress = false;
 			adap->tx_timeouts++;
 			/* Just give up on this. */
 			cec_data_cancel(adap->transmitting,
@@ -486,7 +487,7 @@
 		 * If we are still transmitting, or there is nothing new to
 		 * transmit, then just continue waiting.
 		 */
-		if (adap->transmitting || list_empty(&adap->transmit_queue))
+		if (adap->transmit_in_progress || list_empty(&adap->transmit_queue))
 			goto unlock;
 
 		/* Get a new message to transmit */
@@ -532,6 +533,8 @@
 		if (adap->ops->adap_transmit(adap, data->attempts,
 					     signal_free_time, &data->msg))
 			cec_data_cancel(data, CEC_TX_STATUS_ABORTED);
+		else
+			adap->transmit_in_progress = true;
 
 unlock:
 		mutex_unlock(&adap->lock);
@@ -562,14 +565,17 @@
 	data = adap->transmitting;
 	if (!data) {
 		/*
-		 * This can happen if a transmit was issued and the cable is
+		 * This might happen if a transmit was issued and the cable is
 		 * unplugged while the transmit is ongoing. Ignore this
 		 * transmit in that case.
 		 */
-		dprintk(1, "%s was called without an ongoing transmit!\n",
-			__func__);
-		goto unlock;
+		if (!adap->transmit_in_progress)
+			dprintk(1, "%s was called without an ongoing transmit!\n",
+				__func__);
+		adap->transmit_in_progress = false;
+		goto wake_thread;
 	}
+	adap->transmit_in_progress = false;
 
 	msg = &data->msg;
 
@@ -635,7 +641,6 @@
 	 * for transmitting or to retry the current message.
 	 */
 	wake_up_interruptible(&adap->kthread_waitq);
-unlock:
 	mutex_unlock(&adap->lock);
 }
 EXPORT_SYMBOL_GPL(cec_transmit_done_ts);
@@ -1483,8 +1488,11 @@
 		if (adap->monitor_all_cnt)
 			WARN_ON(call_op(adap, adap_monitor_all_enable, false));
 		mutex_lock(&adap->devnode.lock);
-		if (adap->needs_hpd || list_empty(&adap->devnode.fhs))
+		if (adap->needs_hpd || list_empty(&adap->devnode.fhs)) {
 			WARN_ON(adap->ops->adap_enable(adap, false));
+			adap->transmit_in_progress = false;
+			wake_up_interruptible(&adap->kthread_waitq);
+		}
 		mutex_unlock(&adap->devnode.lock);
 		if (phys_addr == CEC_PHYS_ADDR_INVALID)
 			return;
@@ -1492,6 +1500,7 @@
 
 	mutex_lock(&adap->devnode.lock);
 	adap->last_initiator = 0xff;
+	adap->transmit_in_progress = false;
 
 	if ((adap->needs_hpd || list_empty(&adap->devnode.fhs)) &&
 	    adap->ops->adap_enable(adap, true)) {
diff --git a/drivers/media/cec/cec-pin.c b/drivers/media/cec/cec-pin.c
index 6e31142..0496d93 100644
--- a/drivers/media/cec/cec-pin.c
+++ b/drivers/media/cec/cec-pin.c
@@ -601,8 +601,9 @@
 			break;
 		/* Was the message ACKed? */
 		ack = cec_msg_is_broadcast(&pin->tx_msg) ? v : !v;
-		if (!ack && !pin->tx_ignore_nack_until_eom &&
-		    pin->tx_bit / 10 < pin->tx_msg.len && !pin->tx_post_eom) {
+		if (!ack && (!pin->tx_ignore_nack_until_eom ||
+		    pin->tx_bit / 10 == pin->tx_msg.len - 1) &&
+		    !pin->tx_post_eom) {
 			/*
 			 * Note: the CEC spec is ambiguous regarding
 			 * what action to take when a NACK appears
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
index f40ab57..2036b94 100644
--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
@@ -1738,7 +1738,7 @@
 		unsigned s;	\
 	\
 		for (s = 0; s < len; s++) {	\
-			u8 chr = font8x16[text[s] * 16 + line];	\
+			u8 chr = font8x16[(u8)text[s] * 16 + line];	\
 	\
 			if (hdiv == 2 && tpg->hflip) { \
 				pos[3] = (chr & (0x01 << 6) ? fg : bg);	\
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index 16c7b20..6889c25 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -800,6 +800,9 @@
 		memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
 		q->memory = memory;
 		q->waiting_for_buffers = !q->is_output;
+	} else if (q->memory != memory) {
+		dprintk(1, "memory model mismatch\n");
+		return -EINVAL;
 	}
 
 	num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers);
@@ -1930,9 +1933,13 @@
 			return -EINVAL;
 		}
 	}
+
+	mutex_lock(&q->mmap_lock);
+
 	if (vb2_fileio_is_active(q)) {
 		dprintk(1, "mmap: file io in progress\n");
-		return -EBUSY;
+		ret = -EBUSY;
+		goto unlock;
 	}
 
 	/*
@@ -1940,7 +1947,7 @@
 	 */
 	ret = __find_plane_by_offset(q, off, &buffer, &plane);
 	if (ret)
-		return ret;
+		goto unlock;
 
 	vb = q->bufs[buffer];
 
@@ -1953,11 +1960,13 @@
 	if (length < (vma->vm_end - vma->vm_start)) {
 		dprintk(1,
 			"MMAP invalid, as it would overflow buffer length\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto unlock;
 	}
 
-	mutex_lock(&q->mmap_lock);
 	ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
+
+unlock:
 	mutex_unlock(&q->mmap_lock);
 	if (ret)
 		return ret;
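
Taking mmap_lock before the checks gives the function a lock-then-validate
shape: everything validated is used under the same lock, and all failures
leave through one unlock label. A minimal sketch of that shape with an
illustrative queue type (not the videobuf2 structures):

    #include <linux/errno.h>
    #include <linux/mutex.h>
    #include <linux/types.h>

    struct demo_queue {
            struct mutex mmap_lock;
            bool busy;              /* hypothetical condition */
    };

    static int demo_mmap(struct demo_queue *q)
    {
            int ret = 0;

            mutex_lock(&q->mmap_lock);
            if (q->busy) {
                    ret = -EBUSY;
                    goto unlock;
            }
            /* ... more validation and the mapping itself ... */
    unlock:
            mutex_unlock(&q->mmap_lock);
            return ret;
    }
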
diff --git a/drivers/media/firewire/firedtv-avc.c b/drivers/media/firewire/firedtv-avc.c
index 1c933b2..3ef5df1 100644
--- a/drivers/media/firewire/firedtv-avc.c
+++ b/drivers/media/firewire/firedtv-avc.c
@@ -968,7 +968,8 @@
 	return r->operand[7];
 }
 
-int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
+int avc_ca_app_info(struct firedtv *fdtv, unsigned char *app_info,
+		    unsigned int *len)
 {
 	struct avc_command_frame *c = (void *)fdtv->avc_data;
 	struct avc_response_frame *r = (void *)fdtv->avc_data;
@@ -1009,7 +1010,8 @@
 	return ret;
 }
 
-int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
+int avc_ca_info(struct firedtv *fdtv, unsigned char *app_info,
+		unsigned int *len)
 {
 	struct avc_command_frame *c = (void *)fdtv->avc_data;
 	struct avc_response_frame *r = (void *)fdtv->avc_data;
diff --git a/drivers/media/firewire/firedtv.h b/drivers/media/firewire/firedtv.h
index 876cdec..009905a 100644
--- a/drivers/media/firewire/firedtv.h
+++ b/drivers/media/firewire/firedtv.h
@@ -124,8 +124,10 @@
 		    struct dvb_diseqc_master_cmd *diseqcmd);
 void avc_remote_ctrl_work(struct work_struct *work);
 int avc_register_remote_control(struct firedtv *fdtv);
-int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len);
-int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len);
+int avc_ca_app_info(struct firedtv *fdtv, unsigned char *app_info,
+		    unsigned int *len);
+int avc_ca_info(struct firedtv *fdtv, unsigned char *app_info,
+		unsigned int *len);
 int avc_ca_reset(struct firedtv *fdtv);
 int avc_ca_pmt(struct firedtv *fdtv, char *app_info, int length);
 int avc_ca_get_time_date(struct firedtv *fdtv, int *interval);
diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
index f8c70f1..8cc3bdb 100644
--- a/drivers/media/i2c/imx274.c
+++ b/drivers/media/i2c/imx274.c
@@ -636,16 +636,19 @@
 
 static inline int imx274_read_reg(struct stimx274 *priv, u16 addr, u8 *val)
 {
+	unsigned int uint_val;
 	int err;
 
-	err = regmap_read(priv->regmap, addr, (unsigned int *)val);
+	err = regmap_read(priv->regmap, addr, &uint_val);
 	if (err)
 		dev_err(&priv->client->dev,
 			"%s : i2c read failed, addr = %x\n", __func__, addr);
 	else
 		dev_dbg(&priv->client->dev,
 			"%s : addr 0x%x, val=0x%x\n", __func__,
-			addr, *val);
+			addr, uint_val);
+
+	*val = uint_val;
 	return err;
 }
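
The old code cast a u8 pointer to unsigned int *, so regmap_read() stored four
bytes through a one-byte pointer and corrupted the stack. regmap_read() always
takes an unsigned int *, so the fix is to read into a properly sized local and
narrow afterwards. A sketch of the safe shape (read_reg_u8 is an assumed
helper, not the imx274 code):

    #include <linux/regmap.h>
    #include <linux/types.h>

    static int read_reg_u8(struct regmap *map, unsigned int addr, u8 *val)
    {
            unsigned int tmp;
            int err;

            err = regmap_read(map, addr, &tmp);
            if (err)
                    return err;

            *val = tmp;     /* deliberate narrowing to the 8-bit register */
            return 0;
    }
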
 
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 30b15e9..8e7a2a5 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -2020,6 +2020,7 @@
 	struct ov5640_dev *sensor = to_ov5640_dev(sd);
 	const struct ov5640_mode_info *new_mode;
 	struct v4l2_mbus_framefmt *mbus_fmt = &format->format;
+	struct v4l2_mbus_framefmt *fmt;
 	int ret;
 
 	if (format->pad != 0)
@@ -2037,22 +2038,20 @@
 	if (ret)
 		goto out;
 
-	if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
-		struct v4l2_mbus_framefmt *fmt =
-			v4l2_subdev_get_try_format(sd, cfg, 0);
+	if (format->which == V4L2_SUBDEV_FORMAT_TRY)
+		fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+	else
+		fmt = &sensor->fmt;
 
-		*fmt = *mbus_fmt;
-		goto out;
-	}
+	*fmt = *mbus_fmt;
 
 	if (new_mode != sensor->current_mode) {
 		sensor->current_mode = new_mode;
 		sensor->pending_mode_change = true;
 	}
-	if (mbus_fmt->code != sensor->fmt.code) {
-		sensor->fmt = *mbus_fmt;
+	if (mbus_fmt->code != sensor->fmt.code)
 		sensor->pending_fmt_change = true;
-	}
+
 out:
 	mutex_unlock(&sensor->lock);
 	return ret;
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 39804d8..fd5c52b 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -23,6 +23,7 @@
 #include <linux/moduleparam.h>
 #include <linux/kmod.h>
 #include <linux/kernel.h>
+#include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
@@ -41,6 +42,18 @@
 MODULE_LICENSE("GPL");
 MODULE_VERSION(CX23885_VERSION);
 
+/*
+ * Some platforms have been found to require periodic resetting of the DMA
+ * engine. Ryzen and XEON platforms are known to be affected. The symptom
+ * encountered is "mpeg risc op code error". With the default of 1, only
+ * Ryzen platforms employ the workaround. It can be explicitly disabled
+ * for all platforms by setting the option to 0, or forced on for any
+ * platform by setting it to 2.
+ */
+static unsigned int dma_reset_workaround = 1;
+module_param(dma_reset_workaround, int, 0644);
+MODULE_PARM_DESC(dma_reset_workaround, "periodic RiSC dma engine reset; 0-force disable, 1-driver detect (default), 2-force enable");
+
 static unsigned int debug;
 module_param(debug, int, 0644);
 MODULE_PARM_DESC(debug, "enable debug messages");
@@ -603,8 +616,13 @@
 
 static void cx23885_clear_bridge_error(struct cx23885_dev *dev)
 {
-	uint32_t reg1_val = cx_read(TC_REQ); /* read-only */
-	uint32_t reg2_val = cx_read(TC_REQ_SET);
+	uint32_t reg1_val, reg2_val;
+
+	if (!dev->need_dma_reset)
+		return;
+
+	reg1_val = cx_read(TC_REQ); /* read-only */
+	reg2_val = cx_read(TC_REQ_SET);
 
 	if (reg1_val && reg2_val) {
 		cx_write(TC_REQ, reg1_val);
@@ -2058,6 +2076,37 @@
 	/* TODO: 23-19 */
 }
 
+static struct {
+	int vendor, dev;
+} const broken_dev_id[] = {
+	/* According to
+	 * https://openbenchmarking.org/system/1703021-RI-AMDZEN08075/Ryzen%207%201800X/lspci,
+	 * 0x1451 is the PCI ID for the IOMMU found on Ryzen
+	 */
+	{ PCI_VENDOR_ID_AMD, 0x1451 },
+};
+
+static bool cx23885_does_need_dma_reset(void)
+{
+	int i;
+	struct pci_dev *pdev = NULL;
+
+	if (dma_reset_workaround == 0)
+		return false;
+	else if (dma_reset_workaround == 2)
+		return true;
+
+	for (i = 0; i < ARRAY_SIZE(broken_dev_id); i++) {
+		pdev = pci_get_device(broken_dev_id[i].vendor,
+				      broken_dev_id[i].dev, NULL);
+		if (pdev) {
+			pci_dev_put(pdev);
+			return true;
+		}
+	}
+	return false;
+}
+
 static int cx23885_initdev(struct pci_dev *pci_dev,
 			   const struct pci_device_id *pci_id)
 {
@@ -2069,6 +2118,8 @@
 	if (NULL == dev)
 		return -ENOMEM;
 
+	dev->need_dma_reset = cx23885_does_need_dma_reset();
+
 	err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
 	if (err < 0)
 		goto fail_free;
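
A hedged usage note on the knob above: as a module parameter it can be set at
load time, e.g. "modprobe cx23885 dma_reset_workaround=2" to force the
periodic reset on a non-Ryzen platform, or "=0" to rule the workaround out
when chasing an unrelated "mpeg risc op code error". Since the parameter is
registered with mode 0644, it should also be writable at runtime under
/sys/module/cx23885/parameters/.
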
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index d54c7ee..cf965ef 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -451,6 +451,8 @@
 	/* Analog raw audio */
 	struct cx23885_audio_dev   *audio_dev;
 
+	/* Does the system require periodic DMA resets? */
+	unsigned int		need_dma_reset:1;
 };
 
 static inline struct cx23885_dev *to_cx23885(struct v4l2_device *v4l2_dev)
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_hw_intf.h b/drivers/media/platform/msm/camera/cam_core/cam_hw_intf.h
index 2c388d0..63e88dd 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_hw_intf.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_HW_INTF_H_
@@ -73,4 +73,8 @@
 	void                        *hw_priv;
 };
 
+/* hardware event callback function type */
+typedef int (*cam_hw_mgr_event_cb_func)(void *priv, uint32_t evt_id,
+	void *evt_data);
+
 #endif /* _CAM_HW_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
index 261c457..1dcf4ee 100644
--- a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
@@ -31,6 +31,7 @@
  * @sfr_buf: buffer for subsystem failure reason[SFR]
  * @sec_heap: secondary heap hfi memory for firmware
  * @qdss: qdss mapped memory for fw
+ * @io_mem: io memory info
  * @icp_base: icp base address
  */
 struct hfi_mem_info {
@@ -42,6 +43,7 @@
 	struct hfi_mem sec_heap;
 	struct hfi_mem shmem;
 	struct hfi_mem qdss;
+	struct hfi_mem io_mem;
 	void __iomem *icp_base;
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h
index d969e48..f67a704 100644
--- a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_HFI_REG_H_
@@ -35,8 +35,11 @@
 #define HFI_REG_UNCACHED_HEAP_PTR               0x5C
 #define HFI_REG_UNCACHED_HEAP_SIZE              0x60
 #define HFI_REG_QDSS_IOVA                       0x6C
-#define HFI_REG_QDSS_IOVA_SIZE                  0x70
 #define HFI_REG_SFR_PTR                         0x68
+#define HFI_REG_QDSS_IOVA_SIZE                  0x70
+#define HFI_REG_IO_REGION_IOVA                  0x74
+#define HFI_REG_IO_REGION_SIZE                  0x78
+
 /* end of ICP CSR registers */
 
 /* flags for ICP CSR registers */
diff --git a/drivers/media/platform/msm/camera/cam_icp/hfi.c b/drivers/media/platform/msm/camera/cam_icp/hfi.c
index 055d911..b0f625c 100644
--- a/drivers/media/platform/msm/camera/cam_icp/hfi.c
+++ b/drivers/media/platform/msm/camera/cam_icp/hfi.c
@@ -665,6 +665,10 @@
 		icp_base + HFI_REG_QDSS_IOVA);
 	cam_io_w_mb((uint32_t)hfi_mem->qdss.len,
 		icp_base + HFI_REG_QDSS_IOVA_SIZE);
+	cam_io_w_mb((uint32_t)hfi_mem->io_mem.iova,
+		icp_base + HFI_REG_IO_REGION_IOVA);
+	cam_io_w_mb((uint32_t)hfi_mem->io_mem.len,
+		icp_base + HFI_REG_IO_REGION_SIZE);
 
 	return rc;
 }
@@ -853,6 +857,10 @@
 		icp_base + HFI_REG_QDSS_IOVA);
 	cam_io_w_mb((uint32_t)hfi_mem->qdss.len,
 		icp_base + HFI_REG_QDSS_IOVA_SIZE);
+	cam_io_w_mb((uint32_t)hfi_mem->io_mem.iova,
+		icp_base + HFI_REG_IO_REGION_IOVA);
+	cam_io_w_mb((uint32_t)hfi_mem->io_mem.len,
+		icp_base + HFI_REG_IO_REGION_SIZE);
 
 	hw_version = cam_io_r(icp_base + HFI_REG_A5_HW_VERSION);
 
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
index 98d10c5..083bb98 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
@@ -368,7 +368,7 @@
 	uint32_t ubwc_ipe_cfg[ICP_UBWC_MAX] = {0};
 	uint32_t ubwc_bps_cfg[ICP_UBWC_MAX] = {0};
 	uint32_t index = 0;
-	int rc = 0;
+	int rc = 0, ddr_type = 0;
 
 	if (!device_priv) {
 		CAM_ERR(CAM_ICP, "Invalid arguments");
@@ -474,7 +474,9 @@
 
 		if (a5_soc->ubwc_config_ext) {
 			/* Invoke kernel API to determine DDR type */
-			if (of_fdt_get_ddrtype() == DDR_TYPE_LPDDR5)
+			ddr_type = of_fdt_get_ddrtype();
+			if ((ddr_type == DDR_TYPE_LPDDR5) ||
+				(ddr_type == DDR_TYPE_LPDDR5X))
 				index = 1;
 
 			ubwc_cfg_ext = &a5_soc->uconfig.ubwc_cfg_ext;
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 2385b02..b3dae10d 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -2177,6 +2177,25 @@
 	return rc;
 }
 
+static int cam_icp_get_io_mem_info(void)
+{
+	int rc;
+	size_t len;
+	dma_addr_t iova;
+
+	rc = cam_smmu_get_io_region_info(icp_hw_mgr.iommu_hdl,
+		&iova, &len);
+	if (rc)
+		return rc;
+
+	icp_hw_mgr.hfi_mem.io_mem.iova_len = len;
+	icp_hw_mgr.hfi_mem.io_mem.iova_start = iova;
+
+	CAM_DBG(CAM_ICP, "iova: %llx, len: %zu", iova, len);
+
+	return rc;
+}
+
 static int cam_icp_allocate_hfi_mem(void)
 {
 	int rc;
@@ -2237,7 +2256,15 @@
 		goto sec_heap_alloc_failed;
 	}
 
+	rc = cam_icp_get_io_mem_info();
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Unable to get I/O region info");
+		goto get_io_mem_failed;
+	}
+
 	return rc;
+get_io_mem_failed:
+	cam_mem_mgr_free_memory_region(&icp_hw_mgr.hfi_mem.sec_heap);
 sec_heap_alloc_failed:
 	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.sfr_buf);
 sfr_buf_alloc_failed:
@@ -2456,6 +2483,14 @@
 
 	hfi_mem.qdss.iova = icp_hw_mgr.hfi_mem.qdss_buf.iova;
 	hfi_mem.qdss.len = icp_hw_mgr.hfi_mem.qdss_buf.len;
+
+	hfi_mem.io_mem.iova = icp_hw_mgr.hfi_mem.io_mem.iova_start;
+	hfi_mem.io_mem.len = icp_hw_mgr.hfi_mem.io_mem.iova_len;
+
+	CAM_DBG(CAM_ICP, "IO region IOVA = %X length = %lld",
+			hfi_mem.io_mem.iova,
+			hfi_mem.io_mem.len);
+
 	return cam_hfi_resume(&hfi_mem,
 		a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base,
 		hw_mgr->a5_jtag_debug);
@@ -2836,6 +2871,9 @@
 	hfi_mem.qdss.iova = icp_hw_mgr.hfi_mem.qdss_buf.iova;
 	hfi_mem.qdss.len = icp_hw_mgr.hfi_mem.qdss_buf.len;
 
+	hfi_mem.io_mem.iova = icp_hw_mgr.hfi_mem.io_mem.iova_start;
+	hfi_mem.io_mem.len = icp_hw_mgr.hfi_mem.io_mem.iova_len;
+
 	return cam_hfi_init(0, &hfi_mem,
 		a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base,
 		hw_mgr->a5_jtag_debug);
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
index d20572d..9d15e72 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
@@ -74,6 +74,8 @@
  * @fw_buf: Memory info of firmware
  * @qdss_buf: Memory info of qdss
  * @sfr_buf: Memory info for sfr buffer
+ * @shmem: Memory info for shared region
+ * @io_mem: Memory info for io region
  */
 struct icp_hfi_mem_info {
 	struct cam_mem_mgr_memory_desc qtbl;
@@ -85,6 +87,7 @@
 	struct cam_mem_mgr_memory_desc qdss_buf;
 	struct cam_mem_mgr_memory_desc sfr_buf;
 	struct cam_smmu_region_info shmem;
+	struct cam_smmu_region_info io_mem;
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index 228931c..9994c7b 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -50,6 +50,11 @@
 
 static struct cam_ife_hw_mgr g_ife_hw_mgr;
 
+static int cam_ife_hw_mgr_event_handler(
+	void                                *priv,
+	uint32_t                             evt_id,
+	void                                *evt_info);
+
 static int cam_ife_notify_safe_lut_scm(bool safe_trigger)
 {
 	uint32_t camera_hw_version, rc = 0;
@@ -693,7 +698,7 @@
 		vfe_acquire.rsrc_type = CAM_ISP_RESOURCE_VFE_BUS_RD;
 		vfe_acquire.tasklet = ife_ctx->common.tasklet_info;
 		vfe_acquire.vfe_out.cdm_ops = ife_ctx->cdm_ops;
-		vfe_acquire.vfe_out.ctx = ife_ctx;
+		vfe_acquire.priv = ife_ctx;
 		vfe_acquire.vfe_out.unique_id = ife_ctx->ctx_index;
 		vfe_acquire.vfe_out.is_dual = ife_src_res->is_dual_vfe;
 		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
@@ -798,11 +803,12 @@
 			continue;
 
 		vfe_acquire.vfe_out.cdm_ops = ife_ctx->cdm_ops;
-		vfe_acquire.vfe_out.ctx = ife_ctx;
+		vfe_acquire.priv = ife_ctx;
 		vfe_acquire.vfe_out.out_port_info = out_port;
 		vfe_acquire.vfe_out.split_id = CAM_ISP_HW_SPLIT_LEFT;
 		vfe_acquire.vfe_out.unique_id = ife_ctx->ctx_index;
 		vfe_acquire.vfe_out.is_dual = 0;
+		vfe_acquire.event_cb = cam_ife_hw_mgr_event_handler;
 		hw_intf = ife_src_res->hw_res[0]->hw_intf;
 		rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
 			&vfe_acquire,
@@ -839,7 +845,8 @@
 static int cam_ife_hw_mgr_acquire_res_ife_out_pixel(
 	struct cam_ife_hw_mgr_ctx       *ife_ctx,
 	struct cam_ife_hw_mgr_res       *ife_src_res,
-	struct cam_isp_in_port_info     *in_port)
+	struct cam_isp_in_port_info     *in_port,
+	bool                             acquire_lcr)
 {
 	int rc = -1;
 	uint32_t  i, j, k;
@@ -860,8 +867,13 @@
 		if (cam_ife_hw_mgr_is_rdi_res(out_port->res_type))
 			continue;
 
-		CAM_DBG(CAM_ISP, "res_type 0x%x",
-			 out_port->res_type);
+		if ((acquire_lcr &&
+			out_port->res_type != CAM_ISP_IFE_OUT_RES_LCR) ||
+			(!acquire_lcr &&
+			out_port->res_type == CAM_ISP_IFE_OUT_RES_LCR))
+			continue;
+
+		CAM_DBG(CAM_ISP, "res_type 0x%x", out_port->res_type);
 
 		ife_out_res = &ife_ctx->res_list_ife_out[k];
 		ife_out_res->is_dual_vfe = in_port->usage_type;
@@ -869,10 +881,11 @@
 		vfe_acquire.rsrc_type = CAM_ISP_RESOURCE_VFE_OUT;
 		vfe_acquire.tasklet = ife_ctx->common.tasklet_info;
 		vfe_acquire.vfe_out.cdm_ops = ife_ctx->cdm_ops;
-		vfe_acquire.vfe_out.ctx = ife_ctx;
+		vfe_acquire.priv = ife_ctx;
 		vfe_acquire.vfe_out.out_port_info =  out_port;
 		vfe_acquire.vfe_out.is_dual       = ife_src_res->is_dual_vfe;
 		vfe_acquire.vfe_out.unique_id     = ife_ctx->ctx_index;
+		vfe_acquire.event_cb = cam_ife_hw_mgr_event_handler;
 
 		for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) {
 			if (!ife_src_res->hw_res[j])
@@ -948,9 +961,12 @@
 		case CAM_ISP_HW_VFE_IN_CAMIF:
 		case CAM_ISP_HW_VFE_IN_PDLIB:
 		case CAM_ISP_HW_VFE_IN_RD:
+			rc = cam_ife_hw_mgr_acquire_res_ife_out_pixel(ife_ctx,
+				ife_src_res, in_port, false);
+			break;
 		case CAM_ISP_HW_VFE_IN_LCR:
 			rc = cam_ife_hw_mgr_acquire_res_ife_out_pixel(ife_ctx,
-				ife_src_res, in_port);
+				ife_src_res, in_port, true);
 			break;
 		case CAM_ISP_HW_VFE_IN_RDI0:
 		case CAM_ISP_HW_VFE_IN_RDI1:
@@ -1096,13 +1112,14 @@
 
 err:
 	/* release resource at the entry function */
-	CAM_DBG(CAM_ISP, "Exit rc(0x%x)", rc);
+	CAM_DBG(CAM_ISP, "Exit rc %d", rc);
 	return rc;
 }
 
 static int cam_ife_hw_mgr_acquire_res_ife_src(
 	struct cam_ife_hw_mgr_ctx     *ife_ctx,
-	struct cam_isp_in_port_info   *in_port)
+	struct cam_isp_in_port_info   *in_port,
+	bool                           acquire_lcr)
 {
 	int rc                = -1;
 	int i;
@@ -1115,7 +1132,10 @@
 	ife_hw_mgr = ife_ctx->hw_mgr;
 
 	list_for_each_entry(csid_res, &ife_ctx->res_list_ife_csid, list) {
-		if (csid_res->num_children)
+		if (csid_res->num_children && !acquire_lcr)
+			continue;
+
+		if (acquire_lcr && csid_res->res_id != CAM_IFE_PIX_PATH_RES_IPP)
 			continue;
 
 		rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list,
@@ -1131,10 +1151,17 @@
 		vfe_acquire.tasklet = ife_ctx->common.tasklet_info;
 		vfe_acquire.vfe_in.cdm_ops = ife_ctx->cdm_ops;
 		vfe_acquire.vfe_in.in_port = in_port;
+		vfe_acquire.priv = ife_ctx;
+		vfe_acquire.event_cb = cam_ife_hw_mgr_event_handler;
 
 		switch (csid_res->res_id) {
 		case CAM_IFE_PIX_PATH_RES_IPP:
-			vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_CAMIF;
+			if (!acquire_lcr)
+				vfe_acquire.vfe_in.res_id =
+					CAM_ISP_HW_VFE_IN_CAMIF;
+			else
+				vfe_acquire.vfe_in.res_id =
+					CAM_ISP_HW_VFE_IN_LCR;
 			if (csid_res->is_dual_vfe)
 				vfe_acquire.vfe_in.sync_mode =
 				CAM_ISP_HW_SYNC_MASTER;
@@ -1206,10 +1233,6 @@
 
 		}
 
-		/* It should be one to one mapping between
-		 * csid resource and ife source resource
-		 */
-		csid_res->child[0] = ife_src_res;
 		ife_src_res->parent = csid_res;
 		csid_res->child[csid_res->num_children++] = ife_src_res;
 		CAM_DBG(CAM_ISP,
@@ -1366,6 +1389,7 @@
 	/* CID(DT_ID) value of acquire device, require for path */
 	cid_res_temp->res_id = csid_acquire.node_res->res_id;
 	cid_res_temp->is_dual_vfe = in_port->usage_type;
+	ife_ctx->is_dual = (bool)in_port->usage_type;
 
 	if (in_port->num_out_res)
 		cid_res_temp->is_secure = out_port->secure_mode;
@@ -1716,12 +1740,14 @@
 	int                         *ipp_count,
 	int                         *rdi_count,
 	int                         *ppp_count,
-	int                         *ife_rd_count)
+	int                         *ife_rd_count,
+	int                         *lcr_count)
 {
 	int ipp_num        = 0;
 	int rdi_num        = 0;
 	int ppp_num        = 0;
 	int ife_rd_num     = 0;
+	int lcr_num        = 0;
 	uint32_t i;
 	struct cam_isp_out_port_info      *out_port;
 	struct cam_ife_hw_mgr             *ife_hw_mgr;
@@ -1737,6 +1763,8 @@
 				rdi_num++;
 			else if (out_port->res_type == CAM_ISP_IFE_OUT_RES_2PD)
 				ppp_num++;
+			else if (out_port->res_type == CAM_ISP_IFE_OUT_RES_LCR)
+				lcr_num++;
 			else {
 				CAM_DBG(CAM_ISP, "out_res_type %d",
 				out_port->res_type);
@@ -1749,9 +1777,10 @@
 	*rdi_count = rdi_num;
 	*ppp_count = ppp_num;
 	*ife_rd_count = ife_rd_num;
+	*lcr_count = lcr_num;
 
-	CAM_DBG(CAM_ISP, "rdi: %d ipp: %d ppp: %d ife_rd: %d",
-		rdi_num, ipp_num, ppp_num, ife_rd_num);
+	CAM_DBG(CAM_ISP, "rdi: %d ipp: %d ppp: %d ife_rd: %d lcr: %d",
+		rdi_num, ipp_num, ppp_num, ife_rd_num, lcr_num);
 
 	return 0;
 }
@@ -1767,6 +1796,7 @@
 	int rdi_count                             = 0;
 	int ppp_count                             = 0;
 	int ife_rd_count                          = 0;
+	int lcr_count                             = 0;
 
 	is_dual_vfe = in_port->usage_type;
 
@@ -1777,21 +1807,23 @@
 		goto err;
 	}
 
-	cam_ife_hw_mgr_preprocess_port(ife_ctx, in_port,
-		&ipp_count, &rdi_count, &ppp_count, &ife_rd_count);
+	cam_ife_hw_mgr_preprocess_port(ife_ctx, in_port, &ipp_count,
+		&rdi_count, &ppp_count, &ife_rd_count, &lcr_count);
 
-	if (!ipp_count && !rdi_count && !ppp_count && !ife_rd_count) {
-		CAM_ERR(CAM_ISP, "No PIX or RDI or PPP or IFE RD resource");
+	if (!ipp_count && !rdi_count && !ppp_count && !ife_rd_count
+		&& !lcr_count) {
+		CAM_ERR(CAM_ISP,
+			"No PIX or RDI or PPP or IFE RD or LCR resource");
 		return -EINVAL;
 	}
 
-	if (ipp_count) {
+	if (ipp_count || lcr_count) {
 		/* get ife csid IPP resource */
 		rc = cam_ife_hw_mgr_acquire_res_ife_csid_pxl(ife_ctx,
 			in_port, true);
 		if (rc) {
 			CAM_ERR(CAM_ISP,
-				"Acquire IFE CSID IPP resource Failed");
+				"Acquire IFE CSID IPP/LCR resource Failed");
 			goto err;
 		}
 	}
@@ -1822,13 +1854,29 @@
 	if (ife_rd_count) {
 		rc = cam_ife_hw_mgr_acquire_res_ife_rd_src(ife_ctx, in_port);
 		rc = cam_ife_hw_mgr_acquire_res_bus_rd(ife_ctx, in_port);
-	} else {
-		rc = cam_ife_hw_mgr_acquire_res_ife_src(ife_ctx, in_port);
+
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Acquire IFE RD SRC resource Failed");
+			goto err;
+		}
+	} else if (ipp_count || ppp_count) {
+		rc = cam_ife_hw_mgr_acquire_res_ife_src(ife_ctx,
+			in_port, false);
+
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"Acquire IFE IPP/PPP SRC resource Failed");
+			goto err;
+		}
 	}
 
-	if (rc) {
-		CAM_ERR(CAM_ISP, "Acquire IFE SRC resource Failed");
-		goto err;
+	if (lcr_count) {
+		rc = cam_ife_hw_mgr_acquire_res_ife_src(ife_ctx, in_port, true);
+
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Acquire IFE LCR SRC resource Failed");
+			goto err;
+		}
 	}
 
 	CAM_DBG(CAM_ISP, "Acquiring IFE OUT resource...");
@@ -1838,7 +1886,7 @@
 		goto err;
 	}
 
-	*num_pix_port += ipp_count + ppp_count + ife_rd_count;
+	*num_pix_port += ipp_count + ppp_count + ife_rd_count + lcr_count;
 	*num_rdi_port += rdi_count;
 
 	return 0;
@@ -2650,17 +2698,19 @@
 		cam_ife_hw_mgr_stop_hw_res(hw_mgr_res);
 	}
 
-	cam_tasklet_stop(ctx->common.tasklet_info);
-
 	cam_ife_mgr_pause_hw(ctx);
 
-	if (stop_isp->stop_only)
+	if (stop_isp->stop_only) {
+		cam_tasklet_stop(ctx->common.tasklet_info);
 		goto end;
+	}
 
 	if (cam_cdm_stream_off(ctx->cdm_handle))
 		CAM_ERR(CAM_ISP, "CDM stream off failed %d", ctx->cdm_handle);
 
 	cam_ife_hw_mgr_deinit_hw(ctx);
+	cam_tasklet_stop(ctx->common.tasklet_info);
+
 	CAM_DBG(CAM_ISP,
 		"Stop success for ctx id:%d rc :%d", ctx->ctx_index, rc);
 
@@ -2680,7 +2730,7 @@
 }
 
 static int cam_ife_mgr_reset_vfe_hw(struct cam_ife_hw_mgr *hw_mgr,
-			uint32_t hw_idx)
+	uint32_t hw_idx)
 {
 	uint32_t i = 0;
 	struct cam_hw_intf             *vfe_hw_intf;
@@ -3625,7 +3675,8 @@
 					clk_rate = max(clock_config->rdi_hz[j],
 						clk_rate);
 			else
-				if (hw_mgr_res->hw_res[i]) {
+				if (hw_mgr_res->res_id != CAM_ISP_HW_VFE_IN_LCR
+					&& hw_mgr_res->hw_res[i]) {
 					CAM_ERR(CAM_ISP, "Invalid res_id %u",
 						hw_mgr_res->res_id);
 					rc = -EINVAL;
@@ -4116,12 +4167,12 @@
 }
 
 static int cam_ife_mgr_cmd_get_sof_timestamp(
-	struct cam_ife_hw_mgr_ctx      *ife_ctx,
-	uint64_t                       *time_stamp,
-	uint64_t                       *boot_time_stamp)
+	struct cam_ife_hw_mgr_ctx            *ife_ctx,
+	uint64_t                             *time_stamp,
+	uint64_t                             *boot_time_stamp)
 {
-	int rc = -EINVAL;
-	uint32_t i;
+	int                                   rc = -EINVAL;
+	uint32_t                              i;
 	struct cam_ife_hw_mgr_res            *hw_mgr_res;
 	struct cam_hw_intf                   *hw_intf;
 	struct cam_csid_get_time_stamp_args   csid_get_time;
@@ -4166,7 +4217,7 @@
 	}
 
 	if (rc)
-		CAM_ERR(CAM_ISP, "Getting sof time stamp failed");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Getting sof time stamp failed");
 
 	return rc;
 }
@@ -4266,10 +4317,10 @@
 }
 
 static int cam_ife_hw_mgr_do_error_recovery(
-		struct cam_hw_event_recovery_data  *ife_mgr_recovery_data)
+	struct cam_hw_event_recovery_data  *ife_mgr_recovery_data)
 {
-	int32_t rc = 0;
-	struct crm_workq_task        *task = NULL;
+	int32_t                             rc = 0;
+	struct crm_workq_task              *task = NULL;
 	struct cam_hw_event_recovery_data  *recovery_data = NULL;
 
 	recovery_data = kzalloc(sizeof(struct cam_hw_event_recovery_data),
@@ -4284,7 +4335,7 @@
 
 	task = cam_req_mgr_workq_get_task(g_ife_hw_mgr.workq);
 	if (!task) {
-		CAM_ERR(CAM_ISP, "No empty task frame");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No empty task frame");
 		kfree(recovery_data);
 		return -ENOMEM;
 	}
@@ -4303,44 +4354,43 @@
 * is associated with this context. If yes:
 *  a. It fills the other cores associated with this context in
  *      affected_core[]
- *  b. Return 1 if ctx is affected, 0 otherwise
+ *  b. Return true if the ctx is affected, false otherwise
  */
-static int cam_ife_hw_mgr_is_ctx_affected(
+static bool cam_ife_hw_mgr_is_ctx_affected(
 	struct cam_ife_hw_mgr_ctx   *ife_hwr_mgr_ctx,
-	uint32_t *affected_core, uint32_t size)
+	uint32_t                    *affected_core,
+	uint32_t                     size)
 {
-	int32_t rc = 0;
-	uint32_t i = 0, j = 0;
-	uint32_t max_idx =  ife_hwr_mgr_ctx->num_base;
-	uint32_t ctx_affected_core_idx[CAM_IFE_HW_NUM_MAX] = {0};
 
-	CAM_DBG(CAM_ISP, "max_idx = %d", max_idx);
+	bool                  rc = false;
+	uint32_t              i = 0, j = 0;
+	uint32_t              max_idx =  ife_hwr_mgr_ctx->num_base;
+	uint32_t              ctx_affected_core_idx[CAM_IFE_HW_NUM_MAX] = {0};
 
-	if ((max_idx >= CAM_IFE_HW_NUM_MAX) ||
-		(size > CAM_IFE_HW_NUM_MAX)) {
-		CAM_ERR(CAM_ISP, "invalid parameter = %d", max_idx);
+	CAM_DBG(CAM_ISP, "Enter:max_idx = %d", max_idx);
+
+	if ((max_idx >= CAM_IFE_HW_NUM_MAX) || (size > CAM_IFE_HW_NUM_MAX)) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "invalid parameter = %d", max_idx);
 		return rc;
 	}
 
 	for (i = 0; i < max_idx; i++) {
 		if (affected_core[ife_hwr_mgr_ctx->base[i].idx])
-			rc = 1;
+			rc = true;
 		else {
 			ctx_affected_core_idx[j] = ife_hwr_mgr_ctx->base[i].idx;
-			CAM_DBG(CAM_ISP, "Add affected IFE %d for recovery",
-				ctx_affected_core_idx[j]);
 			j = j + 1;
 		}
 	}
 
-	if (rc == 1) {
+	if (rc) {
 		while (j) {
 			if (affected_core[ctx_affected_core_idx[j-1]] != 1)
 				affected_core[ctx_affected_core_idx[j-1]] = 1;
 			j = j - 1;
 		}
 	}
-
+	CAM_DBG(CAM_ISP, "Exit");
 	return rc;
 }
 
@@ -4352,7 +4402,6 @@
  *   b. Notify CTX with fatal error
  */
 static int  cam_ife_hw_mgr_find_affected_ctx(
-	struct cam_ife_hw_mgr_ctx             *curr_ife_hwr_mgr_ctx,
 	struct cam_isp_hw_error_event_data    *error_event_data,
 	uint32_t                               curr_core_idx,
 	struct cam_hw_event_recovery_data     *recovery_data)
@@ -4371,7 +4420,7 @@
 
 	recovery_data->no_of_context = 0;
 	affected_core[curr_core_idx] = 1;
-	ife_hwr_mgr = curr_ife_hwr_mgr_ctx->hw_mgr;
+	ife_hwr_mgr = &g_ife_hw_mgr;
 
 	list_for_each_entry(ife_hwr_mgr_ctx,
 		&ife_hwr_mgr->used_ctx_list, list) {
@@ -4412,333 +4461,114 @@
 	return 0;
 }
 
-static int cam_ife_hw_mgr_get_err_type(
-	void                              *handler_priv,
-	void                              *payload)
+static int cam_ife_hw_mgr_handle_hw_err(
+	void                                *evt_info)
 {
-	struct cam_isp_resource_node         *hw_res_left = NULL;
-	struct cam_isp_resource_node         *hw_res_right = NULL;
-	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx;
-	struct cam_vfe_top_irq_evt_payload   *evt_payload;
-	struct cam_ife_hw_mgr_res            *isp_ife_camif_res = NULL;
-	uint32_t  status = 0;
-	uint32_t  core_idx;
-
-	ife_hwr_mgr_ctx = handler_priv;
-	evt_payload = payload;
-
-	if (!evt_payload) {
-		CAM_ERR(CAM_ISP, "No payload");
-		return IRQ_HANDLED;
-	}
-
-	core_idx = evt_payload->core_index;
-	evt_payload->evt_id = CAM_ISP_HW_EVENT_ERROR;
-
-	list_for_each_entry(isp_ife_camif_res,
-		&ife_hwr_mgr_ctx->res_list_ife_src, list) {
-
-		if ((isp_ife_camif_res->res_type ==
-			CAM_IFE_HW_MGR_RES_UNINIT) ||
-			(isp_ife_camif_res->res_id != CAM_ISP_HW_VFE_IN_CAMIF))
-			continue;
-
-		hw_res_left = isp_ife_camif_res->hw_res[CAM_ISP_HW_SPLIT_LEFT];
-		hw_res_right =
-			isp_ife_camif_res->hw_res[CAM_ISP_HW_SPLIT_RIGHT];
-
-		CAM_DBG(CAM_ISP, "is_dual_vfe ? = %d",
-			isp_ife_camif_res->is_dual_vfe);
-
-		/* ERROR check for Left VFE */
-		if (!hw_res_left) {
-			CAM_DBG(CAM_ISP, "VFE(L) Device is NULL");
-			break;
-		}
-
-		CAM_DBG(CAM_ISP, "core id= %d, HW id %d", core_idx,
-			hw_res_left->hw_intf->hw_idx);
-
-		if (core_idx == hw_res_left->hw_intf->hw_idx) {
-			status = hw_res_left->bottom_half_handler(
-				hw_res_left, evt_payload);
-		}
-
-		if (status)
-			break;
-
-		/* ERROR check for Right  VFE */
-		if (!hw_res_right) {
-			CAM_DBG(CAM_ISP, "VFE(R) Device is NULL");
-			continue;
-		}
-		CAM_DBG(CAM_ISP, "core id= %d, HW id %d", core_idx,
-			hw_res_right->hw_intf->hw_idx);
-
-		if (core_idx == hw_res_right->hw_intf->hw_idx) {
-			status = hw_res_right->bottom_half_handler(
-				hw_res_right, evt_payload);
-		}
-
-		if (status)
-			break;
-	}
-	CAM_DBG(CAM_ISP, "Exit (status = %d)!", status);
-	return status;
-}
-
-static int  cam_ife_hw_mgr_handle_camif_error(
-	void                              *handler_priv,
-	void                              *payload)
-{
-	int32_t  error_status;
+	struct cam_isp_hw_event_info        *event_info = evt_info;
 	uint32_t core_idx;
-	struct cam_ife_hw_mgr_ctx               *ife_hwr_mgr_ctx;
-	struct cam_vfe_top_irq_evt_payload      *evt_payload;
-	struct cam_isp_hw_error_event_data       error_event_data = {0};
-	struct cam_hw_event_recovery_data        recovery_data = {0};
-	int rc = 0;
+	struct cam_isp_hw_error_event_data   error_event_data = {0};
+	struct cam_hw_event_recovery_data    recovery_data = {0};
+	int                                  rc = -EINVAL;
 
-	ife_hwr_mgr_ctx = handler_priv;
-	evt_payload = payload;
-	core_idx = evt_payload->core_index;
+	if (event_info->err_type == CAM_VFE_IRQ_STATUS_VIOLATION)
+		error_event_data.error_type = CAM_ISP_HW_ERROR_VIOLATION;
+	else if (event_info->res_type == CAM_ISP_RESOURCE_VFE_IN)
+		error_event_data.error_type = CAM_ISP_HW_ERROR_OVERFLOW;
+	else if (event_info->res_type == CAM_ISP_RESOURCE_VFE_OUT)
+		error_event_data.error_type = CAM_ISP_HW_ERROR_BUSIF_OVERFLOW;
 
-	error_status = cam_ife_hw_mgr_get_err_type(ife_hwr_mgr_ctx,
-		evt_payload);
-	if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending)) {
-		rc = error_status;
-		goto end;
-	}
+	core_idx = event_info->hw_idx;
 
-	switch (error_status) {
-	case CAM_ISP_HW_ERROR_OVERFLOW:
-	case CAM_ISP_HW_ERROR_P2I_ERROR:
-	case CAM_ISP_HW_ERROR_VIOLATION:
-		CAM_ERR(CAM_ISP, "Enter: error_type (%d)", error_status);
-		rc = error_status;
-		if (g_ife_hw_mgr.debug_cfg.enable_recovery)
-			error_event_data.recovery_enabled = true;
+	if (g_ife_hw_mgr.debug_cfg.enable_recovery)
+		error_event_data.recovery_enabled = true;
+	else
+		error_event_data.recovery_enabled = false;
 
-		error_event_data.error_type =
-				CAM_ISP_HW_ERROR_OVERFLOW;
+	rc = cam_ife_hw_mgr_find_affected_ctx(&error_event_data,
+		core_idx, &recovery_data);
 
-		cam_ife_hw_mgr_find_affected_ctx(ife_hwr_mgr_ctx,
-			&error_event_data,
-			core_idx,
-			&recovery_data);
+	if (event_info->res_type == CAM_ISP_RESOURCE_VFE_OUT)
+		return rc;
 
-		if (!g_ife_hw_mgr.debug_cfg.enable_recovery) {
-			CAM_DBG(CAM_ISP, "recovery is not enabled");
-			break;
-		}
-
+	if (g_ife_hw_mgr.debug_cfg.enable_recovery) {
 		CAM_DBG(CAM_ISP, "IFE Mgr recovery is enabled");
+
 		/* Trigger for recovery */
-		recovery_data.error_type = CAM_ISP_HW_ERROR_OVERFLOW;
+		if (event_info->err_type == CAM_VFE_IRQ_STATUS_VIOLATION)
+			recovery_data.error_type = CAM_ISP_HW_ERROR_VIOLATION;
+		else
+			recovery_data.error_type = CAM_ISP_HW_ERROR_OVERFLOW;
 		cam_ife_hw_mgr_do_error_recovery(&recovery_data);
-		break;
-	default:
-		CAM_DBG(CAM_ISP, "No error (%d)", error_status);
-		break;
+	} else {
+		CAM_DBG(CAM_ISP, "recovery is not enabled");
+		rc = 0;
 	}
 
-end:
 	return rc;
 }
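
The consolidated handler above maps one cam_isp_hw_event_info to one error
type (a violation wins; otherwise VFE_IN becomes overflow and VFE_OUT becomes
bus-interface overflow), flags every context sharing the failing core, and
escalates to recovery only for input-path errors when the debugfs recovery
switch is set. A condensed stand-alone sketch of that decision flow; the enum
values and the two extern helpers are stand-ins, not the driver's symbols:

    enum isp_err { ERR_OVERFLOW, ERR_BUSIF_OVERFLOW, ERR_VIOLATION };

    struct err_evt { int is_violation; int is_vfe_out; unsigned int hw_idx; };

    /* stand-ins for cam_ife_hw_mgr_find_affected_ctx() and
     * cam_ife_hw_mgr_do_error_recovery() */
    extern int find_affected_ctx(enum isp_err type, unsigned int core_idx);
    extern void do_error_recovery(enum isp_err type);

    static int handle_hw_err(const struct err_evt *e, int recovery_enabled)
    {
        enum isp_err type = e->is_violation ? ERR_VIOLATION :
            (e->is_vfe_out ? ERR_BUSIF_OVERFLOW : ERR_OVERFLOW);
        int rc = find_affected_ctx(type, e->hw_idx);

        if (e->is_vfe_out)           /* bus errors: report only */
            return rc;
        if (recovery_enabled)
            do_error_recovery(type); /* input-path errors may recover */
        else
            rc = 0;
        return rc;
    }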
 
-/*
- * DUAL VFE is valid for PIX processing path
- * This function assumes hw_res[0] is master in case
- * of dual VFE.
- * RDI path does not support DUAL VFE
- */
-static int cam_ife_hw_mgr_handle_reg_update(
-	void                              *handler_priv,
-	void                              *payload)
+static int cam_ife_hw_mgr_handle_hw_rup(
+	void                                    *ctx,
+	void                                    *evt_info)
 {
-	struct cam_isp_resource_node            *hw_res;
-	struct cam_ife_hw_mgr_ctx               *ife_hwr_mgr_ctx;
-	struct cam_vfe_top_irq_evt_payload      *evt_payload;
-	struct cam_ife_hw_mgr_res               *ife_src_res = NULL;
+	struct cam_isp_hw_event_info            *event_info = evt_info;
+	struct cam_ife_hw_mgr_ctx               *ife_hw_mgr_ctx = ctx;
 	cam_hw_event_cb_func                     ife_hwr_irq_rup_cb;
 	struct cam_isp_hw_reg_update_event_data  rup_event_data;
-	uint32_t  core_idx;
-	uint32_t  rup_status = -EINVAL;
 
-	CAM_DBG(CAM_ISP, "Enter");
-
-	ife_hwr_mgr_ctx = handler_priv;
-	evt_payload = payload;
-
-	if (!handler_priv || !payload) {
-		CAM_ERR(CAM_ISP, "Invalid Parameter");
-		return -EPERM;
-	}
-
-	core_idx = evt_payload->core_index;
 	ife_hwr_irq_rup_cb =
-		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_REG_UPDATE];
+		ife_hw_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_REG_UPDATE];
 
-	evt_payload->evt_id = CAM_ISP_HW_EVENT_REG_UPDATE;
-	list_for_each_entry(ife_src_res,
-			&ife_hwr_mgr_ctx->res_list_ife_src, list) {
+	switch (event_info->res_id) {
+	case CAM_ISP_HW_VFE_IN_CAMIF:
+		if (ife_hw_mgr_ctx->is_dual)
+			if (event_info->hw_idx != 1)
+				break;
 
-		if (ife_src_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
-			continue;
-
-		CAM_DBG(CAM_ISP, "resource id = %d, curr_core_idx = %d",
-			 ife_src_res->res_id, core_idx);
-		switch (ife_src_res->res_id) {
-		case CAM_ISP_HW_VFE_IN_PDLIB:
+		if (atomic_read(&ife_hw_mgr_ctx->overflow_pending))
 			break;
-		case CAM_ISP_HW_VFE_IN_CAMIF:
-		case CAM_ISP_HW_VFE_IN_RD:
-			if (ife_src_res->is_dual_vfe)
-				/* It checks for slave core RUP ACK*/
-				hw_res = ife_src_res->hw_res[1];
-			else
-				hw_res = ife_src_res->hw_res[0];
+		ife_hwr_irq_rup_cb(ife_hw_mgr_ctx->common.cb_priv,
+			CAM_ISP_HW_EVENT_REG_UPDATE, &rup_event_data);
+		break;
 
-			if (!hw_res) {
-				CAM_ERR(CAM_ISP, "CAMIF device is NULL");
-				break;
-			}
-			CAM_DBG(CAM_ISP,
-				"current_core_id = %d , core_idx res = %d",
-				 core_idx, hw_res->hw_intf->hw_idx);
-
-			if (core_idx == hw_res->hw_intf->hw_idx) {
-				rup_status = hw_res->bottom_half_handler(
-					hw_res, evt_payload);
-			}
-
-			if (ife_src_res->is_dual_vfe) {
-				hw_res = ife_src_res->hw_res[0];
-				if (core_idx == hw_res->hw_intf->hw_idx) {
-					hw_res->bottom_half_handler(
-						hw_res, evt_payload);
-				}
-			}
-
-			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
-				break;
-
-			if (!rup_status) {
-				ife_hwr_irq_rup_cb(
-					ife_hwr_mgr_ctx->common.cb_priv,
-					CAM_ISP_HW_EVENT_REG_UPDATE,
-					&rup_event_data);
-			}
+	case CAM_ISP_HW_VFE_IN_RDI0:
+	case CAM_ISP_HW_VFE_IN_RDI1:
+	case CAM_ISP_HW_VFE_IN_RDI2:
+	case CAM_ISP_HW_VFE_IN_RDI3:
+		if (!ife_hw_mgr_ctx->is_rdi_only_context)
 			break;
-
-		case CAM_ISP_HW_VFE_IN_RDI0:
-		case CAM_ISP_HW_VFE_IN_RDI1:
-		case CAM_ISP_HW_VFE_IN_RDI2:
-		case CAM_ISP_HW_VFE_IN_RDI3:
-			hw_res = ife_src_res->hw_res[0];
-
-			if (!hw_res) {
-				CAM_ERR(CAM_ISP, "RDI Device is NULL");
-				break;
-			}
-
-			if (core_idx == hw_res->hw_intf->hw_idx)
-				rup_status = hw_res->bottom_half_handler(
-					hw_res, evt_payload);
-
-			if (ife_hwr_mgr_ctx->is_rdi_only_context == 0 &&
-				!ife_hwr_mgr_ctx->is_fe_enable)
-				continue;
-
-			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
-				break;
-			if (!rup_status) {
-				/* Send the Reg update hw event */
-				ife_hwr_irq_rup_cb(
-					ife_hwr_mgr_ctx->common.cb_priv,
-					CAM_ISP_HW_EVENT_REG_UPDATE,
-					&rup_event_data);
-			}
+		if (atomic_read(&ife_hw_mgr_ctx->overflow_pending))
 			break;
-		default:
-			CAM_ERR(CAM_ISP, "Invalid resource id (%d)",
-				ife_src_res->res_id);
-		}
+		ife_hwr_irq_rup_cb(ife_hw_mgr_ctx->common.cb_priv,
+			CAM_ISP_HW_EVENT_REG_UPDATE, &rup_event_data);
+		break;
 
+	case CAM_ISP_HW_VFE_IN_PDLIB:
+	case CAM_ISP_HW_VFE_IN_LCR:
+		break;
+	default:
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid res_id: %d",
+			event_info->res_id);
+		break;
 	}
 
-	if (!rup_status)
-		CAM_DBG(CAM_ISP, "Exit rup_status = %d", rup_status);
-
-	return 0;
-}
-
-static int cam_ife_hw_mgr_handle_reg_update_in_bus(
-	void                              *handler_priv,
-	void                              *payload)
-{
-	struct cam_ife_hw_mgr_ctx               *ife_hwr_mgr_ctx;
-	struct cam_vfe_bus_irq_evt_payload      *evt_payload;
-	cam_hw_event_cb_func                     ife_hwr_irq_rup_cb;
-	struct cam_isp_hw_reg_update_event_data  rup_event_data;
-	uint32_t                                 core_idx;
-	struct cam_ife_hw_mgr_res               *isp_ife_out_res;
-	struct cam_isp_resource_node            *hw_res_left;
-	uint32_t                                 rup_status = -EINVAL;
-	int                                      i = 0;
-
-	CAM_DBG(CAM_ISP, "Enter");
-
-	ife_hwr_mgr_ctx = handler_priv;
-	evt_payload = payload;
-
-	if (!handler_priv || !payload) {
-		CAM_ERR(CAM_ISP, "Invalid Parameter");
-		return -EPERM;
-	}
-
-	core_idx = evt_payload->core_index;
-	ife_hwr_irq_rup_cb =
-		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_REG_UPDATE];
-
-	evt_payload->evt_id = CAM_ISP_HW_EVENT_REG_UPDATE;
-	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
-		isp_ife_out_res = &ife_hwr_mgr_ctx->res_list_ife_out[i];
-		if (isp_ife_out_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
-			continue;
-
-		hw_res_left = isp_ife_out_res->hw_res[0];
-		if (hw_res_left && (evt_payload->core_index ==
-			hw_res_left->hw_intf->hw_idx)) {
-			rup_status = hw_res_left->bottom_half_handler(
-				hw_res_left, evt_payload);
-
-			if (rup_status == 0)
-				break;
-		}
-	}
-
-	if (!rup_status) {
-		if (!atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
-			ife_hwr_irq_rup_cb(
-				ife_hwr_mgr_ctx->common.cb_priv,
-				CAM_ISP_HW_EVENT_REG_UPDATE,
-				&rup_event_data);
-	}
-
-	CAM_DBG(CAM_ISP, "Exit rup_status = %d", rup_status);
+	CAM_DBG(CAM_ISP, "RUP done for VFE source %d",
+		event_info->res_id);
 
 	return 0;
 }
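
Reg-update delivery now keys purely off event_info->res_id instead of walking
res_list_ife_src: CAMIF reg-updates in dual-VFE mode are forwarded only from
hw index 1 (the removed code likewise waited for the slave core's RUP ACK),
RDI reg-updates fire the callback only in RDI-only contexts, and PDLIB/LCR
are deliberately silent. A stand-alone sketch of just the gating, with
abbreviated names that loosely mirror the diff's:

    enum rup_res { RES_CAMIF, RES_RDI, RES_PDLIB_OR_LCR };

    static int should_forward_rup(enum rup_res id, int is_dual,
        unsigned int hw_idx, int is_rdi_only)
    {
        switch (id) {
        case RES_CAMIF:
            return !is_dual || hw_idx == 1; /* dual: slave core acks */
        case RES_RDI:
            return is_rdi_only;
        default:
            return 0;                       /* PDLIB/LCR: no RUP cb */
        }
    }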
 
 static int cam_ife_hw_mgr_check_irq_for_dual_vfe(
-	struct cam_ife_hw_mgr_ctx   *ife_hw_mgr_ctx,
-	uint32_t                     core_idx0,
-	uint32_t                     core_idx1,
-	uint32_t                     hw_event_type)
+	struct cam_ife_hw_mgr_ctx            *ife_hw_mgr_ctx,
+	uint32_t                              hw_event_type)
 {
-	int32_t rc = -1;
-	uint32_t *event_cnt = NULL;
+	int32_t                               rc = -1;
+	uint32_t                             *event_cnt = NULL;
+	uint32_t                              core_idx0 = 0;
+	uint32_t                              core_idx1 = 1;
+
+	if (!ife_hw_mgr_ctx->is_dual)
+		return 0;
 
 	switch (hw_event_type) {
 	case CAM_ISP_HW_EVENT_SOF:
@@ -4754,8 +4584,7 @@
 		return 0;
 	}
 
-	if (event_cnt[core_idx0] ==
-			event_cnt[core_idx1]) {
+	if (event_cnt[core_idx0] == event_cnt[core_idx1]) {
 
 		event_cnt[core_idx0] = 0;
 		event_cnt[core_idx1] = 0;
@@ -4782,707 +4611,235 @@
 	return rc;
 }
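
With a single-VFE early-out and the core indices fixed at 0 and 1, dual-VFE
pairing reduces to comparing the two per-core event counters: the event is
delivered exactly once, when the second core's count catches up, after which
both counters are cleared. A stand-alone model (the drift warning that the
surrounding hunk retains is omitted here):

    #include <stdint.h>

    /* Returns 0 when both cores have reported the event (deliver and
     * reset), nonzero while still waiting for the partner core. */
    static int check_dual_vfe_sync(uint32_t event_cnt[2])
    {
        if (event_cnt[0] == event_cnt[1]) {
            event_cnt[0] = 0;
            event_cnt[1] = 0;
            return 0;
        }
        return -1;
    }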
 
-static int cam_ife_hw_mgr_handle_epoch_for_camif_hw_res(
-	void                              *handler_priv,
-	void                              *payload)
+static int cam_ife_hw_mgr_handle_hw_epoch(
+	void                                 *ctx,
+	void                                 *evt_info)
 {
-	int32_t rc = -EINVAL;
-	struct cam_isp_resource_node         *hw_res_left;
-	struct cam_isp_resource_node         *hw_res_right;
-	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx;
-	struct cam_vfe_top_irq_evt_payload   *evt_payload;
-	struct cam_ife_hw_mgr_res            *isp_ife_camif_res = NULL;
-	cam_hw_event_cb_func                  ife_hwr_irq_epoch_cb;
+	struct cam_isp_hw_event_info         *event_info = evt_info;
+	struct cam_ife_hw_mgr_ctx            *ife_hw_mgr_ctx = ctx;
+	cam_hw_event_cb_func                  ife_hw_irq_epoch_cb;
 	struct cam_isp_hw_epoch_event_data    epoch_done_event_data;
-	uint32_t  core_idx;
-	uint32_t  epoch_status = -EINVAL;
-	uint32_t  core_index0;
-	uint32_t  core_index1;
+	int                                   rc = 0;
 
-	CAM_DBG(CAM_ISP, "Enter");
+	ife_hw_irq_epoch_cb =
+		ife_hw_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_EPOCH];
 
-	ife_hwr_mgr_ctx = handler_priv;
-	evt_payload = payload;
-	ife_hwr_irq_epoch_cb =
-		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_EPOCH];
-	core_idx = evt_payload->core_index;
-
-	evt_payload->evt_id = CAM_ISP_HW_EVENT_EPOCH;
-
-	list_for_each_entry(isp_ife_camif_res,
-		&ife_hwr_mgr_ctx->res_list_ife_src, list) {
-		if ((isp_ife_camif_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
-			|| (isp_ife_camif_res->res_id >
-			CAM_ISP_HW_VFE_IN_RD)) {
-			continue;
-		}
-
-		hw_res_left = isp_ife_camif_res->hw_res[0];
-		hw_res_right = isp_ife_camif_res->hw_res[1];
-
-		switch (isp_ife_camif_res->is_dual_vfe) {
-		/* Handling Single VFE Scenario */
-		case 0:
-			/* EPOCH check for Left side VFE */
-			if (!hw_res_left) {
-				CAM_ERR(CAM_ISP, "Left Device is NULL");
+	switch (event_info->res_id) {
+	case CAM_ISP_HW_VFE_IN_CAMIF:
+		ife_hw_mgr_ctx->epoch_cnt[event_info->hw_idx]++;
+		rc = cam_ife_hw_mgr_check_irq_for_dual_vfe(ife_hw_mgr_ctx,
+			CAM_ISP_HW_EVENT_EPOCH);
+		if (!rc) {
+			if (atomic_read(&ife_hw_mgr_ctx->overflow_pending))
 				break;
-			}
-
-			if (core_idx == hw_res_left->hw_intf->hw_idx) {
-				epoch_status = hw_res_left->bottom_half_handler(
-					hw_res_left, evt_payload);
-				if (atomic_read(
-					&ife_hwr_mgr_ctx->overflow_pending))
-					break;
-				if (!epoch_status)
-					ife_hwr_irq_epoch_cb(
-						ife_hwr_mgr_ctx->common.cb_priv,
-						CAM_ISP_HW_EVENT_EPOCH,
-						&epoch_done_event_data);
-			}
-
-			break;
-
-		/* Handling Dual VFE Scenario */
-		case 1:
-			/* SOF check for Left side VFE (Master)*/
-
-			if ((!hw_res_left) || (!hw_res_right)) {
-				CAM_ERR(CAM_ISP, "Dual VFE Device is NULL");
-				break;
-			}
-			if (core_idx == hw_res_left->hw_intf->hw_idx) {
-				epoch_status = hw_res_left->bottom_half_handler(
-					hw_res_left, evt_payload);
-
-				if (!epoch_status)
-					ife_hwr_mgr_ctx->epoch_cnt[core_idx]++;
-				else
-					break;
-			}
-
-			/* SOF check for Right side VFE */
-			if (core_idx == hw_res_right->hw_intf->hw_idx) {
-				epoch_status =
-					hw_res_right->bottom_half_handler(
-					hw_res_right, evt_payload);
-
-				if (!epoch_status)
-					ife_hwr_mgr_ctx->epoch_cnt[core_idx]++;
-				else
-					break;
-			}
-
-			core_index0 = hw_res_left->hw_intf->hw_idx;
-			core_index1 = hw_res_right->hw_intf->hw_idx;
-
-			rc = cam_ife_hw_mgr_check_irq_for_dual_vfe(
-					ife_hwr_mgr_ctx,
-					core_index0,
-					core_index1,
-					evt_payload->evt_id);
-
-			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
-				break;
-			if (!rc)
-				ife_hwr_irq_epoch_cb(
-					ife_hwr_mgr_ctx->common.cb_priv,
-					CAM_ISP_HW_EVENT_EPOCH,
-					&epoch_done_event_data);
-
-			break;
-
-		/* Error */
-		default:
-			CAM_ERR(CAM_ISP, "error with hw_res");
-
+			ife_hw_irq_epoch_cb(ife_hw_mgr_ctx->common.cb_priv,
+				CAM_ISP_HW_EVENT_EPOCH, &epoch_done_event_data);
 		}
-	}
-
-	if (!epoch_status)
-		CAM_DBG(CAM_ISP, "Exit epoch_status = %d", epoch_status);
-
-	return 0;
-}
-
-static int cam_ife_hw_mgr_process_camif_sof(
-	struct cam_ife_hw_mgr_res            *isp_ife_camif_res,
-	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx,
-	struct cam_vfe_top_irq_evt_payload   *evt_payload)
-{
-	struct cam_isp_resource_node         *hw_res_left = NULL;
-	struct cam_isp_resource_node         *hw_res_right = NULL;
-	int32_t rc = -EINVAL;
-	uint32_t  core_idx;
-	uint32_t  sof_status = 0;
-	uint32_t  core_index0;
-	uint32_t  core_index1;
-
-	CAM_DBG(CAM_ISP, "Enter");
-	core_idx = evt_payload->core_index;
-	hw_res_left = isp_ife_camif_res->hw_res[0];
-	hw_res_right = isp_ife_camif_res->hw_res[1];
-	CAM_DBG(CAM_ISP, "is_dual_vfe ? = %d",
-		isp_ife_camif_res->is_dual_vfe);
-
-	switch (isp_ife_camif_res->is_dual_vfe) {
-	/* Handling Single VFE Scenario */
-	case 0:
-		/* SOF check for Left side VFE */
-		if (!hw_res_left) {
-			CAM_ERR(CAM_ISP, "VFE Device is NULL");
-			break;
-		}
-		CAM_DBG(CAM_ISP, "curr_core_idx = %d,core idx hw = %d",
-			core_idx, hw_res_left->hw_intf->hw_idx);
-
-		if (core_idx == hw_res_left->hw_intf->hw_idx) {
-			sof_status = hw_res_left->bottom_half_handler(
-				hw_res_left, evt_payload);
-			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
-				break;
-			if (!sof_status)
-				rc = 0;
-		}
-
 		break;
 
-	/* Handling Dual VFE Scenario */
-	case 1:
-		/* SOF check for Left side VFE */
-
-		if (!hw_res_left) {
-			CAM_ERR(CAM_ISP, "VFE Device is NULL");
-			break;
-		}
-		CAM_DBG(CAM_ISP, "curr_core_idx = %d, res hw idx= %d",
-				 core_idx,
-				hw_res_left->hw_intf->hw_idx);
-
-		if (core_idx == hw_res_left->hw_intf->hw_idx) {
-			sof_status = hw_res_left->bottom_half_handler(
-				hw_res_left, evt_payload);
-			if (!sof_status)
-				ife_hwr_mgr_ctx->sof_cnt[core_idx]++;
-			else
-				break;
-		}
-
-		/* SOF check for Right side VFE */
-		if (!hw_res_right) {
-			CAM_ERR(CAM_ISP, "VFE Device is NULL");
-			break;
-		}
-		CAM_DBG(CAM_ISP, "curr_core_idx = %d, res hw idx= %d",
-				 core_idx,
-				hw_res_right->hw_intf->hw_idx);
-		if (core_idx == hw_res_right->hw_intf->hw_idx) {
-			sof_status = hw_res_right->bottom_half_handler(
-				hw_res_right, evt_payload);
-			if (!sof_status)
-				ife_hwr_mgr_ctx->sof_cnt[core_idx]++;
-			else
-				break;
-		}
-
-		core_index0 = hw_res_left->hw_intf->hw_idx;
-		core_index1 = hw_res_right->hw_intf->hw_idx;
-
-		if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
-			break;
-
-		rc = cam_ife_hw_mgr_check_irq_for_dual_vfe(ife_hwr_mgr_ctx,
-			core_index0, core_index1, evt_payload->evt_id);
-
+	case CAM_ISP_HW_VFE_IN_RDI0:
+	case CAM_ISP_HW_VFE_IN_RDI1:
+	case CAM_ISP_HW_VFE_IN_RDI2:
+	case CAM_ISP_HW_VFE_IN_RDI3:
+	case CAM_ISP_HW_VFE_IN_PDLIB:
+	case CAM_ISP_HW_VFE_IN_LCR:
 		break;
 
 	default:
-		CAM_ERR(CAM_ISP, "error with hw_res");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid res_id: %d",
+			event_info->res_id);
 		break;
 	}
 
-	CAM_DBG(CAM_ISP, "Exit (sof_status = %d)", sof_status);
+	CAM_DBG(CAM_ISP, "Epoch for VFE source %d", event_info->res_id);
 
-	return rc;
+	return 0;
 }
 
-static int cam_ife_hw_mgr_handle_sof(
-	void                              *handler_priv,
-	void                              *payload)
+static int cam_ife_hw_mgr_handle_hw_sof(
+	void                                 *ctx,
+	void                                 *evt_info)
 {
-	struct cam_isp_resource_node         *hw_res = NULL;
-	struct cam_ife_hw_mgr_ctx            *ife_hw_mgr_ctx;
-	struct cam_vfe_top_irq_evt_payload   *evt_payload;
-	struct cam_ife_hw_mgr_res            *ife_src_res = NULL;
+	struct cam_isp_hw_event_info         *event_info = evt_info;
+	struct cam_ife_hw_mgr_ctx            *ife_hw_mgr_ctx = ctx;
 	cam_hw_event_cb_func                  ife_hw_irq_sof_cb;
 	struct cam_isp_hw_sof_event_data      sof_done_event_data;
-	uint32_t  sof_status = 0;
-	bool sof_sent = false;
+	int                                   rc = 0;
 
-	CAM_DBG(CAM_ISP, "Enter");
-
-	ife_hw_mgr_ctx = handler_priv;
-	evt_payload = payload;
-	if (!evt_payload) {
-		CAM_ERR(CAM_ISP, "no payload");
-		return IRQ_HANDLED;
-	}
 	ife_hw_irq_sof_cb =
 		ife_hw_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_SOF];
 
-	evt_payload->evt_id = CAM_ISP_HW_EVENT_SOF;
+	switch (event_info->res_id) {
+	case CAM_ISP_HW_VFE_IN_CAMIF:
+	case CAM_ISP_HW_VFE_IN_RD:
+		ife_hw_mgr_ctx->sof_cnt[event_info->hw_idx]++;
+		rc = cam_ife_hw_mgr_check_irq_for_dual_vfe(ife_hw_mgr_ctx,
+			CAM_ISP_HW_EVENT_SOF);
+		if (!rc) {
+			cam_ife_mgr_cmd_get_sof_timestamp(ife_hw_mgr_ctx,
+				&sof_done_event_data.timestamp,
+				&sof_done_event_data.boot_time);
 
-	list_for_each_entry(ife_src_res,
-		&ife_hw_mgr_ctx->res_list_ife_src, list) {
+			if (atomic_read(&ife_hw_mgr_ctx->overflow_pending))
+				break;
 
-		if (ife_src_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
-			continue;
-
-		switch (ife_src_res->res_id) {
-		case CAM_ISP_HW_VFE_IN_RDI0:
-		case CAM_ISP_HW_VFE_IN_RDI1:
-		case CAM_ISP_HW_VFE_IN_RDI2:
-		case CAM_ISP_HW_VFE_IN_RDI3:
-			hw_res = ife_src_res->hw_res[0];
-			sof_status = hw_res->bottom_half_handler(
-				hw_res, evt_payload);
-
-			/* check if it is rdi only context */
-			if (ife_hw_mgr_ctx->is_fe_enable ||
-				ife_hw_mgr_ctx->is_rdi_only_context) {
-				if (!sof_status && !sof_sent) {
-					cam_ife_mgr_cmd_get_sof_timestamp(
-						ife_hw_mgr_ctx,
-						&sof_done_event_data.timestamp,
-						&sof_done_event_data.boot_time);
-
-					ife_hw_irq_sof_cb(
-						ife_hw_mgr_ctx->common.cb_priv,
-						CAM_ISP_HW_EVENT_SOF,
-						&sof_done_event_data);
-					CAM_DBG(CAM_ISP, "RDI sof_status = %d",
-						sof_status);
-
-					sof_sent = true;
-				}
-
-			}
-			break;
-
-		case CAM_ISP_HW_VFE_IN_CAMIF:
-		case CAM_ISP_HW_VFE_IN_RD:
-			sof_status = cam_ife_hw_mgr_process_camif_sof(
-				ife_src_res, ife_hw_mgr_ctx, evt_payload);
-			if (!sof_status && !sof_sent) {
-				cam_ife_mgr_cmd_get_sof_timestamp(
-					ife_hw_mgr_ctx,
-					&sof_done_event_data.timestamp,
-					&sof_done_event_data.boot_time);
-
-				ife_hw_irq_sof_cb(
-					ife_hw_mgr_ctx->common.cb_priv,
-					CAM_ISP_HW_EVENT_SOF,
-					&sof_done_event_data);
-				CAM_DBG(CAM_ISP, "sof_status = %d",
-					sof_status);
-
-				sof_sent = true;
-			}
-			break;
-		case CAM_ISP_HW_VFE_IN_PDLIB:
-			break;
-		default:
-			CAM_ERR(CAM_ISP, "Invalid resource id :%d",
-				ife_src_res->res_id);
-			break;
+			ife_hw_irq_sof_cb(ife_hw_mgr_ctx->common.cb_priv,
+				CAM_ISP_HW_EVENT_SOF, &sof_done_event_data);
 		}
+		break;
+
+	case CAM_ISP_HW_VFE_IN_RDI0:
+	case CAM_ISP_HW_VFE_IN_RDI1:
+	case CAM_ISP_HW_VFE_IN_RDI2:
+	case CAM_ISP_HW_VFE_IN_RDI3:
+		if (!ife_hw_mgr_ctx->is_rdi_only_context)
+			break;
+		cam_ife_mgr_cmd_get_sof_timestamp(ife_hw_mgr_ctx,
+			&sof_done_event_data.timestamp,
+			&sof_done_event_data.boot_time);
+		if (atomic_read(&ife_hw_mgr_ctx->overflow_pending))
+			break;
+		ife_hw_irq_sof_cb(ife_hw_mgr_ctx->common.cb_priv,
+			CAM_ISP_HW_EVENT_SOF, &sof_done_event_data);
+		break;
+
+	case CAM_ISP_HW_VFE_IN_PDLIB:
+	case CAM_ISP_HW_VFE_IN_LCR:
+		break;
+
+	default:
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid res_id: %d",
+			event_info->res_id);
+		break;
 	}
 
+	CAM_DBG(CAM_ISP, "SOF for VFE source %d", event_info->res_id);
+
 	return 0;
 }
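
The SOF path keeps its old shape in one place: bump the per-core counter,
resolve dual-VFE pairing, fetch the SOF and boot timestamps, and drop the
notification when an overflow or error is already pending on the context. A
compact sketch of that delivery tail, with externs standing in for the driver
structures and helpers:

    #include <stdint.h>

    struct sof_data { uint64_t timestamp; uint64_t boot_time; };

    /* stand-ins for cam_ife_mgr_cmd_get_sof_timestamp(), the
     * overflow_pending atomic and the registered SOF callback */
    extern void get_sof_timestamp(void *ctx, uint64_t *ts, uint64_t *boot);
    extern int overflow_pending(void *ctx);
    extern void notify_sof(void *ctx, const struct sof_data *d);

    static void deliver_sof(void *ctx)
    {
        struct sof_data d;

        get_sof_timestamp(ctx, &d.timestamp, &d.boot_time);
        if (overflow_pending(ctx))
            return; /* the error path owns this frame */
        notify_sof(ctx, &d);
    }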
 
-static int cam_ife_hw_mgr_handle_eof_for_camif_hw_res(
-	void                              *handler_priv,
-	void                              *payload)
+static int cam_ife_hw_mgr_handle_hw_eof(
+	void                                 *ctx,
+	void                                 *evt_info)
 {
-	int32_t rc = -EINVAL;
-	struct cam_isp_resource_node         *hw_res_left = NULL;
-	struct cam_isp_resource_node         *hw_res_right = NULL;
-	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx;
-	struct cam_vfe_top_irq_evt_payload   *evt_payload;
-	struct cam_ife_hw_mgr_res            *isp_ife_camif_res = NULL;
-	cam_hw_event_cb_func                  ife_hwr_irq_eof_cb;
+	struct cam_isp_hw_event_info         *event_info = evt_info;
+	struct cam_ife_hw_mgr_ctx            *ife_hw_mgr_ctx = ctx;
+	cam_hw_event_cb_func                  ife_hw_irq_eof_cb;
 	struct cam_isp_hw_eof_event_data      eof_done_event_data;
-	uint32_t  core_idx;
-	uint32_t  eof_status = 0;
-	uint32_t  core_index0;
-	uint32_t  core_index1;
+	int                                   rc = 0;
 
-	CAM_DBG(CAM_ISP, "Enter");
+	ife_hw_irq_eof_cb =
+		ife_hw_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_EOF];
 
-	ife_hwr_mgr_ctx = handler_priv;
-	evt_payload = payload;
-	if (!evt_payload) {
-		pr_err("%s: no payload\n", __func__);
-		return IRQ_HANDLED;
-	}
-	core_idx = evt_payload->core_index;
-	ife_hwr_irq_eof_cb =
-		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_EOF];
-
-	evt_payload->evt_id = CAM_ISP_HW_EVENT_EOF;
-
-	list_for_each_entry(isp_ife_camif_res,
-		&ife_hwr_mgr_ctx->res_list_ife_src, list) {
-
-		if (isp_ife_camif_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
-			continue;
-
-		hw_res_left = isp_ife_camif_res->hw_res[0];
-		hw_res_right = isp_ife_camif_res->hw_res[1];
-
-		CAM_DBG(CAM_ISP, "is_dual_vfe ? = %d",
-				isp_ife_camif_res->is_dual_vfe);
-		switch (isp_ife_camif_res->is_dual_vfe) {
-		/* Handling Single VFE Scenario */
-		case 0:
-			/* EOF check for Left side VFE */
-			if (!hw_res_left) {
-				pr_err("%s: VFE Device is NULL\n",
-					__func__);
+	switch (event_info->res_id) {
+	case CAM_ISP_HW_VFE_IN_CAMIF:
+		ife_hw_mgr_ctx->eof_cnt[event_info->hw_idx]++;
+		rc = cam_ife_hw_mgr_check_irq_for_dual_vfe(ife_hw_mgr_ctx,
+			CAM_ISP_HW_EVENT_EOF);
+		if (!rc) {
+			if (atomic_read(&ife_hw_mgr_ctx->overflow_pending))
 				break;
-			}
-			CAM_DBG(CAM_ISP, "curr_core_idx = %d, core idx hw = %d",
-					core_idx, hw_res_left->hw_intf->hw_idx);
-
-			if (core_idx == hw_res_left->hw_intf->hw_idx) {
-				eof_status = hw_res_left->bottom_half_handler(
-					hw_res_left, evt_payload);
-				if (atomic_read(
-					&ife_hwr_mgr_ctx->overflow_pending))
-					break;
-				if (!eof_status)
-					ife_hwr_irq_eof_cb(
-						ife_hwr_mgr_ctx->common.cb_priv,
-						CAM_ISP_HW_EVENT_EOF,
-						&eof_done_event_data);
-			}
-
-			break;
-		/* Handling dual VFE Scenario */
-		case 1:
-			if ((!hw_res_left) || (!hw_res_right)) {
-				CAM_ERR(CAM_ISP, "Dual VFE Device is NULL");
-				break;
-			}
-			if (core_idx == hw_res_left->hw_intf->hw_idx) {
-				eof_status = hw_res_left->bottom_half_handler(
-					hw_res_left, evt_payload);
-
-				if (!eof_status)
-					ife_hwr_mgr_ctx->eof_cnt[core_idx]++;
-				else
-					break;
-			}
-
-			/* EOF check for Right side VFE */
-			if (core_idx == hw_res_right->hw_intf->hw_idx) {
-				eof_status = hw_res_right->bottom_half_handler(
-					hw_res_right, evt_payload);
-
-				if (!eof_status)
-					ife_hwr_mgr_ctx->eof_cnt[core_idx]++;
-				else
-					break;
-			}
-
-			core_index0 = hw_res_left->hw_intf->hw_idx;
-			core_index1 = hw_res_right->hw_intf->hw_idx;
-
-			rc = cam_ife_hw_mgr_check_irq_for_dual_vfe(
-					ife_hwr_mgr_ctx,
-					core_index0,
-					core_index1,
-					evt_payload->evt_id);
-
-			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
-				break;
-
-			if (!rc)
-				ife_hwr_irq_eof_cb(
-					ife_hwr_mgr_ctx->common.cb_priv,
-					CAM_ISP_HW_EVENT_EOF,
-					&eof_done_event_data);
-
-			break;
-
-		default:
-			CAM_ERR(CAM_ISP, "error with hw_res");
+			ife_hw_irq_eof_cb(ife_hw_mgr_ctx->common.cb_priv,
+				CAM_ISP_HW_EVENT_EOF, &eof_done_event_data);
 		}
+		break;
+
+	case CAM_ISP_HW_VFE_IN_RDI0:
+	case CAM_ISP_HW_VFE_IN_RDI1:
+	case CAM_ISP_HW_VFE_IN_RDI2:
+	case CAM_ISP_HW_VFE_IN_RDI3:
+	case CAM_ISP_HW_VFE_IN_PDLIB:
+	case CAM_ISP_HW_VFE_IN_LCR:
+		break;
+
+	default:
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid res_id: %d",
+			event_info->res_id);
+		break;
 	}
 
-	CAM_DBG(CAM_ISP, "Exit (eof_status = %d)", eof_status);
+	CAM_DBG(CAM_ISP, "EOF for VFE source 0x%x",
+		event_info->res_id);
 
 	return 0;
 }
 
-
-static int cam_ife_hw_mgr_handle_buf_done_for_hw_res(
-	void                              *handler_priv,
-	void                              *payload)
-
+static int cam_ife_hw_mgr_handle_hw_buf_done(
+	void                                *ctx,
+	void                                *evt_info)
 {
-	int32_t                              buf_done_status = 0;
-	int32_t                              i;
-	int32_t                              rc = 0;
 	cam_hw_event_cb_func                 ife_hwr_irq_wm_done_cb;
-	struct cam_isp_resource_node        *hw_res_left = NULL;
-	struct cam_ife_hw_mgr_ctx           *ife_hwr_mgr_ctx = NULL;
-	struct cam_vfe_bus_irq_evt_payload  *evt_payload = payload;
-	struct cam_ife_hw_mgr_res           *isp_ife_out_res = NULL;
-	struct cam_hw_event_recovery_data    recovery_data;
+	struct cam_ife_hw_mgr_ctx           *ife_hw_mgr_ctx = ctx;
 	struct cam_isp_hw_done_event_data    buf_done_event_data = {0};
-	struct cam_isp_hw_error_event_data   error_event_data = {0};
-	uint32_t  error_resc_handle[CAM_IFE_HW_OUT_RES_MAX];
-	uint32_t  num_of_error_handles = 0;
+	struct cam_isp_hw_event_info        *event_info = evt_info;
 
-	CAM_DBG(CAM_ISP, "Enter");
-
-	ife_hwr_mgr_ctx = evt_payload->ctx;
 	ife_hwr_irq_wm_done_cb =
-		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_DONE];
+		ife_hw_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_DONE];
 
-	evt_payload->evt_id = CAM_ISP_HW_EVENT_DONE;
+	buf_done_event_data.num_handles = 1;
+	buf_done_event_data.resource_handle[0] = event_info->res_id;
 
-	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
-		isp_ife_out_res = &ife_hwr_mgr_ctx->res_list_ife_out[i];
+	if (atomic_read(&ife_hw_mgr_ctx->overflow_pending))
+		return 0;
 
-		if (isp_ife_out_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
-			continue;
-
-		hw_res_left = isp_ife_out_res->hw_res[0];
-
-		/*
-		 * DUAL VFE: Index 0 is always a master. In case of composite
-		 * Error, if the error is not in master, it needs to be checked
-		 * in slave (for debugging purposes only). For other cases:
-		 * Index zero is valid
-		 */
-
-		if (hw_res_left && (evt_payload->core_index ==
-			hw_res_left->hw_intf->hw_idx))
-			buf_done_status = hw_res_left->bottom_half_handler(
-				hw_res_left, evt_payload);
-		else
-			continue;
-
-		switch (buf_done_status) {
-		case CAM_VFE_IRQ_STATUS_ERR_COMP:
-			/*
-			 * Write interface can pipeline up to 2 buffer done
-			 * strobes from each write client. If any of the clients
-			 * triggers a third buffer done strobe before a
-			 * composite interrupt based on the first buffer done is
-			 * triggered, an error irq is set. This scenario can
-			 * only happen if a client is 3 frames ahead of the
-			 * other clients enabled in the same composite mask.
-			 */
-		case CAM_VFE_IRQ_STATUS_COMP_OWRT:
-			/*
-			 * It is an indication that bandwidth is not sufficient
-			 * to generate composite done irq within the VBI time.
-			 */
-
-			error_resc_handle[num_of_error_handles++] =
-					isp_ife_out_res->res_id;
-
-			if (num_of_error_handles > 0) {
-				error_event_data.error_type =
-					CAM_ISP_HW_ERROR_BUSIF_OVERFLOW;
-				goto err;
-			}
-
-			break;
-		case CAM_VFE_IRQ_STATUS_ERR:
-			break;
-		case CAM_VFE_IRQ_STATUS_SUCCESS:
-			buf_done_event_data.num_handles = 1;
-			buf_done_event_data.resource_handle[0] =
-				isp_ife_out_res->res_id;
-
-			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
-				break;
-			/* Report for Successful buf_done event if any */
-			if (buf_done_event_data.num_handles > 0 &&
-				ife_hwr_irq_wm_done_cb) {
-				CAM_DBG(CAM_ISP, "notify isp context");
-				ife_hwr_irq_wm_done_cb(
-					ife_hwr_mgr_ctx->common.cb_priv,
-					CAM_ISP_HW_EVENT_DONE,
-					&buf_done_event_data);
-			}
-
-			break;
-		default:
-			/* Do NOTHING */
-			error_resc_handle[num_of_error_handles++] =
-				isp_ife_out_res->res_id;
-			if (num_of_error_handles > 0) {
-				error_event_data.error_type =
-					CAM_ISP_HW_ERROR_BUSIF_OVERFLOW;
-				goto err;
-			}
-			break;
-		}
-		if (!buf_done_status)
-			CAM_DBG(CAM_ISP,
-				"buf_done status:(%d),out_res->res_id: 0x%x",
-				buf_done_status, isp_ife_out_res->res_id);
+	if (buf_done_event_data.num_handles > 0 && ife_hwr_irq_wm_done_cb) {
+		CAM_DBG(CAM_ISP, "Notify ISP context");
+		ife_hwr_irq_wm_done_cb(ife_hw_mgr_ctx->common.cb_priv,
+			CAM_ISP_HW_EVENT_DONE, &buf_done_event_data);
 	}
 
-	return rc;
+	CAM_DBG(CAM_ISP, "Buf done for out_res->res_id: 0x%x",
+		event_info->res_id);
 
-err:
-	/*
-	 * Report for error if any.
-	 * For the first phase, Error is reported as overflow, for all
-	 * the affected context and any successful buf_done event is not
-	 * reported.
-	 */
-	rc = cam_ife_hw_mgr_find_affected_ctx(ife_hwr_mgr_ctx,
-		&error_event_data, evt_payload->core_index,
-		&recovery_data);
-
-	/*
-	 * We can temporarily return from here as
-	 * for the first phase, we are going to reset entire HW.
-	 */
-
-	CAM_DBG(CAM_ISP, "Exit buf_done_status Error = %d",
-		buf_done_status);
-	return rc;
+	return 0;
 }
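
Because composite-error classification now happens in the bus IRQ layer and
surfaces through CAM_ISP_HW_EVENT_ERROR, the buf-done handler shrinks to
packaging a single resource handle and invoking the done callback; the old
per-status switch and the error_resc_handle bookkeeping are gone. A minimal
model of the packaging (field names per the diff, array size illustrative):

    #include <stdint.h>

    struct done_event {
        uint32_t num_handles;
        uint32_t resource_handle[1];
    };

    static void package_buf_done(struct done_event *ev, uint32_t res_id)
    {
        ev->num_handles = 1; /* exactly one WM done per event now */
        ev->resource_handle[0] = res_id;
    }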
 
-int cam_ife_mgr_do_tasklet_buf_done(void *handler_priv,
-	void *evt_payload_priv)
+static int cam_ife_hw_mgr_event_handler(
+	void                                *priv,
+	uint32_t                             evt_id,
+	void                                *evt_info)
 {
-	struct cam_ife_hw_mgr_ctx               *ife_hwr_mgr_ctx = handler_priv;
-	struct cam_vfe_bus_irq_evt_payload      *evt_payload;
-	int rc = -EINVAL;
+	int                                  rc = 0;
 
-	if (!handler_priv)
-		return rc;
+	if (!evt_info)
+		return -EINVAL;
 
-	evt_payload = evt_payload_priv;
-	ife_hwr_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)evt_payload->ctx;
+	if (!priv && (evt_id != CAM_ISP_HW_EVENT_ERROR))
+		return -EINVAL;
 
-	CAM_DBG(CAM_ISP, "addr of evt_payload = %pK core index:0x%x",
-		evt_payload, evt_payload->core_index);
-	CAM_DBG(CAM_ISP, "bus_irq_status_0: = %x", evt_payload->irq_reg_val[0]);
-	CAM_DBG(CAM_ISP, "bus_irq_status_1: = %x", evt_payload->irq_reg_val[1]);
-	/* WM Done */
-	return cam_ife_hw_mgr_handle_buf_done_for_hw_res(ife_hwr_mgr_ctx,
-		evt_payload_priv);
-}
+	CAM_DBG(CAM_ISP, "Event ID 0x%x", evt_id);
 
-int cam_ife_mgr_do_tasklet(void *handler_priv, void *evt_payload_priv)
-{
-	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx = handler_priv;
-	struct cam_vfe_top_irq_evt_payload   *evt_payload;
-	int rc = -EINVAL;
+	switch (evt_id) {
+	case CAM_ISP_HW_EVENT_SOF:
+		rc = cam_ife_hw_mgr_handle_hw_sof(priv, evt_info);
+		break;
 
-	if (!evt_payload_priv)
-		return rc;
+	case CAM_ISP_HW_EVENT_REG_UPDATE:
+		rc = cam_ife_hw_mgr_handle_hw_rup(priv, evt_info);
+		break;
 
-	evt_payload = evt_payload_priv;
-	if (!handler_priv)
-		return rc;
+	case CAM_ISP_HW_EVENT_EPOCH:
+		rc = cam_ife_hw_mgr_handle_hw_epoch(priv, evt_info);
+		break;
 
-	ife_hwr_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)handler_priv;
+	case CAM_ISP_HW_EVENT_EOF:
+		rc = cam_ife_hw_mgr_handle_hw_eof(priv, evt_info);
+		break;
 
-	CAM_DBG(CAM_ISP, "addr of evt_payload = %pK core_index:%d",
-		(void *)evt_payload,
-		evt_payload->core_index);
-	CAM_DBG(CAM_ISP,
-		"irq_status_0 = 0x%x, irq_status_1 = 0x%x, irq_status_2 = 0x%x ",
-		evt_payload->irq_reg_val[0],
-		evt_payload->irq_reg_val[1],
-		evt_payload->irq_reg_val[2]);
+	case CAM_ISP_HW_EVENT_DONE:
+		rc = cam_ife_hw_mgr_handle_hw_buf_done(priv, evt_info);
+		break;
 
-	/*
-	 * If overflow/overwrite/error/violation are pending
-	 * for this context it needs to be handled remaining
-	 * interrupts are ignored.
-	 */
-	rc = cam_ife_hw_mgr_handle_camif_error(ife_hwr_mgr_ctx,
-		evt_payload_priv);
+	case CAM_ISP_HW_EVENT_ERROR:
+		rc = cam_ife_hw_mgr_handle_hw_err(evt_info);
+		break;
 
-	if (rc) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"Encountered Error (%d), ignoring other irqs",
-			rc);
-		goto put_payload;
+	default:
+		CAM_ERR(CAM_ISP, "Invalid event ID %d", evt_id);
+		break;
 	}
 
-	CAM_DBG(CAM_ISP, "Calling EOF");
-	cam_ife_hw_mgr_handle_eof_for_camif_hw_res(ife_hwr_mgr_ctx,
-		evt_payload_priv);
-
-	CAM_DBG(CAM_ISP, "Calling SOF");
-	/* SOF IRQ */
-	cam_ife_hw_mgr_handle_sof(ife_hwr_mgr_ctx,
-		evt_payload_priv);
-
-	if (evt_payload->hw_version != CAM_CPAS_TITAN_480_V100) {
-		CAM_DBG(CAM_ISP, "Calling RUP");
-		/* REG UPDATE */
-		cam_ife_hw_mgr_handle_reg_update(ife_hwr_mgr_ctx,
-		evt_payload_priv);
-	}
-
-	CAM_DBG(CAM_ISP, "Calling EPOCH");
-	/* EPOCH IRQ */
-	cam_ife_hw_mgr_handle_epoch_for_camif_hw_res(ife_hwr_mgr_ctx,
-		evt_payload_priv);
-
-put_payload:
-	cam_vfe_put_evt_payload(evt_payload->core_info, &evt_payload);
-	return IRQ_HANDLED;
-}
-
-
-int cam_ife_mgr_do_tasklet_reg_update(
-	void *handler_priv, void *evt_payload_priv)
-{
-	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx = handler_priv;
-	struct cam_vfe_bus_irq_evt_payload   *evt_payload;
-	int                                   rc = -EINVAL;
-
-	evt_payload = evt_payload_priv;
-
-	if (!evt_payload_priv || !handler_priv) {
-		CAM_ERR(CAM_ISP, "Invalid handle:%pK or event payload:%pK",
-			handler_priv, evt_payload_priv);
-		return rc;
-	}
-	ife_hwr_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)handler_priv;
-
-	CAM_DBG(CAM_ISP, "addr of evt_payload = %pK core_index:%d",
-		(void *)evt_payload,
-		evt_payload->core_index);
-	CAM_DBG(CAM_ISP,
-		"bus_irq_status_0: = 0x%x, bus_irq_status_1: = 0x%x, calling RUP",
-		evt_payload->irq_reg_val[0],
-		evt_payload->irq_reg_val[1]);
-	/* REG UPDATE */
-	rc = cam_ife_hw_mgr_handle_reg_update_in_bus(ife_hwr_mgr_ctx,
-		evt_payload_priv);
-
-	if (rc)
-		CAM_ERR(CAM_ISP,
-			"Encountered Error, rc = %d", rc);
-
 	return rc;
 }
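
cam_ife_hw_mgr_event_handler() is the single entry point replacing the three
exported tasklet handlers deleted below: the VFE layers invoke it with (priv,
evt_id, evt_info), and priv may legitimately be NULL for error events because
affected contexts are then discovered through the global manager. A sketch of
how the manager would hand this callback down at acquire time; only the .priv
and .event_cb fields come from this patch, the surrounding setup is assumed:

    /* Hypothetical acquire-time wiring of the unified callback. */
    static void wire_event_cb(struct cam_vfe_acquire_args *args,
        struct cam_ife_hw_mgr_ctx *ctx)
    {
        args->priv     = ctx;                          /* per-context data */
        args->event_cb = cam_ife_hw_mgr_event_handler; /* one cb, all events */
    }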
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
index f9e44c6..711f279 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
@@ -124,6 +124,7 @@
  * @config_done_complete    indicator for configuration complete
  * @init_done               indicate whether init hw is done
  * @is_fe_enable            indicate whether fetch engine\read path is enabled
+ * @is_dual                 indicate whether context is in dual VFE mode
  */
 struct cam_ife_hw_mgr_ctx {
 	struct list_head                list;
@@ -160,6 +161,7 @@
 	struct completion               config_done_complete;
 	bool                            init_done;
 	bool                            is_fe_enable;
+	bool                            is_dual;
 };
 
 /**
@@ -210,38 +212,4 @@
  */
 int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl);
 
-/**
- * cam_ife_mgr_do_tasklet_buf_done()
- *
- * @brief:              Main tasklet handle function for the buf done event
- *
- * @handler_priv:       Tasklet information handle
- * @evt_payload_priv:   Event payload for the handler function
- *
- */
-int cam_ife_mgr_do_tasklet_buf_done(void *handler_priv, void *evt_payload_priv);
-
-/**
- * cam_ife_mgr_do_tasklet()
- *
- * @brief:              Main tasklet handle function for mux resource events
- *
- * @handler_priv:       Tasklet information handle
- * @evt_payload_priv:   Event payload for the handler function
- *
- */
-int cam_ife_mgr_do_tasklet(void *handler_priv, void *evt_payload_priv);
-
-/**
- * cam_ife_mgr_do_tasklet_reg_update()
- *
- * @brief:              Tasklet handle function for reg update
- *
- * @handler_priv:       Tasklet information handle
- * @evt_payload_priv:   Event payload for the handler function
- *
- */
-int cam_ife_mgr_do_tasklet_reg_update(void *handler_priv,
-	void *evt_payload_priv);
-
 #endif /* _CAM_IFE_HW_MGR_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c
index 293b4e2..5145dad 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/slab.h>
@@ -24,6 +24,7 @@
  * @list:                   list_head member for each entry in queue
  * @payload:                Payload structure for the event. This will be
  *                          passed to the handler function
+ * @handler_priv:           Private data passed at event subscribe
  * @bottom_half_handler:    Function pointer for event handler in bottom
  *                          half context
  *
@@ -31,6 +32,7 @@
 struct cam_tasklet_queue_cmd {
 	struct list_head                   list;
 	void                              *payload;
+	void                              *handler_priv;
 	CAM_IRQ_HANDLER_BOTTOM_HALF        bottom_half_handler;
 };
 
@@ -203,6 +205,7 @@
 	CAM_DBG(CAM_ISP, "Enqueue tasklet cmd");
 	tasklet_cmd->bottom_half_handler = bottom_half_handler;
 	tasklet_cmd->payload = evt_payload_priv;
+	tasklet_cmd->handler_priv = handler_priv;
 	spin_lock_irqsave(&tasklet->tasklet_lock, flags);
 	list_add_tail(&tasklet_cmd->list,
 		&tasklet->used_cmd_list);
@@ -317,7 +320,7 @@
 	tasklet_info = (struct cam_tasklet_info *)data;
 
 	while (!cam_tasklet_dequeue_cmd(tasklet_info, &tasklet_cmd)) {
-		tasklet_cmd->bottom_half_handler(tasklet_info->ctx_priv,
+		tasklet_cmd->bottom_half_handler(tasklet_cmd->handler_priv,
 			tasklet_cmd->payload);
 		cam_tasklet_put_cmd(tasklet_info, (void **)(&tasklet_cmd));
 	}
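
This tasklet change is what makes the unified callback workable: each queued
command now carries the handler_priv captured at enqueue time, so the bottom
half runs with the event's own context rather than the tasklet-wide ctx_priv.
A stand-alone model of that pairing:

    /* Per-command private data, snapshotted at enqueue. */
    struct queue_cmd {
        void *payload;
        void *handler_priv;
        int (*bottom_half)(void *handler_priv, void *payload);
    };

    static void run_queued_cmd(struct queue_cmd *cmd)
    {
        /* uses the priv captured at enqueue, not the tasklet owner's */
        cmd->bottom_half(cmd->handler_priv, cmd->payload);
    }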
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
index 9056718..e91092a 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -1052,7 +1052,7 @@
 	CAM_DBG(CAM_ISP, "CSID:%d init CSID HW",
 		csid_hw->hw_intf->hw_idx);
 
-	clk_lvl = cam_ife_csid_get_vote_level(soc_info, csid_hw->clk_rate);
+	clk_lvl = cam_soc_util_get_vote_level(soc_info, csid_hw->clk_rate);
 	CAM_DBG(CAM_ISP, "CSID clock lvl %u", clk_lvl);
 
 	rc = cam_ife_csid_enable_soc_resources(soc_info, clk_lvl);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
index a35d8951..263a464 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 #include <linux/slab.h>
 #include "cam_ife_csid_soc.h"
@@ -110,7 +110,7 @@
 }
 
 int cam_ife_csid_enable_soc_resources(
-	struct cam_hw_soc_info *soc_info, uint32_t clk_lvl)
+	struct cam_hw_soc_info *soc_info, enum cam_vote_level clk_level)
 {
 	int rc = 0;
 	struct cam_csid_soc_private       *soc_private;
@@ -135,7 +135,7 @@
 	}
 
 	rc = cam_soc_util_enable_platform_resource(soc_info, true,
-		clk_lvl, true);
+		clk_level, true);
 	if (rc) {
 		CAM_ERR(CAM_ISP, "enable platform failed");
 		goto stop_cpas;
@@ -228,24 +228,3 @@
 
 	return rc;
 }
-
-uint32_t cam_ife_csid_get_vote_level(struct cam_hw_soc_info *soc_info,
-	uint64_t clock_rate)
-{
-	int i = 0;
-
-	if (!clock_rate)
-		return CAM_SVS_VOTE;
-
-	for (i = 0; i < CAM_MAX_VOTE; i++) {
-		if (soc_info->clk_rate[i][soc_info->num_clk - 1] >=
-			clock_rate) {
-			CAM_DBG(CAM_ISP,
-				"Clock rate %lld, selected clock level %d",
-				clock_rate, i);
-			return i;
-		}
-	}
-
-	return CAM_TURBO_VOTE;
-}
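
The removed lookup above returned the lowest vote level whose highest-indexed
clock rate covered the request, defaulting to SVS for a zero rate and TURBO
when nothing sufficed; the CSID init path now relies on the generic
cam_soc_util_get_vote_level() instead. A stand-alone model of the removed
behaviour (the enum values here are placeholders, not the camera stack's real
ones):

    #include <stdint.h>

    #define MAX_VOTE 4
    enum { SVS_VOTE = 1, TURBO_VOTE = MAX_VOTE - 1 }; /* placeholders */

    /* max_clk_rate[i]: the highest clock rate level i can supply */
    static int get_vote_level(const uint64_t max_clk_rate[MAX_VOTE],
        uint64_t clock_rate)
    {
        int i;

        if (!clock_rate)
            return SVS_VOTE;
        for (i = 0; i < MAX_VOTE; i++)
            if (max_clk_rate[i] >= clock_rate)
                return i;
        return TURBO_VOTE;
    }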
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
index 534ce84..3bdd4fb 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
@@ -8,9 +8,9 @@
 
 #include <linux/completion.h>
 #include "cam_hw.h"
-#include <uapi/media/cam_isp.h>
 #include "cam_soc_util.h"
 #include "cam_irq_controller.h"
+#include "cam_hw_intf.h"
 #include <uapi/media/cam_isp.h>
 
 /*
@@ -153,6 +153,24 @@
 };
 
 /*
+ * struct cam_isp_hw_event_info:
+ *
+ * @Brief:          Structure to pass event details to hw mgr
+ *
+ * @res_type:       Type of IFE resource
+ * @res_id:         Unique resource ID
+ * @hw_idx:         IFE hw index
+ * @err_type:       Error type if any
+ *
+ */
+struct cam_isp_hw_event_info {
+	enum cam_isp_resource_type     res_type;
+	uint32_t                       res_id;
+	uint32_t                       hw_idx;
+	uint32_t                       err_type;
+};
+
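
cam_isp_hw_event_info is the lightweight record the VFE and bus layers now
hand to the hw mgr in place of the heavyweight per-IRQ payloads. An
illustrative producer-side fill, using constants that appear elsewhere in this
patch; core_idx, priv and event_cb are assumed locals, not the driver's code:

    /* Illustrative only: how a VFE block might report an SOF. */
    struct cam_isp_hw_event_info evt_info = {
        .res_type = CAM_ISP_RESOURCE_VFE_IN,
        .res_id   = CAM_ISP_HW_VFE_IN_CAMIF,
        .hw_idx   = core_idx, /* which VFE fired */
        .err_type = 0,        /* meaningful only for error events */
    };

    event_cb(priv, CAM_ISP_HW_EVENT_SOF, &evt_info);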
+/*
  * struct cam_isp_hw_cmd_buf_update:
  *
  * @Brief:           Contain the new created command buffer information
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
index 8c985bc..14bced0 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
@@ -112,7 +112,6 @@
  *                           (Default is Master in case of Single VFE)
  * @dual_slave_core:         If Master and Slave exists, HW Index of Slave
  * @cdm_ops:                 CDM operations
- * @ctx:                     Context data
  */
 struct cam_vfe_hw_vfe_out_acquire_args {
 	struct cam_isp_resource_node      *rsrc_node;
@@ -123,7 +122,6 @@
 	uint32_t                           is_master;
 	uint32_t                           dual_slave_core;
 	struct cam_cdm_utils_ops          *cdm_ops;
-	void                              *ctx;
 };
 
 /*
@@ -153,6 +151,8 @@
  * @tasklet:                 Tasklet to associate with this resource. This is
  *                           used to schedule bottom of IRQ events associated
  *                           with this resource.
+ * @priv:                    Context data
+ * @event_cb:                Callback function to hw mgr in case of hw events
  * @vfe_out:                 Acquire args for VFE_OUT
  * @vfe_bus_rd               Acquire args for VFE_BUS_READ
  * @vfe_in:                  Acquire args for VFE_IN
@@ -160,6 +160,8 @@
 struct cam_vfe_acquire_args {
 	enum cam_isp_resource_type           rsrc_type;
 	void                                *tasklet;
+	void                                *priv;
+	cam_hw_mgr_event_cb_func             event_cb;
 	union {
 		struct cam_vfe_hw_vfe_out_acquire_args  vfe_out;
 		struct cam_vfe_hw_vfe_out_acquire_args  vfe_bus_rd;
@@ -227,24 +229,14 @@
  *                           related to VFE_TOP resources
  *
  * @list:                    list_head node for the payload
- * @core_index:              Index of VFE HW that generated this IRQ event
- * @core_info:               Private data of handler in bottom half context
- * @evt_id:                  IRQ event
  * @irq_reg_val:             IRQ and Error register values, read when IRQ was
  *                           handled
- * @error_type:              Identify different errors
  * @ts:                      Timestamp
- * @hw_version:              CPAS hw version
  */
 struct cam_vfe_top_irq_evt_payload {
-	struct list_head           list;
-	uint32_t                   core_index;
-	void                      *core_info;
-	uint32_t                   evt_id;
-	uint32_t                   irq_reg_val[CAM_IFE_IRQ_REGISTERS_MAX];
-	uint32_t                   error_type;
-	struct cam_isp_timestamp   ts;
-	uint32_t                   hw_version;
+	struct list_head            list;
+	uint32_t                    irq_reg_val[CAM_IFE_IRQ_REGISTERS_MAX];
+	struct cam_isp_timestamp    ts;
 };
 
 /*
@@ -261,7 +253,6 @@
  *                           handled
  * @error_type:              Identify different errors
  * @ts:                      Timestamp
- * @ctx:                     Context data received during acquire
  */
 struct cam_vfe_bus_irq_evt_payload {
 	struct list_head            list;
@@ -271,32 +262,8 @@
 	uint32_t                    overflow_status;
 	uint32_t                    image_size_violation_status;
 	uint32_t                    evt_id;
-	uint32_t                    irq_reg_val[CAM_IFE_IRQ_BUS_VER3_REG_MAX];
-	uint32_t                    error_type;
+	uint32_t                    irq_reg_val[CAM_IFE_BUS_IRQ_REGISTERS_MAX];
 	struct cam_isp_timestamp    ts;
-	void                       *ctx;
-};
-
-/*
- * struct cam_vfe_irq_handler_priv:
- *
- * @Brief:                   This structure is used as private data to
- *                           register with IRQ controller. It has information
- *                           needed by top half and bottom half.
- *
- * @core_index:              Index of VFE HW that generated this IRQ event
- * @core_info:               Private data of handler in bottom half context
- * @mem_base:                Mapped base address of the register space
- * @reset_complete:          Completion structure to be signaled if Reset IRQ
- *                           is Set
- * @hw_version:              CPAS hw version
- */
-struct cam_vfe_irq_handler_priv {
-	uint32_t                     core_index;
-	void                        *core_info;
-	void __iomem                *mem_base;
-	struct completion           *reset_complete;
-	uint32_t                     hw_version;
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
index abab72f..485da8b 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
@@ -19,80 +19,23 @@
 #include "cam_cpas_api.h"
 
 static const char drv_name[] = "vfe";
-static uint32_t irq_reg_offset[CAM_IFE_IRQ_REGISTERS_MAX] = {
-	0x00000054,
-	0x00000058,
-	0x0000005C,
-	0x00000074,
-};
 
-static uint32_t camif_irq_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
-	0x00000000,
-	0x00000007,
-	0x00000000,
-};
+#define CAM_VFE_17X_CLEAR_0_REG_OFFSET              0x00000064
+#define CAM_VFE_17X_CLEAR_1_REG_OFFSET              0x00000068
+#define CAM_VFE_17X_IRQ_CMD_REG_OFFSET              0x00000058
+#define CAM_VFE_17X_TOP_RESET_MASK                  0x80000000
 
-static uint32_t camif_fe_irq_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
-	0x10000056,
-	0x00000000,
-};
+#define CAM_VFE_48X_CLEAR_0_REG_OFFSET              0x00000048
+#define CAM_VFE_48X_CLEAR_1_REG_OFFSET              0x0000004C
+#define CAM_VFE_48X_CLEAR_2_REG_OFFSET              0x00000050
+#define CAM_VFE_48X_IRQ_CMD_REG_OFFSET              0x00000038
+#define CAM_VFE_48X_TOP_RESET_MASK                  0x00000001
 
-static uint32_t camif_irq_err_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
-	0xFBE00200,
-	0x00000000,
-	0x303FFF80,
-};
-
-static uint32_t rdi_irq_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
-	0x38E00000,
-	0xFFF0,
-	0x00000000,
-};
-
-static int cam_vfe_get_evt_payload(struct cam_vfe_hw_core_info *core_info,
-	struct cam_vfe_top_irq_evt_payload    **evt_payload)
-{
-	spin_lock(&core_info->spin_lock);
-	if (list_empty(&core_info->free_payload_list)) {
-		*evt_payload = NULL;
-		spin_unlock(&core_info->spin_lock);
-		CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload, core info 0x%x\n",
-			core_info->cpas_handle);
-		return -ENODEV;
-	}
-
-	*evt_payload = list_first_entry(&core_info->free_payload_list,
-		struct cam_vfe_top_irq_evt_payload, list);
-	list_del_init(&(*evt_payload)->list);
-	spin_unlock(&core_info->spin_lock);
-
-	return 0;
-}
-
-int cam_vfe_put_evt_payload(void             *core_info,
-	struct cam_vfe_top_irq_evt_payload  **evt_payload)
-{
-	struct cam_vfe_hw_core_info        *vfe_core_info = core_info;
-	unsigned long                       flags;
-
-	if (!core_info) {
-		CAM_ERR(CAM_ISP, "Invalid param core_info NULL");
-		return -EINVAL;
-	}
-	if (*evt_payload == NULL) {
-		CAM_ERR(CAM_ISP, "No payload to put");
-		return -EINVAL;
-	}
-
-	spin_lock_irqsave(&vfe_core_info->spin_lock, flags);
-	(*evt_payload)->error_type = 0;
-	list_add_tail(&(*evt_payload)->list, &vfe_core_info->free_payload_list);
-	*evt_payload = NULL;
-	spin_unlock_irqrestore(&vfe_core_info->spin_lock, flags);
-
-
-	return 0;
-}
+#define CAM_VFE_LITE_48X_CLEAR_0_REG_OFFSET         0x00000034
+#define CAM_VFE_LITE_48X_CLEAR_1_REG_OFFSET         0x00000038
+#define CAM_VFE_LITE_48X_CLEAR_2_REG_OFFSET         0x0000003C
+#define CAM_VFE_LITE_48X_IRQ_CMD_REG_OFFSET         0x00000024
+#define CAM_VFE_LITE_48X_TOP_RESET_MASK             0x00020000
 
 int cam_vfe_get_hw_caps(void *hw_priv, void *get_hw_cap_args, uint32_t arg_size)
 {
@@ -121,9 +64,12 @@
 	struct cam_irq_th_payload         *th_payload)
 {
 	int32_t                            rc = -EINVAL;
-	struct cam_vfe_irq_handler_priv   *handler_priv;
+	struct cam_hw_info                *vfe_hw;
+	struct cam_hw_soc_info            *soc_info = NULL;
+	void __iomem                      *mem_base;
 
-	handler_priv = th_payload->handler_priv;
+	vfe_hw = th_payload->handler_priv;
+	soc_info = &vfe_hw->soc_info;
 
 	CAM_DBG(CAM_ISP, "Enter");
 
@@ -131,26 +77,56 @@
 	 * Clear All IRQs to avoid spurious IRQs immediately
 	 * after Reset Done.
 	 */
+	CAM_DBG(CAM_ISP, "TOP_IRQ_STATUS_0 = 0x%x",
+		th_payload->evt_status_arr[0]);
 
-	switch (handler_priv->hw_version) {
+	mem_base = soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base;
+
+	switch (soc_info->hw_version) {
 	case CAM_CPAS_TITAN_480_V100:
-		if (th_payload->evt_status_arr[0] & 0x1) {
-			cam_io_w(0xFFFFFFFF, handler_priv->mem_base + 0x48);
-			cam_io_w(0xFFFFFFFF, handler_priv->mem_base + 0x4C);
-			cam_io_w(0xFFFFFFFF, handler_priv->mem_base + 0x50);
-			cam_io_w(0x1, handler_priv->mem_base + 0x38);
-			CAM_DBG(CAM_ISP, "Calling Complete for RESET CMD");
-			complete(handler_priv->reset_complete);
-			rc = 0;
+		if (strnstr(soc_info->compatible, "lite",
+			strlen(soc_info->compatible)) == NULL) {
+			if (th_payload->evt_status_arr[0] & 0x1) {
+				cam_io_w(0xFFFFFFFF, mem_base +
+					CAM_VFE_48X_CLEAR_0_REG_OFFSET);
+				cam_io_w(0xFFFFFFFF, mem_base +
+					CAM_VFE_48X_CLEAR_1_REG_OFFSET);
+				cam_io_w(0xFFFFFFFF, mem_base +
+					CAM_VFE_48X_CLEAR_2_REG_OFFSET);
+				cam_io_w(0x00000001, mem_base +
+					CAM_VFE_48X_IRQ_CMD_REG_OFFSET);
+				CAM_DBG(CAM_ISP,
+					"Calling Complete for RESET CMD");
+				complete(&vfe_hw->hw_complete);
+				rc = 0;
+			}
+		} else {
+			if (th_payload->evt_status_arr[0] & (1<<17)) {
+				cam_io_w(0xFFFFFFFF, mem_base +
+					CAM_VFE_LITE_48X_CLEAR_0_REG_OFFSET);
+				cam_io_w(0xFFFFFFFF, mem_base +
+					CAM_VFE_LITE_48X_CLEAR_1_REG_OFFSET);
+				cam_io_w(0xFFFFFFFF, mem_base +
+					CAM_VFE_LITE_48X_CLEAR_2_REG_OFFSET);
+				cam_io_w(0x00000001, mem_base +
+					CAM_VFE_LITE_48X_IRQ_CMD_REG_OFFSET);
+				CAM_DBG(CAM_ISP,
+					"Calling Complete for RESET CMD");
+				complete(&vfe_hw->hw_complete);
+				rc = 0;
+			}
 		}
 		break;
 	default:
 		if (th_payload->evt_status_arr[0] & (1<<31)) {
-			cam_io_w(0xFFFFFFFF, handler_priv->mem_base + 0x64);
-			cam_io_w(0xFFFFFFFF, handler_priv->mem_base + 0x68);
-			cam_io_w(0x00000001, handler_priv->mem_base + 0x58);
+			cam_io_w(0xFFFFFFFF, mem_base +
+				CAM_VFE_17X_CLEAR_0_REG_OFFSET);
+			cam_io_w(0xFFFFFFFF, mem_base +
+				CAM_VFE_17X_CLEAR_1_REG_OFFSET);
+			cam_io_w(0x00000001, mem_base +
+				CAM_VFE_17X_IRQ_CMD_REG_OFFSET);
 			CAM_DBG(CAM_ISP, "Calling Complete for RESET CMD");
-			complete(handler_priv->reset_complete);
+			complete(&vfe_hw->hw_complete);
 			rc = 0;
 		}
 		break;
@@ -160,74 +136,6 @@
 	return rc;
 }
 
-static int cam_vfe_irq_err_top_half(uint32_t    evt_id,
-	struct cam_irq_th_payload   *th_payload)
-{
-	int32_t                              rc;
-	int                                  i;
-	struct cam_vfe_irq_handler_priv     *handler_priv;
-	struct cam_vfe_top_irq_evt_payload  *evt_payload;
-	struct cam_vfe_hw_core_info         *core_info;
-	bool                                 error_flag = false;
-
-	CAM_DBG(CAM_ISP, "IRQ status_0 = %x, IRQ status_1 = %x",
-		th_payload->evt_status_arr[0], th_payload->evt_status_arr[1]);
-
-	handler_priv = th_payload->handler_priv;
-	core_info =  handler_priv->core_info;
-	/*
-	 *  need to handle overflow condition here, otherwise irq storm
-	 *  will block everything
-	 */
-	if (th_payload->evt_status_arr[1] ||
-		(th_payload->evt_status_arr[0] & camif_irq_err_reg_mask[0])) {
-		CAM_ERR(CAM_ISP,
-			"Encountered Error: vfe:%d:  Irq_status0=0x%x Status1=0x%x",
-			handler_priv->core_index, th_payload->evt_status_arr[0],
-			th_payload->evt_status_arr[1]);
-		CAM_ERR(CAM_ISP,
-			"Stopping further IRQ processing from this HW index=%d",
-			handler_priv->core_index);
-		cam_irq_controller_disable_irq(core_info->vfe_irq_controller,
-			core_info->irq_err_handle);
-		cam_irq_controller_clear_and_mask(evt_id,
-			core_info->vfe_irq_controller);
-		error_flag = true;
-	}
-
-	rc  = cam_vfe_get_evt_payload(handler_priv->core_info, &evt_payload);
-	if (rc) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"No tasklet_cmd is free in queue");
-		CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ status0=0x%x status1=0x%x",
-			th_payload->evt_status_arr[0],
-			th_payload->evt_status_arr[1]);
-		return rc;
-	}
-
-	cam_isp_hw_get_timestamp(&evt_payload->ts);
-
-	evt_payload->core_index = handler_priv->core_index;
-	evt_payload->core_info  = handler_priv->core_info;
-	evt_payload->evt_id  = evt_id;
-
-	for (i = 0; i < th_payload->num_registers; i++)
-		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
-
-	for (; i < CAM_IFE_IRQ_REGISTERS_MAX; i++) {
-		evt_payload->irq_reg_val[i] = cam_io_r(handler_priv->mem_base +
-			irq_reg_offset[i]);
-	}
-
-	if (error_flag)
-		CAM_INFO(CAM_ISP, "Violation status = %x",
-			evt_payload->irq_reg_val[2]);
-
-	th_payload->evt_payload_priv = evt_payload;
-
-	return rc;
-}
-
 int cam_vfe_init_hw(void *hw_priv, void *init_hw_args, uint32_t arg_size)
 {
 	struct cam_hw_info                *vfe_hw = hw_priv;
@@ -341,7 +249,7 @@
 	mutex_lock(&vfe_hw->hw_mutex);
 	if (!vfe_hw->open_count) {
 		mutex_unlock(&vfe_hw->hw_mutex);
-		CAM_ERR(CAM_ISP, "Error! Unbalanced deinit");
+		CAM_ERR(CAM_ISP, "Error. Unbalanced deinit");
 		return -EFAULT;
 	}
 	vfe_hw->open_count--;
@@ -407,57 +315,53 @@
 	soc_info = &vfe_hw->soc_info;
 	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
 
-	core_info->irq_payload.core_index = soc_info->index;
-	core_info->irq_payload.mem_base =
-		vfe_hw->soc_info.reg_map[VFE_CORE_BASE_IDX].mem_base;
-	core_info->irq_payload.hw_version = soc_info->hw_version;
-	core_info->irq_payload.core_info = core_info;
-	core_info->irq_payload.reset_complete = &vfe_hw->hw_complete;
-
 	memset(top_reset_irq_reg_mask, 0, sizeof(top_reset_irq_reg_mask));
 
-	switch (vfe_hw->soc_info.hw_version) {
+	switch (soc_info->hw_version) {
 	case CAM_CPAS_TITAN_480_V100:
 		if (strnstr(soc_info->compatible, "lite",
 			strlen(soc_info->compatible)) == NULL)
 			top_reset_irq_reg_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS0]
-				= 0x00000001;
+				= CAM_VFE_48X_TOP_RESET_MASK;
 		else
 			top_reset_irq_reg_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS0]
-				= 0x00020000;
+				= CAM_VFE_LITE_48X_TOP_RESET_MASK;
 		break;
 	default:
 		top_reset_irq_reg_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS0]
-			= 0x80000000;
+			= CAM_VFE_17X_TOP_RESET_MASK;
 		break;
 	}
 
-	core_info->irq_handle = cam_irq_controller_subscribe_irq(
+	core_info->reset_irq_handle = cam_irq_controller_subscribe_irq(
 		core_info->vfe_irq_controller, CAM_IRQ_PRIORITY_0,
-		top_reset_irq_reg_mask, &core_info->irq_payload,
+		top_reset_irq_reg_mask, vfe_hw,
 		cam_vfe_reset_irq_top_half, NULL, NULL, NULL);
-	if (core_info->irq_handle < 0) {
+	if (core_info->reset_irq_handle < 1) {
 		CAM_ERR(CAM_ISP, "subscribe irq controller failed");
+		core_info->reset_irq_handle = 0;
 		return -EFAULT;
 	}
 
 	reinit_completion(&vfe_hw->hw_complete);
 
-	CAM_DBG(CAM_ISP, "calling RESET on vfe %d", soc_info->index);
+	CAM_DBG(CAM_ISP, "calling RESET on VFE:%d", soc_info->index);
+
 	core_info->vfe_top->hw_ops.reset(core_info->vfe_top->top_priv,
 		reset_core_args, arg_size);
-	CAM_DBG(CAM_ISP, "waiting for vfe reset complete");
+
 	/* Wait for Completion or Timeout of 500ms */
 	rc = wait_for_completion_timeout(&vfe_hw->hw_complete, 500);
-	if (!rc)
-		CAM_ERR(CAM_ISP, "Error! Reset Timeout");
 
-	CAM_DBG(CAM_ISP, "reset complete done (%d)", rc);
+	if (!rc)
+		CAM_ERR(CAM_ISP, "Reset Timeout");
+	else
+		CAM_DBG(CAM_ISP, "reset complete done (%d)", rc);
 
 	rc = cam_irq_controller_unsubscribe_irq(
-		core_info->vfe_irq_controller, core_info->irq_handle);
+		core_info->vfe_irq_controller, core_info->reset_irq_handle);
 	if (rc)
-		CAM_ERR(CAM_ISP, "Error! Unsubscribe failed");
+		CAM_ERR(CAM_ISP, "Error. Unsubscribe failed");
 
 	CAM_DBG(CAM_ISP, "Exit");
 	return rc;
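
Note on the timeout in the hunk above (an aside, not part of the patch): wait_for_completion_timeout() takes its timeout in jiffies, so the bare 500 equals 500 ms only when CONFIG_HZ=1000. A unit-safe form would be:

	rc = wait_for_completion_timeout(&vfe_hw->hw_complete,
		msecs_to_jiffies(500));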
@@ -472,55 +376,6 @@
 	time_stamp->mono_time.tv_usec   = ts.tv_nsec/1000;
 }
 
-static int cam_vfe_irq_top_half(uint32_t    evt_id,
-	struct cam_irq_th_payload   *th_payload)
-{
-	int32_t                              rc;
-	int                                  i;
-	struct cam_vfe_irq_handler_priv     *handler_priv;
-	struct cam_vfe_top_irq_evt_payload  *evt_payload;
-	struct cam_vfe_hw_core_info         *core_info;
-
-	handler_priv = th_payload->handler_priv;
-
-	for (i = 0; i < th_payload->num_registers; i++)
-		CAM_DBG(CAM_ISP, "IRQ status_%d = 0x%x",
-		i, th_payload->evt_status_arr[i]);
-
-
-	rc  = cam_vfe_get_evt_payload(handler_priv->core_info, &evt_payload);
-	if (rc) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"No tasklet_cmd is free in queue");
-		CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ status0=0x%x status1=0x%x",
-			th_payload->evt_status_arr[0],
-			th_payload->evt_status_arr[1]);
-		return rc;
-	}
-
-	core_info =  handler_priv->core_info;
-	cam_isp_hw_get_timestamp(&evt_payload->ts);
-
-	evt_payload->core_index = handler_priv->core_index;
-	evt_payload->core_info  = handler_priv->core_info;
-	evt_payload->evt_id  = evt_id;
-	evt_payload->hw_version = handler_priv->hw_version;
-
-	for (i = 0; i < th_payload->num_registers; i++)
-		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
-
-	evt_payload->irq_reg_val[i] = cam_io_r(handler_priv->mem_base +
-		irq_reg_offset[i]);
-
-	CAM_DBG(CAM_ISP,
-		"Violation status = 0x%x", evt_payload->irq_reg_val[i]);
-
-	th_payload->evt_payload_priv = evt_payload;
-
-	CAM_DBG(CAM_ISP, "Exit");
-	return rc;
-}
-
 int cam_vfe_reserve(void *hw_priv, void *reserve_args, uint32_t arg_size)
 {
 	struct cam_vfe_hw_core_info       *core_info = NULL;
@@ -528,7 +383,6 @@
 	struct cam_vfe_acquire_args       *acquire;
 	int rc = -ENODEV;
 
-
 	if (!hw_priv || !reserve_args || (arg_size !=
 		sizeof(struct cam_vfe_acquire_args))) {
 		CAM_ERR(CAM_ISP, "Invalid input arguments");
@@ -542,8 +396,7 @@
 	if (acquire->rsrc_type == CAM_ISP_RESOURCE_VFE_IN) {
 		rc = core_info->vfe_top->hw_ops.reserve(
 			core_info->vfe_top->top_priv,
-			acquire,
-			sizeof(*acquire));
+			acquire, sizeof(*acquire));
 	} else if (acquire->rsrc_type == CAM_ISP_RESOURCE_VFE_OUT) {
 		rc = core_info->vfe_bus->hw_ops.reserve(
 			core_info->vfe_bus->bus_priv, acquire,
@@ -553,16 +406,14 @@
 			rc = core_info->vfe_rd_bus->hw_ops.reserve(
 				core_info->vfe_rd_bus->bus_priv, acquire,
 				sizeof(*acquire));
-	} else {
+	} else
 		CAM_ERR(CAM_ISP, "Invalid res type:%d", acquire->rsrc_type);
-	}
 
 	mutex_unlock(&vfe_hw->hw_mutex);
 
 	return rc;
 }
 
-
 int cam_vfe_release(void *hw_priv, void *release_args, uint32_t arg_size)
 {
 	struct cam_vfe_hw_core_info       *core_info = NULL;
@@ -621,91 +472,33 @@
 	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
 	isp_res = (struct cam_isp_resource_node  *)start_args;
 	core_info->tasklet_info = isp_res->tasklet_info;
-	core_info->irq_payload.hw_version = soc_info->hw_version;
 
 	mutex_lock(&vfe_hw->hw_mutex);
 	if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_IN) {
-		if (isp_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF) {
-			isp_res->irq_handle =
-				cam_irq_controller_subscribe_irq(
-					core_info->vfe_irq_controller,
-					CAM_IRQ_PRIORITY_1,
-					camif_irq_reg_mask,
-					&core_info->irq_payload,
-					cam_vfe_irq_top_half,
-					cam_ife_mgr_do_tasklet,
-					isp_res->tasklet_info,
-					&tasklet_bh_api);
-			if (isp_res->irq_handle < 1)
-				rc = -ENOMEM;
-		} else if (isp_res->res_id == CAM_ISP_HW_VFE_IN_RD) {
-			isp_res->irq_handle =
-				cam_irq_controller_subscribe_irq(
-					core_info->vfe_irq_controller,
-					CAM_IRQ_PRIORITY_1,
-					camif_fe_irq_reg_mask,
-					&core_info->irq_payload,
-					cam_vfe_irq_top_half,
-					cam_ife_mgr_do_tasklet,
-					isp_res->tasklet_info,
-					&tasklet_bh_api);
-			if (isp_res->irq_handle < 1)
-				rc = -ENOMEM;
-		} else if (isp_res->rdi_only_ctx) {
-			isp_res->irq_handle =
-				cam_irq_controller_subscribe_irq(
-					core_info->vfe_irq_controller,
-					CAM_IRQ_PRIORITY_1,
-					rdi_irq_reg_mask,
-					&core_info->irq_payload,
-					cam_vfe_irq_top_half,
-					cam_ife_mgr_do_tasklet,
-					isp_res->tasklet_info,
-					&tasklet_bh_api);
-			if (isp_res->irq_handle < 1)
-				rc = -ENOMEM;
-		}
+		rc = core_info->vfe_top->hw_ops.start(
+			core_info->vfe_top->top_priv, isp_res,
+			sizeof(struct cam_isp_resource_node));
 
-		if (rc == 0) {
-			rc = core_info->vfe_top->hw_ops.start(
-				core_info->vfe_top->top_priv, isp_res,
-				sizeof(struct cam_isp_resource_node));
-			if (rc)
-				CAM_ERR(CAM_ISP, "Start failed. type:%d",
-					isp_res->res_type);
-		} else {
-			CAM_ERR(CAM_ISP,
-				"Error! subscribe irq controller failed");
-		}
+		if (rc)
+			CAM_ERR(CAM_ISP, "Failed to start VFE IN");
 	} else if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_OUT) {
 		rc = core_info->vfe_bus->hw_ops.start(isp_res, NULL, 0);
+
+		if (rc)
+			CAM_ERR(CAM_ISP, "Failed to start VFE OUT");
 	} else if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_BUS_RD) {
-		if (core_info->vfe_rd_bus)
+		if (core_info->vfe_rd_bus) {
 			rc = core_info->vfe_rd_bus->hw_ops.start(isp_res,
 				NULL, 0);
+
+			if (rc)
+				CAM_ERR(CAM_ISP, "Failed to start BUS RD");
+		}
 	} else {
 		CAM_ERR(CAM_ISP, "Invalid res type:%d", isp_res->res_type);
 		rc = -EFAULT;
 	}
 
-	if (!core_info->irq_err_handle) {
-		core_info->irq_err_handle =
-			cam_irq_controller_subscribe_irq(
-				core_info->vfe_irq_controller,
-				CAM_IRQ_PRIORITY_0,
-				camif_irq_err_reg_mask,
-				&core_info->irq_payload,
-				cam_vfe_irq_err_top_half,
-				cam_ife_mgr_do_tasklet,
-				core_info->tasklet_info,
-				&tasklet_bh_api);
-		if (core_info->irq_err_handle < 1) {
-			CAM_ERR(CAM_ISP, "Error handle subscribe failure");
-			rc = -ENOMEM;
-			core_info->irq_err_handle = 0;
-		}
-	}
-
 	mutex_unlock(&vfe_hw->hw_mutex);
 
 	return rc;
@@ -729,10 +522,6 @@
 
 	mutex_lock(&vfe_hw->hw_mutex);
 	if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_IN) {
-		cam_irq_controller_unsubscribe_irq(
-			core_info->vfe_irq_controller, isp_res->irq_handle);
-		isp_res->irq_handle = 0;
-
 		rc = core_info->vfe_top->hw_ops.stop(
 			core_info->vfe_top->top_priv, isp_res,
 			sizeof(struct cam_isp_resource_node));
@@ -746,11 +535,11 @@
 		CAM_ERR(CAM_ISP, "Invalid res type:%d", isp_res->res_type);
 	}
 
-	if (core_info->irq_err_handle) {
+	if (core_info->reset_irq_handle > 0) {
 		cam_irq_controller_unsubscribe_irq(
 			core_info->vfe_irq_controller,
-			core_info->irq_err_handle);
-		core_info->irq_err_handle = 0;
+			core_info->reset_irq_handle);
+		core_info->reset_irq_handle = 0;
 	}
 
 	mutex_unlock(&vfe_hw->hw_mutex);
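
Taken together, the hunks above settle on one handle discipline for the IRQ controller: a subscription handle is valid only when >= 1, a failed handle is zeroed on the spot, and teardown is guarded so it stays idempotent. A condensed sketch (the two wrapper functions are hypothetical; the names and the "< 1" / "> 0" convention come from the patch):

static int demo_subscribe_reset_irq(struct cam_vfe_hw_core_info *core_info,
	uint32_t *mask, struct cam_hw_info *vfe_hw)
{
	core_info->reset_irq_handle = cam_irq_controller_subscribe_irq(
		core_info->vfe_irq_controller, CAM_IRQ_PRIORITY_0,
		mask, vfe_hw, cam_vfe_reset_irq_top_half, NULL, NULL, NULL);
	if (core_info->reset_irq_handle < 1) {
		/* zero and negative both mean failure under the new scheme */
		core_info->reset_irq_handle = 0;
		return -EFAULT;
	}
	return 0;
}

static void demo_unsubscribe_reset_irq(struct cam_vfe_hw_core_info *core_info)
{
	/* guarded, so a second call is harmless */
	if (core_info->reset_irq_handle > 0) {
		cam_irq_controller_unsubscribe_irq(
			core_info->vfe_irq_controller,
			core_info->reset_irq_handle);
		core_info->reset_irq_handle = 0;
	}
}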
@@ -855,7 +644,6 @@
 	struct cam_vfe_hw_info                     *vfe_hw_info)
 {
 	int rc = -EINVAL;
-	int i;
 
 	CAM_DBG(CAM_ISP, "Enter");
 
@@ -868,8 +656,8 @@
 		return rc;
 	}
 
-	rc = cam_vfe_top_init(vfe_hw_info->top_version,
-		soc_info, hw_intf, vfe_hw_info->top_hw_info,
+	rc = cam_vfe_top_init(vfe_hw_info->top_version, soc_info, hw_intf,
+		vfe_hw_info->top_hw_info, core_info->vfe_irq_controller,
 		&core_info->vfe_top);
 	if (rc) {
 		CAM_ERR(CAM_ISP, "Error, cam_vfe_top_init failed rc = %d", rc);
@@ -877,9 +665,8 @@
 	}
 
 	rc = cam_vfe_bus_init(vfe_hw_info->bus_version, BUS_TYPE_WR,
-		soc_info, hw_intf,
-		vfe_hw_info->bus_hw_info, core_info->vfe_irq_controller,
-		&core_info->vfe_bus);
+		soc_info, hw_intf, vfe_hw_info->bus_hw_info,
+		core_info->vfe_irq_controller, &core_info->vfe_bus);
 	if (rc) {
 		CAM_ERR(CAM_ISP, "Error, cam_vfe_bus_init failed rc = %d", rc);
 		goto deinit_top;
@@ -899,13 +686,6 @@
 			core_info->vfe_rd_bus, hw_intf->hw_idx);
 	}
 
-	INIT_LIST_HEAD(&core_info->free_payload_list);
-	for (i = 0; i < CAM_VFE_EVT_MAX; i++) {
-		INIT_LIST_HEAD(&core_info->evt_payload[i].list);
-		list_add_tail(&core_info->evt_payload[i].list,
-			&core_info->free_payload_list);
-	}
-
 	spin_lock_init(&core_info->spin_lock);
 
 	return rc;
@@ -924,15 +704,10 @@
 	struct cam_vfe_hw_info                       *vfe_hw_info)
 {
 	int                rc = -EINVAL;
-	int                i;
 	unsigned long      flags;
 
 	spin_lock_irqsave(&core_info->spin_lock, flags);
 
-	INIT_LIST_HEAD(&core_info->free_payload_list);
-	for (i = 0; i < CAM_VFE_EVT_MAX; i++)
-		INIT_LIST_HEAD(&core_info->evt_payload[i].list);
-
 	rc = cam_vfe_bus_deinit(vfe_hw_info->bus_version,
 		&core_info->vfe_bus);
 	if (rc)
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h
index 7b5d8e4..43afd03 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_VFE_CORE_H_
@@ -51,13 +51,8 @@
 	struct cam_vfe_bus                 *vfe_bus;
 	struct cam_vfe_bus                 *vfe_rd_bus;
 	void                               *tasklet_info;
-	struct cam_vfe_top_irq_evt_payload  evt_payload[CAM_VFE_EVT_MAX];
-	struct list_head                    free_payload_list;
-	struct cam_vfe_irq_handler_priv     irq_payload;
-	uint32_t                            cpas_handle;
-	int                                 irq_handle;
-	int                                 irq_err_handle;
 	spinlock_t                          spin_lock;
+	int                                 reset_irq_handle;
 };
 
 int cam_vfe_get_hw_caps(void *device_priv,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
index 7f1001f..a3f1220 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
@@ -51,6 +51,10 @@
 		return rc;
 	}
 
+	if (strnstr(soc_info->compatible, "lite",
+		strlen(soc_info->compatible)) != NULL)
+		goto end;
+
 	switch (soc_info->hw_version) {
 	case CAM_CPAS_TITAN_480_V100:
 		num_ubwc_cfg = of_property_count_u32_elems(of_node,
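
The early-out added above keys off the compatible string; a hypothetical helper that makes the predicate reusable (strnstr() is the kernel's length-bounded strstr):

static bool cam_vfe_soc_is_lite(struct cam_hw_soc_info *soc_info)
{
	/* VFE-lite has no UBWC clients, so the UBWC DT parsing is skipped */
	return strnstr(soc_info->compatible, "lite",
		strlen(soc_info->compatible)) != NULL;
}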
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175.h
index 82ffa44..db61bfb 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175.h
@@ -75,6 +75,8 @@
 	.eof_irq_mask                    = 0x00000002,
 	.error_irq_mask0                 = 0x0003FC00,
 	.error_irq_mask1                 = 0xEFFF7E80,
+	.subscribe_irq_mask0             = 0x00000017,
+	.subscribe_irq_mask1             = 0x00000000,
 	.enable_diagnostic_hw            = 0x1,
 };
 
@@ -94,8 +96,10 @@
 	.lite_epoch0_irq_mask            = 0x00100000,
 	.dual_pd_reg_upd_irq_mask        = 0x04000000,
 	.lite_eof_irq_mask               = 0x00080000,
-	.lite_error_irq_mask0            = 0x00400000,
-	.lite_error_irq_mask1            = 0x00004100,
+	.lite_err_irq_mask0              = 0x00400000,
+	.lite_err_irq_mask1              = 0x00004100,
+	.lite_subscribe_irq_mask0        = 0x001C0000,
+	.lite_subscribe_irq_mask1        = 0x0,
 	.extern_reg_update_shift         = 4,
 	.dual_pd_path_sel_shift          = 24,
 };
@@ -149,6 +153,13 @@
 	.reg_update_cmd           = 0x000004AC,
 };
 
+static struct cam_vfe_rdi_common_reg_data vfe175_rdi_reg_data = {
+	.subscribe_irq_mask0      = 0x780001E0,
+	.subscribe_irq_mask1      = 0x0,
+	.error_irq_mask0          = 0x0,
+	.error_irq_mask1          = 0x3C,
+};
+
 static struct cam_vfe_rdi_reg_data  vfe_175_rdi_0_data = {
 	.reg_update_cmd_data      = 0x2,
 	.sof_irq_mask             = 0x8000000,
@@ -180,8 +191,9 @@
 		.reg_data       = &vfe175_camif_lite_reg_data,
 		},
 	.rdi_hw_info = {
-		.common_reg = &vfe175_top_common_reg,
-		.rdi_reg    = &vfe175_rdi_reg,
+		.common_reg      = &vfe175_top_common_reg,
+		.rdi_reg         = &vfe175_rdi_reg,
+		.common_reg_data = &vfe175_rdi_reg_data,
 		.reg_data = {
 			&vfe_175_rdi_0_data,
 			&vfe_175_rdi_1_data,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175_130.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175_130.h
index 7712496..393c774 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175_130.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175_130.h
@@ -92,7 +92,7 @@
 	.reg_update_cmd           = 0x000004AC,
 	.vfe_diag_config          = 0x00000C48,
 	.vfe_diag_sensor_status   = 0x00000C4C,
-	.fe_cfg                = 0x00000084,
+	.fe_cfg                   = 0x00000084,
 };
 
 static struct cam_vfe_fe_reg_data vfe_175_130_fe_reg_data = {
@@ -123,7 +123,7 @@
 	.error_irq_mask0                 = 0x0003FC00,
 	.error_irq_mask1                 = 0xEFFF7E80,
 	.enable_diagnostic_hw            = 0x1,
-	.fe_mux_data                  = 0x2,
+	.fe_mux_data                     = 0x2,
 	.hbi_cnt_shift                   = 0x8,
 };
 
@@ -144,8 +144,8 @@
 	.lite_epoch0_irq_mask            = 0x00100000,
 	.dual_pd_reg_upd_irq_mask        = 0x04000000,
 	.lite_eof_irq_mask               = 0x00080000,
-	.lite_error_irq_mask0            = 0x00400000,
-	.lite_error_irq_mask1            = 0x00004100,
+	.lite_err_irq_mask0              = 0x00400000,
+	.lite_err_irq_mask1              = 0x00004100,
 	.extern_reg_update_shift         = 4,
 	.dual_pd_path_sel_shift          = 24,
 };
@@ -199,6 +199,13 @@
 	.reg_update_cmd           = 0x000004AC,
 };
 
+static struct cam_vfe_rdi_common_reg_data vfe175_130_rdi_reg_data = {
+	.subscribe_irq_mask0      = 0x780001E0,
+	.subscribe_irq_mask1      = 0x0,
+	.error_irq_mask0          = 0x0,
+	.error_irq_mask1          = 0x3C,
+};
+
 static struct cam_vfe_rdi_reg_data  vfe_175_130_rdi_0_data = {
 	.reg_update_cmd_data      = 0x2,
 	.sof_irq_mask             = 0x8000000,
@@ -230,8 +237,9 @@
 		.reg_data       = &vfe175_130_camif_lite_reg_data,
 		},
 	.rdi_hw_info = {
-		.common_reg = &vfe175_130_top_common_reg,
-		.rdi_reg    = &vfe175_130_rdi_reg,
+		.common_reg      = &vfe175_130_top_common_reg,
+		.rdi_reg         = &vfe175_130_rdi_reg,
+		.common_reg_data = &vfe175_130_rdi_reg_data,
 		.reg_data = {
 			&vfe_175_130_rdi_0_data,
 			&vfe_175_130_rdi_1_data,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h
index 3f86f61..71d848a 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h
@@ -72,10 +72,10 @@
 	.sof_irq_mask                    = 0x00000001,
 	.epoch0_irq_mask                 = 0x00000004,
 	.epoch1_irq_mask                 = 0x00000008,
-	.reg_update_irq_mask             = 0x00000001,
 	.eof_irq_mask                    = 0x00000002,
-	.error_irq_mask0                 = 0x0003FC00,
-	.error_irq_mask2                 = 0xEFFF7E80,
+	.error_irq_mask0                 = 0x82000200,
+	.error_irq_mask2                 = 0x30301F80,
+	.subscribe_irq_mask1             = 0x00000007,
 	.enable_diagnostic_hw            = 0x1,
 	.pp_camif_cfg_en_shift           = 0,
 	.pp_camif_cfg_ife_out_en_shift   = 8,
@@ -152,37 +152,40 @@
 	{
 		.extern_reg_update_shift         = 0,
 		.reg_update_cmd_data             = 0x2,
-		.epoch_line_cfg                  = 0x00140014,
+		.epoch_line_cfg                  = 0x0,
 		.sof_irq_mask                    = 0x10,
 		.epoch0_irq_mask                 = 0x40,
 		.epoch1_irq_mask                 = 0x80,
 		.eof_irq_mask                    = 0x20,
 		.error_irq_mask0                 = 0x20000000,
 		.error_irq_mask2                 = 0x20000,
+		.subscribe_irq_mask1             = 0x30,
 		.enable_diagnostic_hw            = 0x1,
 	},
 	{
 		.extern_reg_update_shift         = 0,
 		.reg_update_cmd_data             = 0x4,
-		.epoch_line_cfg                  = 0x00140014,
+		.epoch_line_cfg                  = 0x0,
 		.sof_irq_mask                    = 0x100,
 		.epoch0_irq_mask                 = 0x400,
 		.epoch1_irq_mask                 = 0x800,
 		.eof_irq_mask                    = 0x200,
 		.error_irq_mask0                 = 0x10000000,
 		.error_irq_mask2                 = 0x40000,
+		.subscribe_irq_mask1             = 0x300,
 		.enable_diagnostic_hw            = 0x1,
 	},
 	{
 		.extern_reg_update_shift         = 0,
 		.reg_update_cmd_data             = 0x8,
-		.epoch_line_cfg                  = 0x00140014,
+		.epoch_line_cfg                  = 0x0,
 		.sof_irq_mask                    = 0x1000,
 		.epoch0_irq_mask                 = 0x4000,
 		.epoch1_irq_mask                 = 0x8000,
 		.eof_irq_mask                    = 0x2000,
 		.error_irq_mask0                 = 0x8000000,
 		.error_irq_mask2                 = 0x80000,
+		.subscribe_irq_mask1             = 0x3000,
 		.enable_diagnostic_hw            = 0x1,
 	},
 };
@@ -208,7 +211,9 @@
 	.epoch0_irq_mask            = 0x400000,
 	.epoch1_irq_mask            = 0x800000,
 	.eof_irq_mask               = 0x200000,
-	.error_irq_mask0            = 0x18000,
+	.error_irq_mask0            = 0x0,
+	.error_irq_mask2            = 0x18000,
+	.subscribe_irq_mask1        = 0x300000,
 	.enable_diagnostic_hw       = 0x1,
 };
 
@@ -233,7 +238,9 @@
 	.epoch0_irq_mask            = 0x40000,
 	.epoch1_irq_mask            = 0x80000,
 	.eof_irq_mask               = 0x20000,
-	.error_irq_mask0            = 0x6000,
+	.error_irq_mask0            = 0x40000000,
+	.error_irq_mask2            = 0x6000,
+	.subscribe_irq_mask1        = 0x30000,
 	.enable_diagnostic_hw       = 0x1,
 };
 
@@ -1143,127 +1150,145 @@
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_RDI0,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_3,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_RDI1,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_4,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_RDI2,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_5,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_FULL,
 			.max_width     = 4096,
 			.max_height    = 4096,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_DS4,
 			.max_width     = 1920,
 			.max_height    = 1080,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_DS16,
 			.max_width     = 1920,
 			.max_height    = 1080,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_RAW_DUMP,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_FD,
 			.max_width     = 1920,
 			.max_height    = 1080,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_PDAF,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
-			.vfe_out_type  =
-				CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BE,
+			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BE,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
 			.vfe_out_type  =
 				CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BHIST,
 			.max_width     = 1920,
 			.max_height    = 1080,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
 			.vfe_out_type  =
 				CAM_VFE_BUS_VER3_VFE_OUT_STATS_TL_BG,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
-			.vfe_out_type  =
-				CAM_VFE_BUS_VER3_VFE_OUT_STATS_BF,
+			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_STATS_BF,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
-			.vfe_out_type  =
-				CAM_VFE_BUS_VER3_VFE_OUT_STATS_AWB_BG,
+			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_STATS_AWB_BG,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
-			.vfe_out_type  =
-				CAM_VFE_BUS_VER3_VFE_OUT_STATS_BHIST,
+			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_STATS_BHIST,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
-			.vfe_out_type  =
-				CAM_VFE_BUS_VER3_VFE_OUT_STATS_RS,
+			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_STATS_RS,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
-			.vfe_out_type  =
-				CAM_VFE_BUS_VER3_VFE_OUT_STATS_CS,
+			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_STATS_CS,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
-			.vfe_out_type  =
-				CAM_VFE_BUS_VER3_VFE_OUT_STATS_IHIST,
+			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_STATS_IHIST,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_FULL_DISP,
 			.max_width     = 4096,
 			.max_height    = 4096,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_DS4_DISP,
 			.max_width     = 1920,
 			.max_height    = 1080,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_DS16_DISP,
 			.max_width     = 1920,
 			.max_height    = 1080,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_2PD,
 			.max_width     = 1920,
 			.max_height    = 1080,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_1,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_LCR,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_2,
 		},
 	},
+	.comp_done_shift = 6,
+	.top_irq_shift   = 7,
 };
 
 static struct cam_irq_register_set vfe480_bus_rd_irq_reg[1] = {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite48x.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite48x.h
index beb93c3..9ebeb55 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite48x.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite48x.h
@@ -107,50 +107,54 @@
 static struct cam_vfe_camif_lite_ver3_reg_data vfe48x_camif_rdi_reg_data[4] = {
 	{
 		.extern_reg_update_shift         = 0,
-		.reg_update_cmd_data             = 0x11,
-		.epoch_line_cfg                  = 0x00140014,
+		.reg_update_cmd_data             = 0x1,
+		.epoch_line_cfg                  = 0x0,
 		.sof_irq_mask                    = 0x1,
 		.epoch0_irq_mask                 = 0x4,
 		.epoch1_irq_mask                 = 0x8,
 		.eof_irq_mask                    = 0x02,
 		.error_irq_mask0                 = 0x1,
 		.error_irq_mask2                 = 0x100,
+		.subscribe_irq_mask1             = 0x3,
 		.enable_diagnostic_hw            = 0x1,
 	},
 	{
 		.extern_reg_update_shift         = 0,
-		.reg_update_cmd_data             = 0x22,
-		.epoch_line_cfg                  = 0x00140014,
+		.reg_update_cmd_data             = 0x2,
+		.epoch_line_cfg                  = 0x0,
 		.sof_irq_mask                    = 0x10,
 		.epoch0_irq_mask                 = 0x40,
 		.epoch1_irq_mask                 = 0x80,
 		.eof_irq_mask                    = 0x20,
 		.error_irq_mask0                 = 0x2,
 		.error_irq_mask2                 = 0x200,
+		.subscribe_irq_mask1             = 0x30,
 		.enable_diagnostic_hw            = 0x1,
 	},
 	{
 		.extern_reg_update_shift         = 0,
-		.reg_update_cmd_data             = 0x44,
-		.epoch_line_cfg                  = 0x00140014,
+		.reg_update_cmd_data             = 0x4,
+		.epoch_line_cfg                  = 0x0,
 		.sof_irq_mask                    = 0x100,
 		.epoch0_irq_mask                 = 0x400,
 		.epoch1_irq_mask                 = 0x800,
 		.eof_irq_mask                    = 0x200,
 		.error_irq_mask0                 = 0x4,
 		.error_irq_mask2                 = 0x400,
+		.subscribe_irq_mask1             = 0x300,
 		.enable_diagnostic_hw            = 0x1,
 	},
 	{
 		.extern_reg_update_shift         = 0,
-		.reg_update_cmd_data             = 0x88,
-		.epoch_line_cfg                  = 0x00140014,
+		.reg_update_cmd_data             = 0x8,
+		.epoch_line_cfg                  = 0x0,
 		.sof_irq_mask                    = 0x1000,
 		.epoch0_irq_mask                 = 0x4000,
 		.epoch1_irq_mask                 = 0x8000,
 		.eof_irq_mask                    = 0x2000,
 		.error_irq_mask0                 = 0x8,
 		.error_irq_mask2                 = 0x800,
+		.subscribe_irq_mask1             = 0x3000,
 		.enable_diagnostic_hw            = 0x1,
 	},
 };
@@ -356,23 +360,29 @@
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_RDI0,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_0,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_RDI1,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_1,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_RDI2,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_2,
 		},
 		{
 			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_RDI3,
 			.max_width     = -1,
 			.max_height    = -1,
+			.source_group  = CAM_VFE_BUS_VER3_SRC_GRP_3,
 		},
 	},
+	.comp_done_shift = 4,
+	.top_irq_shift   = 4,
 };
 
 static struct cam_vfe_hw_info cam_vfe_lite48x_hw_info = {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_rd_ver1.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_rd_ver1.c
index 2a94d69..3b5d065 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_rd_ver1.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_rd_ver1.c
@@ -82,7 +82,6 @@
 	struct cam_vfe_bus_rd_ver1_reg_offset_bus_client  *hw_regs;
 	void                *ctx;
 
-	uint32_t             irq_enabled;
 	bool                 init_cfg_done;
 	bool                 hfr_cfg_done;
 
@@ -138,8 +137,8 @@
 	struct cam_isp_resource_node  vfe_bus_rd[
 		CAM_VFE_BUS_RD_VER1_VFE_BUSRD_MAX];
 
-	uint32_t                            irq_handle;
-	uint32_t                            error_irq_handle;
+	int                                 irq_handle;
+	int                                 error_irq_handle;
 };
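
A plausible motivation for the uint32_t-to-int change above (my reading; later hunks in this patch drop their explicit (int) casts in favor of plain "< 1" checks): a negative errno stored in an unsigned handle wraps to a huge positive value and sails past an unsigned "<= 0" test. A standalone demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int h = (unsigned int)-22;	/* -EINVAL stored in a u32 */
	int s = -22;				/* same value, kept signed */

	printf("unsigned handle <= 0? %d\n", h <= 0);	/* 0: looks valid */
	printf("signed handle   <  1? %d\n", s < 1);	/* 1: failure seen */
	return 0;
}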
 
 static int cam_vfe_bus_process_cmd(
@@ -254,7 +253,6 @@
 	rm_res_local->tasklet_info = tasklet;
 
 	rsrc_data = rm_res_local->res_priv;
-	rsrc_data->irq_enabled = subscribe_irq;
 	rsrc_data->ctx = ctx;
 	rsrc_data->is_dual = is_dual;
 	/* Set RM offset value to default */
@@ -273,7 +271,6 @@
 	struct cam_vfe_bus_rd_ver1_rm_resource_data *rsrc_data =
 		rm_res->res_priv;
 
-	rsrc_data->irq_enabled = 0;
 	rsrc_data->offset = 0;
 	rsrc_data->width = 0;
 	rsrc_data->height = 0;
@@ -507,7 +504,7 @@
 		rc = cam_vfe_bus_acquire_rm(ver1_bus_rd_priv,
 			bus_rd_acquire_args->out_port_info,
 			acq_args->tasklet,
-			bus_rd_acquire_args->ctx,
+			acq_args->priv,
 			bus_rd_res_id,
 			i,
 			subscribe_irq,
@@ -964,8 +961,9 @@
 		NULL,
 		NULL);
 
-	if (bus_priv->irq_handle <= 0) {
+	if (bus_priv->irq_handle < 1) {
 		CAM_ERR(CAM_ISP, "Failed to subscribe BUS IRQ");
+		bus_priv->irq_handle = 0;
 		return -EFAULT;
 	}
 	/* no clock gating at bus input */
@@ -1003,10 +1001,6 @@
 		rc = cam_irq_controller_unsubscribe_irq(
 			bus_priv->common_data.bus_irq_controller,
 			bus_priv->error_irq_handle);
-		if (rc)
-			CAM_ERR(CAM_ISP,
-				"Failed to unsubscribe error irq rc=%d", rc);
-
 		bus_priv->error_irq_handle = 0;
 	}
 
@@ -1014,10 +1008,6 @@
 		rc = cam_irq_controller_unsubscribe_irq(
 			bus_priv->common_data.vfe_irq_controller,
 			bus_priv->irq_handle);
-		if (rc)
-			CAM_ERR(CAM_ISP,
-				"Failed to unsubscribe irq rc=%d", rc);
-
 		bus_priv->irq_handle = 0;
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
index 8df2926..02d3ad3 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -98,15 +98,14 @@
 	uint32_t                                    secure_mode;
 	uint32_t                                    num_sec_out;
 	uint32_t                                    addr_no_sync;
+	cam_hw_mgr_event_cb_func                    event_cb;
 };
 
 struct cam_vfe_bus_ver2_wm_resource_data {
 	uint32_t             index;
 	struct cam_vfe_bus_ver2_common_data            *common_data;
 	struct cam_vfe_bus_ver2_reg_offset_bus_client  *hw_regs;
-	void                                *ctx;
 
-	uint32_t             irq_enabled;
 	bool                 init_cfg_done;
 	bool                 hfr_cfg_done;
 
@@ -147,7 +146,6 @@
 	struct cam_vfe_bus_ver2_common_data         *common_data;
 	struct cam_vfe_bus_ver2_reg_offset_comp_grp *hw_regs;
 
-	uint32_t                         irq_enabled;
 	uint32_t                         comp_grp_local_idx;
 	uint32_t                         unique_id;
 
@@ -158,9 +156,6 @@
 	uint32_t                         addr_sync_mode;
 
 	uint32_t                         acquire_dev_cnt;
-	uint32_t                         irq_trigger_cnt;
-
-	void                            *ctx;
 };
 
 struct cam_vfe_bus_ver2_vfe_out_data {
@@ -180,6 +175,7 @@
 	uint32_t                         max_height;
 	struct cam_cdm_utils_ops        *cdm_util_ops;
 	uint32_t                         secure_mode;
+	void                            *priv;
 };
 
 struct cam_vfe_bus_ver2_priv {
@@ -195,8 +191,8 @@
 	struct list_head                    free_dual_comp_grp;
 	struct list_head                    used_comp_grp;
 
-	uint32_t                            irq_handle;
-	uint32_t                            error_irq_handle;
+	int                                 irq_handle;
+	int                                 error_irq_handle;
 	void                               *tasklet_info;
 };
 
@@ -253,8 +249,6 @@
 	struct cam_vfe_bus_irq_evt_payload     **evt_payload)
 {
 	struct cam_vfe_bus_ver2_common_data *common_data = NULL;
-	uint32_t  *ife_irq_regs = NULL;
-	uint32_t   status_reg0, status_reg1, status_reg2;
 	unsigned long flags;
 
 	if (!core_info) {
@@ -265,17 +259,6 @@
 		CAM_ERR(CAM_ISP, "No payload to put");
 		return -EINVAL;
 	}
-	(*evt_payload)->error_type = 0;
-	ife_irq_regs = (*evt_payload)->irq_reg_val;
-	status_reg0 = ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS0];
-	status_reg1 = ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS1];
-	status_reg2 = ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS2];
-
-	if (status_reg0 || status_reg1 || status_reg2) {
-		CAM_DBG(CAM_ISP, "status0 0x%x status1 0x%x status2 0x%x",
-			status_reg0, status_reg1, status_reg2);
-		return 0;
-	}
 
 	common_data = core_info;
 
@@ -814,6 +797,78 @@
 	return wm_idx;
 }
 
+static void cam_vfe_bus_get_comp_vfe_out_res_id_list(
+	uint32_t comp_mask, uint32_t *out_list, int *num_out)
+{
+	int count = 0;
+
+	if (comp_mask & 0x1)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_RDI_0;
+
+	if (comp_mask & 0x2)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_RDI_1;
+
+	if (comp_mask & 0x4)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_RDI_2;
+
+	if ((comp_mask & 0x8) && (((comp_mask >> 4) & 0x1) == 0))
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_RDI_3;
+
+	if (comp_mask & 0x18)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_FULL;
+
+	if (comp_mask & 0x20)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_DS4;
+
+	if (comp_mask & 0x40)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_DS16;
+
+	if (comp_mask & 0x180)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_FD;
+
+	if (comp_mask & 0x200)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_RAW_DUMP;
+
+	if (comp_mask & 0x800)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_HDR_BE;
+
+	if (comp_mask & 0x1000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_HDR_BHIST;
+
+	if (comp_mask & 0x2000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_TL_BG;
+
+	if (comp_mask & 0x4000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_BF;
+
+	if (comp_mask & 0x8000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_AWB_BG;
+
+	if (comp_mask & 0x10000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_BHIST;
+
+	if (comp_mask & 0x20000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_RS;
+
+	if (comp_mask & 0x40000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_CS;
+
+	if (comp_mask & 0x80000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_IHIST;
+
+	if (comp_mask & 0x300000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_FULL_DISP;
+
+	if (comp_mask & 0x400000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_DS4_DISP;
+
+	if (comp_mask & 0x800000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_DS16_DISP;
+
+	*num_out = count;
+}
+
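
For reference, a condensed view of how the decoder just added is consumed in the done bottom half further down (comp_mask comes from the composite group's composite_mask):

	uint32_t out_list[CAM_VFE_BUS_VER2_VFE_OUT_MAX] = {0};
	int num_out = 0, i;

	cam_vfe_bus_get_comp_vfe_out_res_id_list(comp_mask,
		out_list, &num_out);
	for (i = 0; i < num_out; i++)
		CAM_DBG(CAM_ISP, "buf_done on out res 0x%x", out_list[i]);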
 static enum cam_vfe_bus_packer_format
 	cam_vfe_bus_get_packer_fmt(uint32_t out_fmt, int wm_index)
 {
@@ -866,10 +921,8 @@
 	struct cam_vfe_bus_ver2_priv          *ver2_bus_priv,
 	struct cam_isp_out_port_info          *out_port_info,
 	void                                  *tasklet,
-	void                                  *ctx,
 	enum cam_vfe_bus_ver2_vfe_out_type     vfe_out_res_id,
 	enum cam_vfe_bus_plane_type            plane,
-	uint32_t                               subscribe_irq,
 	struct cam_isp_resource_node         **wm_res,
 	uint32_t                              *client_done_mask,
 	uint32_t                               is_dual)
@@ -899,8 +952,6 @@
 	wm_res_local->tasklet_info = tasklet;
 
 	rsrc_data = wm_res_local->res_priv;
-	rsrc_data->irq_enabled = subscribe_irq;
-	rsrc_data->ctx = ctx;
 	rsrc_data->format = out_port_info->format;
 	rsrc_data->pack_fmt = cam_vfe_bus_get_packer_fmt(rsrc_data->format,
 		wm_idx);
@@ -1096,7 +1147,6 @@
 	struct cam_vfe_bus_ver2_wm_resource_data   *rsrc_data =
 		wm_res->res_priv;
 
-	rsrc_data->irq_enabled = 0;
 	rsrc_data->offset = 0;
 	rsrc_data->width = 0;
 	rsrc_data->height = 0;
@@ -1131,14 +1181,15 @@
 	return 0;
 }
 
-static int cam_vfe_bus_start_wm(struct cam_isp_resource_node *wm_res)
+static int cam_vfe_bus_start_wm(
+	struct cam_isp_resource_node *wm_res,
+	uint32_t                     *bus_irq_reg_mask)
 {
 	int rc = 0, val = 0;
 	struct cam_vfe_bus_ver2_wm_resource_data   *rsrc_data =
 		wm_res->res_priv;
 	struct cam_vfe_bus_ver2_common_data        *common_data =
 		rsrc_data->common_data;
-	uint32_t                   bus_irq_reg_mask[CAM_VFE_BUS_IRQ_MAX] = {0};
 	uint32_t camera_hw_version;
 
 	cam_io_w(0xf, common_data->mem_base + rsrc_data->hw_regs->burst_limit);
@@ -1155,23 +1206,7 @@
 		cam_io_w_mb(rsrc_data->stride, (common_data->mem_base +
 			rsrc_data->hw_regs->stride));
 
-	/* Subscribe IRQ */
-	if (rsrc_data->irq_enabled) {
-		CAM_DBG(CAM_ISP, "Subscribe WM%d IRQ", rsrc_data->index);
-		bus_irq_reg_mask[CAM_VFE_BUS_IRQ_REG1] =
-			(1 << rsrc_data->index);
-		wm_res->irq_handle = cam_irq_controller_subscribe_irq(
-			common_data->bus_irq_controller, CAM_IRQ_PRIORITY_1,
-			bus_irq_reg_mask, wm_res,
-			wm_res->top_half_handler,
-			cam_ife_mgr_do_tasklet_buf_done,
-			wm_res->tasklet_info, &tasklet_bh_api);
-		if (wm_res->irq_handle < 0) {
-			CAM_ERR(CAM_ISP, "Subscribe IRQ failed for WM %d",
-				rsrc_data->index);
-			return -EFAULT;
-		}
-	}
+	bus_irq_reg_mask[CAM_VFE_BUS_IRQ_REG1] = (1 << rsrc_data->index);
 
 	/* enable ubwc if needed */
 	if (rsrc_data->en_ubwc) {
@@ -1249,13 +1284,6 @@
 		common_data->mem_base + rsrc_data->hw_regs->cfg);
 
 	/* Disable all register access, rely on global reset */
-	CAM_DBG(CAM_ISP, "WM res %d irq_enabled %d",
-		rsrc_data->index, rsrc_data->irq_enabled);
-	/* Unsubscribe IRQ */
-	if (rsrc_data->irq_enabled)
-		rc = cam_irq_controller_unsubscribe_irq(
-			common_data->bus_irq_controller,
-			wm_res->irq_handle);
 
 	wm_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
 	rsrc_data->init_cfg_done = false;
@@ -1267,63 +1295,36 @@
 static int cam_vfe_bus_handle_wm_done_top_half(uint32_t evt_id,
 	struct cam_irq_th_payload *th_payload)
 {
-	int32_t                                     rc;
-	int                                         i;
-	struct cam_isp_resource_node               *wm_res = NULL;
-	struct cam_vfe_bus_ver2_wm_resource_data   *rsrc_data = NULL;
-	struct cam_vfe_bus_irq_evt_payload         *evt_payload;
-
-	wm_res = th_payload->handler_priv;
-	if (!wm_res) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP, "Error: No resource");
-		return -ENODEV;
-	}
-
-	rsrc_data = wm_res->res_priv;
-
-	CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
-	CAM_DBG(CAM_ISP, "IRQ status_1 = 0x%x", th_payload->evt_status_arr[1]);
-
-	rc  = cam_vfe_bus_get_evt_payload(rsrc_data->common_data, &evt_payload);
-	if (rc) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"IRQ status_0 = 0x%x status_1 = 0x%x status_2 = 0x%x",
-			th_payload->evt_status_arr[0],
-			th_payload->evt_status_arr[1],
-			th_payload->evt_status_arr[2]);
-
-		return rc;
-	}
-
-	cam_isp_hw_get_timestamp(&evt_payload->ts);
-
-	evt_payload->ctx = rsrc_data->ctx;
-	evt_payload->core_index = rsrc_data->common_data->core_index;
-	evt_payload->evt_id  = evt_id;
-
-	for (i = 0; i < th_payload->num_registers; i++)
-		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
-
-	th_payload->evt_payload_priv = evt_payload;
-
-	CAM_DBG(CAM_ISP, "Exit");
-	return rc;
+	return -EPERM;
 }
 
-static int cam_vfe_bus_handle_wm_done_bottom_half(void *wm_node,
+static int cam_vfe_bus_handle_wm_done_bottom_half(void *handler_priv,
 	void *evt_payload_priv)
 {
 	int rc = CAM_VFE_IRQ_STATUS_ERR;
-	struct cam_isp_resource_node          *wm_res = wm_node;
+	struct cam_isp_resource_node          *wm_res = handler_priv;
 	struct cam_vfe_bus_irq_evt_payload    *evt_payload = evt_payload_priv;
 	struct cam_vfe_bus_ver2_wm_resource_data *rsrc_data =
 		(wm_res == NULL) ? NULL : wm_res->res_priv;
 	uint32_t  *cam_ife_irq_regs;
 	uint32_t   status_reg;
 
-	if (!evt_payload || !rsrc_data)
+	if (!evt_payload || !wm_res || !rsrc_data)
 		return rc;
 
+	CAM_DBG(CAM_ISP, "addr of evt_payload = %llx core index:0x%x",
+		(uint64_t)evt_payload, evt_payload->core_index);
+	CAM_DBG(CAM_ISP, "bus_irq_status_0: = %x", evt_payload->irq_reg_val[0]);
+	CAM_DBG(CAM_ISP, "bus_irq_status_1: = %x", evt_payload->irq_reg_val[1]);
+	CAM_DBG(CAM_ISP, "bus_irq_status_2: = %x", evt_payload->irq_reg_val[2]);
+	CAM_DBG(CAM_ISP, "bus_irq_comp_err: = %x", evt_payload->irq_reg_val[3]);
+	CAM_DBG(CAM_ISP, "bus_irq_comp_owrt: = %x",
+		evt_payload->irq_reg_val[4]);
+	CAM_DBG(CAM_ISP, "bus_irq_dual_comp_err: = %x",
+		evt_payload->irq_reg_val[5]);
+	CAM_DBG(CAM_ISP, "bus_irq_dual_comp_owrt: = %x",
+		evt_payload->irq_reg_val[6]);
+
 	cam_ife_irq_regs = evt_payload->irq_reg_val;
 	status_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS1];
 
@@ -1331,30 +1332,29 @@
 		cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS1] &=
 			~BIT(rsrc_data->index);
 		rc = CAM_VFE_IRQ_STATUS_SUCCESS;
+		evt_payload->evt_id = CAM_ISP_HW_EVENT_DONE;
 	}
 	CAM_DBG(CAM_ISP, "status_reg %x rc %d wm_idx %d",
 		status_reg, rc, rsrc_data->index);
 
-	if (rc == CAM_VFE_IRQ_STATUS_SUCCESS)
-		cam_vfe_bus_put_evt_payload(rsrc_data->common_data,
-			&evt_payload);
-
 	return rc;
 }
 
 
-static int cam_vfe_bus_err_bottom_half(void *ctx_priv,
+static int cam_vfe_bus_err_bottom_half(void *handler_priv,
 	void *evt_payload_priv)
 {
-	struct cam_vfe_bus_irq_evt_payload *evt_payload;
+	struct cam_vfe_bus_irq_evt_payload *evt_payload = evt_payload_priv;
+	struct cam_vfe_bus_ver2_priv *bus_priv = handler_priv;
 	struct cam_vfe_bus_ver2_common_data *common_data;
+	struct cam_isp_hw_event_info evt_info;
 	uint32_t val = 0;
 
-	if (!ctx_priv || !evt_payload_priv)
+	if (!handler_priv || !evt_payload_priv)
 		return -EINVAL;
 
 	evt_payload = evt_payload_priv;
-	common_data = evt_payload->ctx;
+	common_data = &bus_priv->common_data;
 
 	val = evt_payload->debug_status_0;
 	CAM_ERR(CAM_ISP, "Bus Violation: debug_status_0 = 0x%x", val);
@@ -1432,6 +1432,15 @@
 		CAM_INFO(CAM_ISP, "DISP YC 16:1 violation");
 
 	cam_vfe_bus_put_evt_payload(common_data, &evt_payload);
+
+	evt_info.hw_idx = common_data->core_index;
+	evt_info.res_type = CAM_ISP_RESOURCE_VFE_OUT;
+	evt_info.res_id = CAM_VFE_BUS_VER2_VFE_OUT_MAX;
+
+	if (common_data->event_cb)
+		common_data->event_cb(NULL, CAM_ISP_HW_EVENT_ERROR,
+			(void *)&evt_info);
+
 	return 0;
 }
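
A hedged sketch of a receiver for this new notification path (the handler name and body are illustrative; the signature mirrors how event_cb is invoked above with (priv, evt_id, payload)):

static int demo_isp_event_cb(void *priv, uint32_t evt_id, void *evt_data)
{
	struct cam_isp_hw_event_info *evt_info = evt_data;

	if (evt_id == CAM_ISP_HW_EVENT_ERROR &&
		evt_info->res_id == CAM_VFE_BUS_VER2_VFE_OUT_MAX) {
		/* res_id == OUT_MAX marks a bus-wide violation, not one port */
		CAM_ERR(CAM_ISP, "bus violation on VFE:%d", evt_info->hw_idx);
	}
	return 0;
}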
 
@@ -1457,8 +1466,8 @@
 	wm_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
 	INIT_LIST_HEAD(&wm_res->list);
 
-	wm_res->start = cam_vfe_bus_start_wm;
-	wm_res->stop = cam_vfe_bus_stop_wm;
+	wm_res->start = NULL;
+	wm_res->stop = NULL;
 	wm_res->top_half_handler = cam_vfe_bus_handle_wm_done_top_half;
 	wm_res->bottom_half_handler = cam_vfe_bus_handle_wm_done_bottom_half;
 	wm_res->hw_intf = ver2_bus_priv->common_data.hw_intf;
@@ -1525,7 +1534,6 @@
 	struct cam_vfe_bus_ver2_priv        *ver2_bus_priv,
 	struct cam_isp_out_port_info        *out_port_info,
 	void                                *tasklet,
-	void                                *ctx,
 	uint32_t                             unique_id,
 	uint32_t                             is_dual,
 	uint32_t                             is_master,
@@ -1607,7 +1615,6 @@
 
 	CAM_DBG(CAM_ISP, "Comp Grp type %u", rsrc_data->comp_grp_type);
 
-	rsrc_data->ctx = ctx;
 	rsrc_data->acquire_dev_cnt++;
 	*comp_grp = comp_grp_local;
 
@@ -1684,7 +1691,9 @@
 	return 0;
 }
 
-static int cam_vfe_bus_start_comp_grp(struct cam_isp_resource_node *comp_grp)
+static int cam_vfe_bus_start_comp_grp(
+	struct cam_isp_resource_node *comp_grp,
+	uint32_t                     *bus_irq_reg_mask)
 {
 	int rc = 0;
 	uint32_t addr_sync_cfg;
@@ -1692,7 +1701,6 @@
 		comp_grp->res_priv;
 	struct cam_vfe_bus_ver2_common_data        *common_data =
 		rsrc_data->common_data;
-	uint32_t bus_irq_reg_mask[CAM_VFE_BUS_IRQ_MAX] = {0};
 
 	CAM_DBG(CAM_ISP, "comp group id:%d streaming state:%d",
 		rsrc_data->comp_grp_type, comp_grp->res_state);
@@ -1762,28 +1770,8 @@
 			(1 << (rsrc_data->comp_grp_type + 5));
 	}
 
-	/*
-	 * For Dual composite subscribe IRQ only for master
-	 * For regular composite, subscribe IRQ always
-	 */
-	CAM_DBG(CAM_ISP, "Subscribe COMP_GRP%d IRQ", rsrc_data->comp_grp_type);
-	if (((rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 &&
-		rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5) &&
-		(rsrc_data->is_master)) ||
-		(rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_0 &&
-		rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_5)) {
-		comp_grp->irq_handle = cam_irq_controller_subscribe_irq(
-			common_data->bus_irq_controller, CAM_IRQ_PRIORITY_1,
-			bus_irq_reg_mask, comp_grp,
-			comp_grp->top_half_handler,
-			cam_ife_mgr_do_tasklet_buf_done,
-			comp_grp->tasklet_info, &tasklet_bh_api);
-		if (comp_grp->irq_handle < 0) {
-			CAM_ERR(CAM_ISP, "Subscribe IRQ failed for comp_grp %d",
-				rsrc_data->comp_grp_type);
-			return -EFAULT;
-		}
-	}
+	CAM_DBG(CAM_ISP, "VFE start COMP_GRP%d", rsrc_data->comp_grp_type);
+
 	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
 
 	return rc;
@@ -1792,21 +1780,7 @@
 static int cam_vfe_bus_stop_comp_grp(struct cam_isp_resource_node *comp_grp)
 {
 	int rc = 0;
-	struct cam_vfe_bus_ver2_comp_grp_data      *rsrc_data =
-		comp_grp->res_priv;
-	struct cam_vfe_bus_ver2_common_data        *common_data =
-		rsrc_data->common_data;
 
-	/* Unsubscribe IRQ */
-	if (((rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 &&
-		rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5) &&
-		(rsrc_data->is_master)) ||
-		(rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_0 &&
-		rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_5)) {
-		rc = cam_irq_controller_unsubscribe_irq(
-			common_data->bus_irq_controller,
-			comp_grp->irq_handle);
-	}
 	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
 
 	return rc;
@@ -1815,55 +1789,11 @@
 static int cam_vfe_bus_handle_comp_done_top_half(uint32_t evt_id,
 	struct cam_irq_th_payload *th_payload)
 {
-	int32_t                                     rc;
-	int                                         i;
-	struct cam_isp_resource_node               *comp_grp = NULL;
-	struct cam_vfe_bus_ver2_comp_grp_data      *rsrc_data = NULL;
-	struct cam_vfe_bus_irq_evt_payload         *evt_payload;
-
-	comp_grp = th_payload->handler_priv;
-	if (!comp_grp) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP, "No resource");
-		return -ENODEV;
-	}
-
-	rsrc_data = comp_grp->res_priv;
-
-	CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
-	CAM_DBG(CAM_ISP, "IRQ status_1 = 0x%x", th_payload->evt_status_arr[1]);
-	CAM_DBG(CAM_ISP, "IRQ status_2 = 0x%x", th_payload->evt_status_arr[2]);
-
-	rc  = cam_vfe_bus_get_evt_payload(rsrc_data->common_data, &evt_payload);
-	if (rc) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"No tasklet_cmd is free in queue");
-		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"IRQ status_0 = 0x%x status_1 = 0x%x status_2 = 0x%x",
-			th_payload->evt_status_arr[0],
-			th_payload->evt_status_arr[1],
-			th_payload->evt_status_arr[2]);
-
-		return rc;
-	}
-
-	cam_isp_hw_get_timestamp(&evt_payload->ts);
-
-	evt_payload->ctx = rsrc_data->ctx;
-	evt_payload->core_index = rsrc_data->common_data->core_index;
-	evt_payload->evt_id  = evt_id;
-
-	for (i = 0; i < th_payload->num_registers; i++)
-		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
-
-	th_payload->evt_payload_priv = evt_payload;
-
-	CAM_DBG(CAM_ISP, "Exit");
-	return rc;
+	return -EPERM;
 }
 
-static int cam_vfe_bus_handle_comp_done_bottom_half(
-	void                *handler_priv,
-	void                *evt_payload_priv)
+static int cam_vfe_bus_handle_comp_done_bottom_half(void *handler_priv,
+	void  *evt_payload_priv, uint32_t *comp_mask)
 {
 	int rc = CAM_VFE_IRQ_STATUS_ERR;
 	struct cam_isp_resource_node          *comp_grp = handler_priv;
@@ -1898,6 +1828,7 @@
 		if ((status_reg & BIT(11)) &&
 			(comp_err_reg & rsrc_data->composite_mask)) {
 			/* Check for Regular composite error */
+			evt_payload->evt_id = CAM_ISP_HW_EVENT_ERROR;
 			rc = CAM_VFE_IRQ_STATUS_ERR_COMP;
 			break;
 		}
@@ -1906,19 +1837,14 @@
 		/* Check for Regular composite Overwrite */
 		if ((status_reg & BIT(12)) &&
 			(comp_err_reg & rsrc_data->composite_mask)) {
+			evt_payload->evt_id = CAM_ISP_HW_EVENT_ERROR;
 			rc = CAM_VFE_IRQ_STATUS_COMP_OWRT;
 			break;
 		}
 
 		/* Regular Composite SUCCESS */
 		if (status_reg & BIT(comp_grp_id + 5)) {
-			rsrc_data->irq_trigger_cnt++;
-			if (rsrc_data->irq_trigger_cnt ==
-				rsrc_data->acquire_dev_cnt) {
-				cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS0] &=
-					~BIT(comp_grp_id + 5);
-				rsrc_data->irq_trigger_cnt = 0;
-			}
+			evt_payload->evt_id = CAM_ISP_HW_EVENT_DONE;
 			rc = CAM_VFE_IRQ_STATUS_SUCCESS;
 		}
 
@@ -1942,6 +1868,7 @@
 		if ((status_reg & BIT(6)) &&
 			(comp_err_reg & rsrc_data->composite_mask)) {
 			/* Check for DUAL composite error */
+			evt_payload->evt_id = CAM_ISP_HW_EVENT_ERROR;
 			rc = CAM_VFE_IRQ_STATUS_ERR_COMP;
 			break;
 		}
@@ -1950,19 +1877,14 @@
 		comp_err_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_DUAL_COMP_OWRT];
 		if ((status_reg & BIT(7)) &&
 			(comp_err_reg & rsrc_data->composite_mask)) {
+			evt_payload->evt_id = CAM_ISP_HW_EVENT_ERROR;
 			rc = CAM_VFE_IRQ_STATUS_COMP_OWRT;
 			break;
 		}
 
 		/* DUAL Composite SUCCESS */
 		if (status_reg & BIT(comp_grp_id)) {
-			rsrc_data->irq_trigger_cnt++;
-			if (rsrc_data->irq_trigger_cnt ==
-				rsrc_data->acquire_dev_cnt) {
-				cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS2] &=
-					~BIT(comp_grp_id);
-				rsrc_data->irq_trigger_cnt = 0;
-			}
+			evt_payload->evt_id = CAM_ISP_HW_EVENT_DONE;
 			rc = CAM_VFE_IRQ_STATUS_SUCCESS;
 		}
 
@@ -1974,9 +1896,7 @@
 		break;
 	}
 
-	if (rc == CAM_VFE_IRQ_STATUS_SUCCESS)
-		cam_vfe_bus_put_evt_payload(rsrc_data->common_data,
-			&evt_payload);
+	*comp_mask = rsrc_data->composite_mask;
 
 	return rc;
 }
@@ -2011,11 +1931,10 @@
 		&& rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_5)
 		list_add_tail(&comp_grp->list, &ver2_bus_priv->free_comp_grp);
 
-	comp_grp->start = cam_vfe_bus_start_comp_grp;
-	comp_grp->stop = cam_vfe_bus_stop_comp_grp;
+	comp_grp->start = NULL;
+	comp_grp->stop = NULL;
 	comp_grp->top_half_handler = cam_vfe_bus_handle_comp_done_top_half;
-	comp_grp->bottom_half_handler =
-		cam_vfe_bus_handle_comp_done_bottom_half;
+	comp_grp->bottom_half_handler = NULL;
 	comp_grp->hw_intf = ver2_bus_priv->common_data.hw_intf;
 
 	return 0;
@@ -2071,7 +1990,6 @@
 	enum cam_vfe_bus_ver2_vfe_out_type      vfe_out_res_id;
 	uint32_t                                format;
 	int                                     num_wm;
-	uint32_t                                subscribe_irq;
 	uint32_t                                client_done_mask;
 	struct cam_vfe_bus_ver2_priv           *ver2_bus_priv = bus_priv;
 	struct cam_vfe_acquire_args            *acq_args = acquire_args;
@@ -2108,8 +2026,10 @@
 	}
 
 	rsrc_data = rsrc_node->res_priv;
-	secure_caps = cam_vfe_bus_can_be_secure(
-		rsrc_data->out_type);
+	rsrc_data->common_data->event_cb = acq_args->event_cb;
+	rsrc_data->priv = acq_args->priv;
+
+	secure_caps = cam_vfe_bus_can_be_secure(rsrc_data->out_type);
 	mode = out_acquire_args->out_port_info->secure_mode;
 	mutex_lock(&rsrc_data->common_data->bus_mutex);
 	if (secure_caps) {
@@ -2152,7 +2072,6 @@
 		rc = cam_vfe_bus_acquire_comp_grp(ver2_bus_priv,
 			out_acquire_args->out_port_info,
 			acq_args->tasklet,
-			out_acquire_args->ctx,
 			out_acquire_args->unique_id,
 			out_acquire_args->is_dual,
 			out_acquire_args->is_master,
@@ -2165,10 +2084,6 @@
 				vfe_out_res_id, rc);
 			return rc;
 		}
-
-		subscribe_irq = 0;
-	} else {
-		subscribe_irq = 1;
 	}
 
 	/* Reserve WM */
@@ -2176,10 +2091,8 @@
 		rc = cam_vfe_bus_acquire_wm(ver2_bus_priv,
 			out_acquire_args->out_port_info,
 			acq_args->tasklet,
-			out_acquire_args->ctx,
 			vfe_out_res_id,
 			i,
-			subscribe_irq,
 			&rsrc_data->wm_res[i],
 			&client_done_mask,
 			out_acquire_args->is_dual);
@@ -2284,6 +2197,7 @@
 	int rc = 0, i;
 	struct cam_vfe_bus_ver2_vfe_out_data  *rsrc_data = NULL;
 	struct cam_vfe_bus_ver2_common_data   *common_data = NULL;
+	uint32_t bus_irq_reg_mask[CAM_VFE_BUS_IRQ_MAX];
 
 	if (!vfe_out) {
 		CAM_ERR(CAM_ISP, "Invalid input");
@@ -2301,11 +2215,29 @@
 		return -EACCES;
 	}
 
+	memset(bus_irq_reg_mask, 0, sizeof(bus_irq_reg_mask));
 	for (i = 0; i < rsrc_data->num_wm; i++)
-		rc = cam_vfe_bus_start_wm(rsrc_data->wm_res[i]);
+		rc = cam_vfe_bus_start_wm(rsrc_data->wm_res[i],
+			bus_irq_reg_mask);
 
-	if (rsrc_data->comp_grp)
-		rc = cam_vfe_bus_start_comp_grp(rsrc_data->comp_grp);
+	if (rsrc_data->comp_grp) {
+		memset(bus_irq_reg_mask, 0, sizeof(bus_irq_reg_mask));
+		rc = cam_vfe_bus_start_comp_grp(rsrc_data->comp_grp,
+			bus_irq_reg_mask);
+	}
+
+	vfe_out->irq_handle = cam_irq_controller_subscribe_irq(
+		common_data->bus_irq_controller, CAM_IRQ_PRIORITY_1,
+		bus_irq_reg_mask, vfe_out, vfe_out->top_half_handler,
+		vfe_out->bottom_half_handler, vfe_out->tasklet_info,
+		&tasklet_bh_api);
+
+	if (vfe_out->irq_handle < 1) {
+		CAM_ERR(CAM_ISP, "Subscribe IRQ failed for res_id %d",
+			vfe_out->res_id);
+		vfe_out->irq_handle = 0;
+		return -EFAULT;
+	}
 
 	vfe_out->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
 	return rc;
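
Worth spelling out in the start path above: each WM ORs its done-bit into a shared mask, but the mask is cleared again when a composite group exists, since comp-done supersedes the per-WM bits; either way exactly one IRQ handle now belongs to the vfe_out node. Condensed recap (names from the patch, comments mine):

	uint32_t mask[CAM_VFE_BUS_IRQ_MAX] = {0};

	for (i = 0; i < rsrc_data->num_wm; i++)
		cam_vfe_bus_start_wm(rsrc_data->wm_res[i], mask);
	if (rsrc_data->comp_grp) {
		memset(mask, 0, sizeof(mask));	/* comp-done replaces WM bits */
		cam_vfe_bus_start_comp_grp(rsrc_data->comp_grp, mask);
	}
	vfe_out->irq_handle = cam_irq_controller_subscribe_irq(
		common_data->bus_irq_controller, CAM_IRQ_PRIORITY_1,
		mask, vfe_out, vfe_out->top_half_handler,
		vfe_out->bottom_half_handler, vfe_out->tasklet_info,
		&tasklet_bh_api);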
@@ -2316,6 +2248,7 @@
 {
 	int rc = 0, i;
 	struct cam_vfe_bus_ver2_vfe_out_data  *rsrc_data = NULL;
+	struct cam_vfe_bus_ver2_common_data   *common_data = NULL;
 
 	if (!vfe_out) {
 		CAM_ERR(CAM_ISP, "Invalid input");
@@ -2323,6 +2256,7 @@
 	}
 
 	rsrc_data = vfe_out->res_priv;
+	common_data = rsrc_data->common_data;
 
 	if (vfe_out->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE ||
 		vfe_out->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
@@ -2336,6 +2270,13 @@
 	for (i = 0; i < rsrc_data->num_wm; i++)
 		rc = cam_vfe_bus_stop_wm(rsrc_data->wm_res[i]);
 
+	if (vfe_out->irq_handle) {
+		rc = cam_irq_controller_unsubscribe_irq(
+			common_data->bus_irq_controller,
+			vfe_out->irq_handle);
+		vfe_out->irq_handle = 0;
+	}
+
 	vfe_out->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
 	return rc;
 }
@@ -2343,16 +2284,66 @@
 static int cam_vfe_bus_handle_vfe_out_done_top_half(uint32_t evt_id,
 	struct cam_irq_th_payload *th_payload)
 {
-	return -EPERM;
+	int32_t                                     rc;
+	int                                         i;
+	struct cam_isp_resource_node               *vfe_out = NULL;
+	struct cam_vfe_bus_ver2_vfe_out_data       *rsrc_data = NULL;
+	struct cam_vfe_bus_irq_evt_payload         *evt_payload;
+
+	vfe_out = th_payload->handler_priv;
+	if (!vfe_out) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No resource");
+		return -ENODEV;
+	}
+
+	rsrc_data = vfe_out->res_priv;
+
+	CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
+	CAM_DBG(CAM_ISP, "IRQ status_1 = 0x%x", th_payload->evt_status_arr[1]);
+	CAM_DBG(CAM_ISP, "IRQ status_2 = 0x%x", th_payload->evt_status_arr[2]);
+
+	rc  = cam_vfe_bus_get_evt_payload(rsrc_data->common_data, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"IRQ status_0 = 0x%x status_1 = 0x%x status_2 = 0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1],
+			th_payload->evt_status_arr[2]);
+
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	evt_payload->core_index = rsrc_data->common_data->core_index;
+	evt_payload->evt_id  = evt_id;
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
 }
 
 static int cam_vfe_bus_handle_vfe_out_done_bottom_half(
-	void                *handler_priv,
-	void                *evt_payload_priv)
+	void                                     *handler_priv,
+	void                                     *evt_payload_priv)
 {
 	int rc = -EINVAL;
-	struct cam_isp_resource_node         *vfe_out = handler_priv;
-	struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = vfe_out->res_priv;
+	struct cam_isp_resource_node             *vfe_out = handler_priv;
+	struct cam_vfe_bus_ver2_vfe_out_data     *rsrc_data = vfe_out->res_priv;
+	struct cam_isp_hw_event_info              evt_info;
+	void                                     *ctx = NULL;
+	uint32_t                                  evt_id = 0;
+	uint32_t                                  comp_mask = 0;
+	int                                       num_out = 0, i = 0;
+	struct cam_vfe_bus_irq_evt_payload       *evt_payload =
+		evt_payload_priv;
+	uint32_t                   out_list[CAM_VFE_BUS_VER2_VFE_OUT_MAX] = {0};
 
 	/*
 	 * If this resource has Composite Group then we only handle
@@ -2360,13 +2351,42 @@
 	 * So Else case is only one individual buf_done = WM[0].
 	 */
 	if (rsrc_data->comp_grp) {
-		rc = rsrc_data->comp_grp->bottom_half_handler(
-			rsrc_data->comp_grp, evt_payload_priv);
+		rc = cam_vfe_bus_handle_comp_done_bottom_half(
+			rsrc_data->comp_grp, evt_payload_priv, &comp_mask);
 	} else {
 		rc = rsrc_data->wm_res[0]->bottom_half_handler(
 			rsrc_data->wm_res[0], evt_payload_priv);
 	}
 
+	ctx = rsrc_data->priv;
+
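+	/* On buf done success, notify the HW manager per out resource */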
+	switch (rc) {
+	case CAM_VFE_IRQ_STATUS_SUCCESS:
+		evt_id = evt_payload->evt_id;
+
+		evt_info.res_type = vfe_out->res_type;
+		evt_info.hw_idx   = vfe_out->hw_intf->hw_idx;
+		if (rsrc_data->comp_grp) {
+			cam_vfe_bus_get_comp_vfe_out_res_id_list(
+				comp_mask, out_list, &num_out);
+			for (i = 0; i < num_out; i++) {
+				evt_info.res_id = out_list[i];
+				if (rsrc_data->common_data->event_cb)
+					rsrc_data->common_data->event_cb(ctx,
+						evt_id, (void *)&evt_info);
+			}
+		} else {
+			evt_info.res_id = vfe_out->res_id;
+			if (rsrc_data->common_data->event_cb)
+				rsrc_data->common_data->event_cb(ctx, evt_id,
+					(void *)&evt_info);
+		}
+		break;
+	default:
+		break;
+	}
+
+	cam_vfe_bus_put_evt_payload(rsrc_data->common_data, &evt_payload);
 	CAM_DBG(CAM_ISP, "vfe_out %d rc %d", rsrc_data->out_type, rc);
 
 	return rc;
@@ -2426,6 +2446,7 @@
 		cam_vfe_bus_handle_vfe_out_done_bottom_half;
 	vfe_out->process_cmd = cam_vfe_bus_process_cmd;
 	vfe_out->hw_intf = ver2_bus_priv->common_data.hw_intf;
+	vfe_out->irq_handle = 0;
 
 	return 0;
 }
@@ -2449,6 +2470,7 @@
 	vfe_out->top_half_handler = NULL;
 	vfe_out->bottom_half_handler = NULL;
 	vfe_out->hw_intf = NULL;
+	vfe_out->irq_handle = 0;
 
 	vfe_out->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
 	INIT_LIST_HEAD(&vfe_out->list);
@@ -2502,7 +2524,6 @@
 
 	evt_payload->core_index = bus_priv->common_data.core_index;
 	evt_payload->evt_id  = evt_id;
-	evt_payload->ctx = &bus_priv->common_data;
 	evt_payload->debug_status_0 = cam_io_r_mb(
 		bus_priv->common_data.mem_base +
 		bus_priv->common_data.common_reg->debug_status_0);
@@ -3381,8 +3402,9 @@
 		NULL,
 		NULL);
 
-	if ((int)bus_priv->irq_handle <= 0) {
+	if (bus_priv->irq_handle < 1) {
 		CAM_ERR(CAM_ISP, "Failed to subscribe BUS IRQ");
+		bus_priv->irq_handle = 0;
 		return -EFAULT;
 	}
 
@@ -3397,9 +3419,10 @@
 			bus_priv->tasklet_info,
 			&tasklet_bh_api);
 
-		if ((int)bus_priv->error_irq_handle <= 0) {
+		if (bus_priv->error_irq_handle < 1) {
 			CAM_ERR(CAM_ISP, "Failed to subscribe BUS error IRQ %d",
 				bus_priv->error_irq_handle);
+			bus_priv->error_irq_handle = 0;
 			return -EFAULT;
 		}
 	}
@@ -3441,10 +3464,6 @@
 		rc = cam_irq_controller_unsubscribe_irq(
 			bus_priv->common_data.bus_irq_controller,
 			bus_priv->error_irq_handle);
-		if (rc)
-			CAM_ERR(CAM_ISP,
-				"Failed to unsubscribe error irq rc=%d", rc);
-
 		bus_priv->error_irq_handle = 0;
 	}
 
@@ -3452,10 +3471,6 @@
 		rc = cam_irq_controller_unsubscribe_irq(
 			bus_priv->common_data.vfe_irq_controller,
 			bus_priv->irq_handle);
-		if (rc)
-			CAM_ERR(CAM_ISP,
-				"Failed to unsubscribe irq rc=%d", rc);
-
 		bus_priv->irq_handle = 0;
 	}
 
@@ -3507,11 +3522,6 @@
 			rc = cam_irq_controller_unsubscribe_irq(
 				bus_priv->common_data.bus_irq_controller,
 				bus_priv->error_irq_handle);
-			if (rc)
-				CAM_ERR(CAM_ISP,
-					"Failed to unsubscribe error irq rc=%d",
-					rc);
-
 			bus_priv->error_irq_handle = 0;
 		}
 		break;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c
index 3358c1e..803e193 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c
@@ -45,11 +45,6 @@
 	0x00000000,
 };
 
-static uint32_t rup_irq_mask[2] = {
-	0x0000003F,
-	0x00000000,
-};
-
 enum cam_vfe_bus_ver3_packer_format {
 	PACKER_FMT_VER3_PLAIN_128,
 	PACKER_FMT_VER3_PLAIN_8,
@@ -84,14 +79,16 @@
 	uint32_t                                    secure_mode;
 	uint32_t                                    num_sec_out;
 	uint32_t                                    addr_no_sync;
+	uint32_t                                    comp_done_shift;
 	bool                                        is_lite;
+	cam_hw_mgr_event_cb_func                    event_cb;
+	int                        rup_irq_handle[CAM_VFE_BUS_VER3_SRC_GRP_MAX];
 };
 
 struct cam_vfe_bus_ver3_wm_resource_data {
 	uint32_t             index;
 	struct cam_vfe_bus_ver3_common_data            *common_data;
 	struct cam_vfe_bus_ver3_reg_offset_bus_client  *hw_regs;
-	void                                           *ctx;
 
 	bool                 init_cfg_done;
 	bool                 hfr_cfg_done;
@@ -132,25 +129,22 @@
 struct cam_vfe_bus_ver3_comp_grp_data {
 	enum cam_vfe_bus_ver3_comp_grp_type          comp_grp_type;
 	struct cam_vfe_bus_ver3_common_data         *common_data;
-	struct cam_vfe_bus_ver3_reg_offset_comp_grp *hw_regs;
-
-	uint32_t                                     irq_enabled;
 
 	uint32_t                                     is_master;
 	uint32_t                                     is_dual;
 	uint32_t                                     dual_slave_core;
 	uint32_t                                     intra_client_mask;
 	uint32_t                                     addr_sync_mode;
+	uint32_t                                     composite_mask;
 
 	uint32_t                                     acquire_dev_cnt;
 	uint32_t                                     irq_trigger_cnt;
 	uint32_t                                     ubwc_static_ctrl;
-
-	void                                        *ctx;
 };
 
 struct cam_vfe_bus_ver3_vfe_out_data {
 	uint32_t                              out_type;
+	uint32_t                              source_group;
 	struct cam_vfe_bus_ver3_common_data  *common_data;
 
 	uint32_t                         num_wm;
@@ -166,12 +160,14 @@
 	uint32_t                         max_height;
 	struct cam_cdm_utils_ops        *cdm_util_ops;
 	uint32_t                         secure_mode;
+	void                            *priv;
 };
 
 struct cam_vfe_bus_ver3_priv {
 	struct cam_vfe_bus_ver3_common_data common_data;
 	uint32_t                            num_client;
 	uint32_t                            num_out;
+	uint32_t                            top_irq_shift;
 
 	struct cam_isp_resource_node  bus_client[CAM_VFE_BUS_VER3_MAX_CLIENTS];
 	struct cam_isp_resource_node  comp_grp[CAM_VFE_BUS_VER3_COMP_GRP_MAX];
@@ -180,9 +176,8 @@
 	struct list_head                    free_comp_grp;
 	struct list_head                    used_comp_grp;
 
-	uint32_t                            irq_handle;
-	uint32_t                            error_irq_handle;
-	uint32_t                            rup_irq_handle;
+	int                                 irq_handle;
+	int                                 error_irq_handle;
 	void                               *tasklet_info;
 };
 
@@ -217,8 +212,6 @@
 	struct cam_vfe_bus_irq_evt_payload     **evt_payload)
 {
 	struct cam_vfe_bus_ver3_common_data *common_data = NULL;
-	uint32_t  *ife_irq_regs = NULL;
-	uint32_t   status_reg0, status_reg1;
 	unsigned long flags;
 
 	if (!core_info) {
@@ -229,16 +222,6 @@
 		CAM_ERR(CAM_ISP, "No payload to put");
 		return -EINVAL;
 	}
-	(*evt_payload)->error_type = 0;
-	ife_irq_regs = (*evt_payload)->irq_reg_val;
-	status_reg0 = ife_irq_regs[CAM_IFE_IRQ_BUS_VER3_REG_STATUS0];
-	status_reg1 = ife_irq_regs[CAM_IFE_IRQ_BUS_VER3_REG_STATUS1];
-
-	if (status_reg0 || status_reg1) {
-		CAM_DBG(CAM_ISP, "status0 0x%x status1 0x%x",
-			status_reg0, status_reg1);
-		return 0;
-	}
 
 	common_data = core_info;
 
@@ -320,7 +303,6 @@
 	case CAM_VFE_BUS_VER3_VFE_OUT_DS16_DISP:
 		return true;
 
-	case CAM_VFE_BUS_VER3_VFE_OUT_PDAF:
 	case CAM_VFE_BUS_VER3_VFE_OUT_2PD:
 	case CAM_VFE_BUS_VER3_VFE_OUT_LCR:
 	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BE:
@@ -351,8 +333,6 @@
 		return CAM_VFE_BUS_VER3_VFE_OUT_FD;
 	case CAM_ISP_IFE_OUT_RES_RAW_DUMP:
 		return CAM_VFE_BUS_VER3_VFE_OUT_RAW_DUMP;
-	case CAM_ISP_IFE_OUT_RES_PDAF:
-		return CAM_VFE_BUS_VER3_VFE_OUT_PDAF;
 	case CAM_ISP_IFE_OUT_RES_2PD:
 		return CAM_VFE_BUS_VER3_VFE_OUT_2PD;
 	case CAM_ISP_IFE_OUT_RES_RDI_0:
@@ -484,17 +464,6 @@
 			break;
 		}
 		break;
-	case CAM_VFE_BUS_VER3_VFE_OUT_PDAF:
-		switch (format) {
-		case CAM_FORMAT_PLAIN8:
-		case CAM_FORMAT_PLAIN16_10:
-		case CAM_FORMAT_PLAIN16_12:
-		case CAM_FORMAT_PLAIN16_14:
-			return 1;
-		default:
-			break;
-		}
-		break;
 	case CAM_VFE_BUS_VER3_VFE_OUT_2PD:
 		switch (format) {
 		case CAM_FORMAT_PLAIN16_8:
@@ -647,19 +616,10 @@
 			break;
 		}
 		break;
-	case CAM_VFE_BUS_VER3_VFE_OUT_PDAF:
-		switch (plane) {
-		case PLANE_Y:
-			wm_idx = 21;
-			break;
-		default:
-			break;
-		}
-		break;
 	case CAM_VFE_BUS_VER3_VFE_OUT_2PD:
 		switch (plane) {
 		case PLANE_Y:
-			wm_idx = 11;
+			wm_idx = 21;
 			break;
 		default:
 			break;
@@ -791,6 +751,103 @@
 	return wm_idx;
 }
 
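+/*
+ * Translate a composite group client mask into the list of VFE out
+ * resources whose write masters signalled buf done. Multi-bit masks
+ * (e.g. 0x3 for FULL) cover multi-plane outputs.
+ */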
+static int cam_vfe_bus_ver3_get_comp_vfe_out_res_id_list(
+	uint32_t comp_mask, uint32_t *out_list, int *num_out, bool is_lite)
+{
+	int count = 0;
+
+	if (is_lite)
+		goto vfe_lite;
+
+	if (comp_mask & 0x3)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_FULL;
+
+	if (comp_mask & 0x4)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_DS4;
+
+	if (comp_mask & 0x8)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_DS16;
+
+	if (comp_mask & 0x30)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_FULL_DISP;
+
+	if (comp_mask & 0x40)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_DS4_DISP;
+
+	if (comp_mask & 0x80)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_DS16_DISP;
+
+	if (comp_mask & 0x300)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_FD;
+
+	if (comp_mask & 0x400)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_RAW_DUMP;
+
+	if (comp_mask & 0x800)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_2PD;
+
+	if (comp_mask & 0x1000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_HDR_BE;
+
+	if (comp_mask & 0x2000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_HDR_BHIST;
+
+	if (comp_mask & 0x4000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_TL_BG;
+
+	if (comp_mask & 0x8000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_AWB_BG;
+
+	if (comp_mask & 0x10000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_BHIST;
+
+	if (comp_mask & 0x20000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_RS;
+
+	if (comp_mask & 0x40000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_CS;
+
+	if (comp_mask & 0x80000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_IHIST;
+
+	if (comp_mask & 0x100000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_STATS_BF;
+
+	if (comp_mask & 0x200000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_PDAF;
+
+	if (comp_mask & 0x400000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_LCR;
+
+	if (comp_mask & 0x800000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_RDI_0;
+
+	if (comp_mask & 0x1000000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_RDI_1;
+
+	if (comp_mask & 0x2000000)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_RDI_2;
+
+	*num_out = count;
+	return 0;
+
+vfe_lite:
+	if (comp_mask & 0x1)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_RDI_0;
+
+	if (comp_mask & 0x2)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_RDI_1;
+
+	if (comp_mask & 0x4)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_RDI_2;
+
+	if (comp_mask & 0x8)
+		out_list[count++] = CAM_ISP_IFE_OUT_RES_RDI_3;
+
+	*num_out = count;
+	return 0;
+}
+
 static enum cam_vfe_bus_ver3_packer_format
 	cam_vfe_bus_ver3_get_packer_fmt(uint32_t out_fmt, int wm_index)
 {
@@ -844,35 +901,34 @@
 {
 	int32_t                                     rc;
 	int                                         i;
-	struct cam_vfe_bus_ver3_priv               *bus_priv;
+	struct cam_isp_resource_node               *vfe_out = NULL;
+	struct cam_vfe_bus_ver3_vfe_out_data       *rsrc_data = NULL;
 	struct cam_vfe_bus_irq_evt_payload         *evt_payload;
 
-	bus_priv = th_payload->handler_priv;
-	if (!bus_priv) {
+	vfe_out = th_payload->handler_priv;
+	if (!vfe_out) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP, "No resource");
 		return -ENODEV;
 	}
 
-	CAM_DBG(CAM_ISP, "bus_IRQ status_0 = 0x%x, bus_IRQ status_1 = 0x%x",
-		th_payload->evt_status_arr[0],
-		th_payload->evt_status_arr[1]);
+	rsrc_data = vfe_out->res_priv;
 
-	rc  = cam_vfe_bus_ver3_get_evt_payload(&bus_priv->common_data,
+	CAM_DBG(CAM_ISP, "BUS_IRQ status_0 = 0x%x",
+		th_payload->evt_status_arr[0]);
+
+	rc  = cam_vfe_bus_ver3_get_evt_payload(rsrc_data->common_data,
 		&evt_payload);
 	if (rc) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP,
 			"No tasklet_cmd is free in queue");
-		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"IRQ status_0 = 0x%x status_1 = 0x%x",
-			th_payload->evt_status_arr[0],
-			th_payload->evt_status_arr[1]);
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ status_0 = 0x%x",
+			th_payload->evt_status_arr[0]);
 
 		return rc;
 	}
 
-	evt_payload->core_index = bus_priv->common_data.core_index;
+	evt_payload->core_index = rsrc_data->common_data->core_index;
 	evt_payload->evt_id  = evt_id;
-	evt_payload->ctx = &bus_priv->common_data;
 	for (i = 0; i < th_payload->num_registers; i++)
 		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
 	th_payload->evt_payload_priv = evt_payload;
@@ -885,7 +941,10 @@
 {
 	int                                   ret = CAM_VFE_IRQ_STATUS_ERR;
 	struct cam_vfe_bus_irq_evt_payload   *payload;
-	uint32_t                              irq_status0;
+	struct cam_isp_resource_node         *vfe_out = NULL;
+	struct cam_vfe_bus_ver3_vfe_out_data *rsrc_data = NULL;
+	struct cam_isp_hw_event_info          evt_info;
+	uint32_t                              irq_status;
 
 	if (!handler_priv || !evt_payload_priv) {
 		CAM_ERR(CAM_ISP, "Invalid params");
@@ -893,18 +952,103 @@
 	}
 
 	payload = evt_payload_priv;
-	irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_BUS_VER3_REG_STATUS0];
+	vfe_out = handler_priv;
+	rsrc_data = vfe_out->res_priv;
 
-	if (irq_status0 & 0x3F) {
-		CAM_DBG(CAM_ISP, "Received REG_UPDATE_ACK");
-		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+	if (!rsrc_data->common_data->event_cb) {
+		CAM_ERR(CAM_ISP, "Callback to HW MGR not found");
+		return ret;
 	}
-	CAM_DBG(CAM_ISP,
-		"event ID:%d, bus_irq_status_0 = 0x%x returning status = %d",
-		payload->evt_id, irq_status0, ret);
 
-	if (ret == CAM_VFE_IRQ_STATUS_SUCCESS)
-		cam_vfe_bus_ver3_put_evt_payload(payload->ctx, &payload);
+	irq_status = payload->irq_reg_val[CAM_IFE_IRQ_BUS_VER3_REG_STATUS0];
+
+	evt_info.hw_idx = rsrc_data->common_data->core_index;
+	evt_info.res_type = CAM_ISP_RESOURCE_VFE_IN;
+
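+	/* Each set bit in status0 flags a reg-update for one source group */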
+	if (!rsrc_data->common_data->is_lite) {
+		if (irq_status & 0x1) {
+			CAM_DBG(CAM_ISP, "Received CAMIF RUP");
+			evt_info.res_id = CAM_ISP_HW_VFE_IN_CAMIF;
+			rsrc_data->common_data->event_cb(
+				rsrc_data->priv, CAM_ISP_HW_EVENT_REG_UPDATE,
+				(void *)&evt_info);
+		}
+
+		if (irq_status & 0x2) {
+			CAM_DBG(CAM_ISP, "Received PDLIB RUP");
+			evt_info.res_id = CAM_ISP_HW_VFE_IN_PDLIB;
+			rsrc_data->common_data->event_cb(
+				rsrc_data->priv, CAM_ISP_HW_EVENT_REG_UPDATE,
+				(void *)&evt_info);
+		}
+
+		if (irq_status & 0x4)
+			CAM_DBG(CAM_ISP, "Received LCR RUP");
+
+		if (irq_status & 0x8) {
+			CAM_DBG(CAM_ISP, "Received RDI0 RUP");
+			evt_info.res_id = CAM_ISP_HW_VFE_IN_RDI0;
+			rsrc_data->common_data->event_cb(
+				rsrc_data->priv, CAM_ISP_HW_EVENT_REG_UPDATE,
+				(void *)&evt_info);
+		}
+
+		if (irq_status & 0x10) {
+			CAM_DBG(CAM_ISP, "Received RDI1 RUP");
+			evt_info.res_id = CAM_ISP_HW_VFE_IN_RDI1;
+			rsrc_data->common_data->event_cb(
+				rsrc_data->priv, CAM_ISP_HW_EVENT_REG_UPDATE,
+				(void *)&evt_info);
+		}
+
+		if (irq_status & 0x20) {
+			CAM_DBG(CAM_ISP, "Received RDI2 RUP");
+			evt_info.res_id = CAM_ISP_HW_VFE_IN_RDI2;
+			rsrc_data->common_data->event_cb(
+				rsrc_data->priv, CAM_ISP_HW_EVENT_REG_UPDATE,
+				(void *)&evt_info);
+		}
+	} else {
+		if (irq_status & 0x1) {
+			CAM_DBG(CAM_ISP, "Received RDI0 RUP");
+			evt_info.res_id = CAM_ISP_HW_VFE_IN_RDI0;
+			rsrc_data->common_data->event_cb(
+				rsrc_data->priv, CAM_ISP_HW_EVENT_REG_UPDATE,
+				(void *)&evt_info);
+		}
+
+		if (irq_status & 0x2) {
+			CAM_DBG(CAM_ISP, "Received RDI1 RUP");
+			evt_info.res_id = CAM_ISP_HW_VFE_IN_RDI1;
+			rsrc_data->common_data->event_cb(
+				rsrc_data->priv, CAM_ISP_HW_EVENT_REG_UPDATE,
+				(void *)&evt_info);
+		}
+
+		if (irq_status & 0x4) {
+			CAM_DBG(CAM_ISP, "Received RDI2 RUP");
+			evt_info.res_id = CAM_ISP_HW_VFE_IN_RDI2;
+			rsrc_data->common_data->event_cb(
+				rsrc_data->priv, CAM_ISP_HW_EVENT_REG_UPDATE,
+				(void *)&evt_info);
+		}
+
+		if (irq_status & 0x8) {
+			CAM_DBG(CAM_ISP, "Received RDI3 RUP");
+			evt_info.res_id = CAM_ISP_HW_VFE_IN_RDI3;
+			rsrc_data->common_data->event_cb(
+				rsrc_data->priv, CAM_ISP_HW_EVENT_REG_UPDATE,
+				(void *)&evt_info);
+		}
+	}
+
+	ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+
+	CAM_DBG(CAM_ISP,
+		"event ID:%d, bus_irq_status_0 = 0x%x rc = %d",
+		payload->evt_id, irq_status, ret);
+
+	cam_vfe_bus_ver3_put_evt_payload(rsrc_data->common_data, &payload);
 
 	return ret;
 }
@@ -913,10 +1057,10 @@
 	struct cam_vfe_bus_ver3_priv          *ver3_bus_priv,
 	struct cam_isp_out_port_info          *out_port_info,
 	void                                  *tasklet,
-	void                                  *ctx,
 	enum cam_vfe_bus_ver3_vfe_out_type     vfe_out_res_id,
 	enum cam_vfe_bus_plane_type            plane,
 	struct cam_isp_resource_node         **wm_res,
+	uint32_t                              *client_done_mask,
 	uint32_t                               is_dual,
 	enum cam_vfe_bus_ver3_comp_grp_type   *comp_grp_id)
 {
@@ -945,7 +1089,6 @@
 	wm_res_local->tasklet_info = tasklet;
 
 	rsrc_data = wm_res_local->res_priv;
-	rsrc_data->ctx = ctx;
 	rsrc_data->format = out_port_info->format;
 	rsrc_data->pack_fmt = cam_vfe_bus_ver3_get_packer_fmt(rsrc_data->format,
 		wm_idx);
@@ -1097,15 +1240,12 @@
 		rsrc_data->height = 0;
 		rsrc_data->stride = 1;
 		rsrc_data->en_cfg = (0x1 << 16) | 0x1;
-	} else if (rsrc_data->index == 11 || rsrc_data->index == 21) {
-		/* WM 21/11 PDAF/2PD */
+	} else if (rsrc_data->index == 21) {
+		/* WM 21 2PD */
 		rsrc_data->width = 0;
 		rsrc_data->height = 0;
 		rsrc_data->stride = 1;
 		rsrc_data->en_cfg = (0x1 << 16) | 0x1;
-		if (vfe_out_res_id == CAM_VFE_BUS_VER3_VFE_OUT_PDAF)
-			/* LSB aligned */
-			rsrc_data->pack_fmt |= 0x10;
 	} else if (rsrc_data->index == 10) {
 		/* WM 10 Raw dump */
 		rsrc_data->stride = rsrc_data->width;
@@ -1115,10 +1255,8 @@
 	} else if (rsrc_data->index == 22) {
 		switch (rsrc_data->format) {
 		case CAM_FORMAT_PLAIN16_16:
-			rsrc_data->width = 0;
-			rsrc_data->height = 0;
-			rsrc_data->stride = 1;
-			rsrc_data->en_cfg = (0x1 << 16) | 0x1;
+			rsrc_data->stride = ALIGNUP(rsrc_data->width * 2, 8);
+			rsrc_data->en_cfg = 0x1;
 			/* LSB aligned */
 			rsrc_data->pack_fmt |= 0x10;
 			break;
@@ -1127,16 +1265,22 @@
 				rsrc_data->format);
 			return -EINVAL;
 		}
-	} else {
+	} else if ((rsrc_data->index == 2) || (rsrc_data->index == 3) ||
+		(rsrc_data->index == 6) || (rsrc_data->index == 7)) {
 		/* Write master 2-3 and 6-7 DS ports */
 
 		rsrc_data->height = rsrc_data->height / 2;
 		rsrc_data->width  = rsrc_data->width / 2;
 		rsrc_data->en_cfg = 0x1;

+	} else {
+		CAM_ERR(CAM_ISP, "Invalid WM:%d requested", rsrc_data->index);
+		return -EINVAL;
 	}
 
 	*wm_res = wm_res_local;
 	*comp_grp_id = rsrc_data->hw_regs->comp_group;
+	*client_done_mask |= (1 << wm_idx);
 
 	CAM_DBG(CAM_ISP,
 		"WM:%d processed width:%d height:%d format:0x%x en_ubwc:%d",
@@ -1329,6 +1473,15 @@
 	return 0;
 }
 
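+/* Accumulate this WM's done bit into the group's composite mask */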
+static void cam_vfe_bus_ver3_add_wm_to_comp_grp(
+	struct cam_isp_resource_node    *comp_grp,
+	uint32_t                         composite_mask)
+{
+	struct cam_vfe_bus_ver3_comp_grp_data  *rsrc_data = comp_grp->res_priv;
+
+	rsrc_data->composite_mask |= composite_mask;
+}
+
 static bool cam_vfe_bus_ver3_match_comp_grp(
 	struct cam_vfe_bus_ver3_priv           *ver3_bus_priv,
 	struct cam_isp_resource_node          **comp_grp,
@@ -1368,7 +1521,6 @@
 	struct cam_vfe_bus_ver3_priv        *ver3_bus_priv,
 	struct cam_isp_out_port_info        *out_port_info,
 	void                                *tasklet,
-	void                                *ctx,
 	uint32_t                             is_dual,
 	uint32_t                             is_master,
 	enum cam_vfe_bus_ver3_vfe_core_id    dual_slave_core,
@@ -1429,7 +1581,6 @@
 
 	CAM_DBG(CAM_ISP, "Acquire comp_grp:%u", rsrc_data->comp_grp_type);
 
-	rsrc_data->ctx = ctx;
 	rsrc_data->acquire_dev_cnt++;
 	*comp_grp = comp_grp_local;
 
@@ -1482,6 +1633,7 @@
 
 		in_rsrc_data->dual_slave_core = CAM_VFE_BUS_VER3_VFE_CORE_MAX;
 		in_rsrc_data->addr_sync_mode = 0;
+		in_rsrc_data->composite_mask = 0;
 
 		comp_grp->tasklet_info = NULL;
 		comp_grp->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
@@ -1493,19 +1645,19 @@
 }
 
 static int cam_vfe_bus_ver3_start_comp_grp(
-	struct cam_isp_resource_node *comp_grp)
+	struct cam_isp_resource_node *comp_grp, uint32_t *bus_irq_reg_mask)
 {
 	int rc = 0;
 	uint32_t val;
 	struct cam_vfe_bus_ver3_comp_grp_data *rsrc_data = NULL;
 	struct cam_vfe_bus_ver3_common_data *common_data = NULL;
-	uint32_t bus_irq_reg_mask[CAM_VFE_BUS_VER3_IRQ_MAX] = {0};
 
 	rsrc_data = comp_grp->res_priv;
 	common_data = rsrc_data->common_data;
 
-	CAM_DBG(CAM_ISP, "comp_grp_type:%d streaming state:%d",
-		rsrc_data->comp_grp_type, comp_grp->res_state);
+	CAM_DBG(CAM_ISP, "comp_grp_type:%d streaming state:%d mask:0x%x",
+		rsrc_data->comp_grp_type, comp_grp->res_state,
+		rsrc_data->composite_mask);
 
 	if (comp_grp->res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
 		return 0;
@@ -1547,28 +1699,13 @@
 	}
 
 	bus_irq_reg_mask[CAM_VFE_BUS_VER3_IRQ_REG0] =
-		(0x1 << (rsrc_data->comp_grp_type + 6));
+		(0x1 << (rsrc_data->comp_grp_type +
+		rsrc_data->common_data->comp_done_shift));
 
-	/*
-	 * For Dual composite subscribe IRQ only for master
-	 * For regular composite, subscribe IRQ always
-	 */
-	CAM_DBG(CAM_ISP, "Subscribe comp_grp_type:%d IRQ",
-		rsrc_data->comp_grp_type);
-	if ((rsrc_data->is_dual && rsrc_data->is_master) ||
-		(!rsrc_data->is_dual)) {
-		comp_grp->irq_handle = cam_irq_controller_subscribe_irq(
-			common_data->bus_irq_controller, CAM_IRQ_PRIORITY_1,
-			bus_irq_reg_mask, comp_grp,
-			comp_grp->top_half_handler,
-			cam_ife_mgr_do_tasklet_buf_done,
-			comp_grp->tasklet_info, &tasklet_bh_api);
-		if (comp_grp->irq_handle < 0) {
-			CAM_ERR(CAM_ISP, "Subscribe IRQ failed for comp_grp %d",
-				rsrc_data->comp_grp_type);
-			return -EFAULT;
-		}
-	}
+	CAM_DBG(CAM_ISP, "VFE start COMP_GRP:%d bus_irq_mask_0 0x%x",
+		rsrc_data->comp_grp_type,
+		bus_irq_reg_mask[CAM_VFE_BUS_VER3_IRQ_REG0]);
+
 	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
 
 	return rc;
@@ -1577,76 +1714,21 @@
 static int cam_vfe_bus_ver3_stop_comp_grp(
 	struct cam_isp_resource_node          *comp_grp)
 {
-	int rc = 0;
-	struct cam_vfe_bus_ver3_comp_grp_data *rsrc_data = NULL;
-	struct cam_vfe_bus_ver3_common_data   *common_data = NULL;
-
-	rsrc_data = comp_grp->res_priv;
-	common_data = rsrc_data->common_data;
-
-	/* Unsubscribe IRQ */
-	if ((rsrc_data->is_dual && rsrc_data->is_master) ||
-		(!rsrc_data->is_dual)) {
-		rc = cam_irq_controller_unsubscribe_irq(
-			common_data->bus_irq_controller,
-			comp_grp->irq_handle);
-	}
 	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
 
-	return rc;
+	return 0;
 }
 
 static int cam_vfe_bus_ver3_handle_comp_done_top_half(uint32_t evt_id,
 	struct cam_irq_th_payload *th_payload)
 {
-	int32_t                                     rc;
-	int                                         i;
-	struct cam_isp_resource_node               *comp_grp = NULL;
-	struct cam_vfe_bus_ver3_comp_grp_data      *rsrc_data = NULL;
-	struct cam_vfe_bus_irq_evt_payload         *evt_payload;
-
-	comp_grp = th_payload->handler_priv;
-	if (!comp_grp) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP, "No resource");
-		return -ENODEV;
-	}
-
-	rsrc_data = comp_grp->res_priv;
-
-	CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
-	CAM_DBG(CAM_ISP, "IRQ status_1 = 0x%x", th_payload->evt_status_arr[1]);
-
-	rc  = cam_vfe_bus_ver3_get_evt_payload(rsrc_data->common_data,
-		&evt_payload);
-	if (rc) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"No tasklet_cmd is free in queue");
-		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"IRQ status_0 = 0x%x status_1 = 0x%x",
-			th_payload->evt_status_arr[0],
-			th_payload->evt_status_arr[1]);
-
-		return rc;
-	}
-
-	cam_isp_hw_get_timestamp(&evt_payload->ts);
-
-	evt_payload->ctx = rsrc_data->ctx;
-	evt_payload->core_index = rsrc_data->common_data->core_index;
-	evt_payload->evt_id  = evt_id;
-
-	for (i = 0; i < th_payload->num_registers; i++)
-		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
-
-	th_payload->evt_payload_priv = evt_payload;
-
-	CAM_DBG(CAM_ISP, "Exit");
-	return rc;
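+	/* Comp done is now dispatched through the vfe_out top half */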
+	return -EPERM;
 }
 
 static int cam_vfe_bus_ver3_handle_comp_done_bottom_half(
 	void                *handler_priv,
-	void                *evt_payload_priv)
+	void                *evt_payload_priv,
+	uint32_t            *comp_mask)
 {
 	int rc = CAM_VFE_IRQ_STATUS_ERR;
 	struct cam_isp_resource_node          *comp_grp = handler_priv;
@@ -1669,23 +1751,15 @@
 	cam_ife_irq_regs = evt_payload->irq_reg_val;
 	status0_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_VER3_REG_STATUS0];
 
-	if (status0_reg & BIT(rsrc_data->comp_grp_type + 6)) {
-		rsrc_data->irq_trigger_cnt++;
-		if (rsrc_data->irq_trigger_cnt ==
-			rsrc_data->acquire_dev_cnt) {
-			cam_ife_irq_regs[CAM_IFE_IRQ_BUS_VER3_REG_STATUS0] &=
-					~BIT(rsrc_data->comp_grp_type + 6);
-			rsrc_data->irq_trigger_cnt = 0;
-		}
+	if (status0_reg & BIT(rsrc_data->comp_grp_type +
+		rsrc_data->common_data->comp_done_shift)) {
+		evt_payload->evt_id = CAM_ISP_HW_EVENT_DONE;
 		rc = CAM_VFE_IRQ_STATUS_SUCCESS;
 	}
 
-	CAM_DBG(CAM_ISP, "status_0_reg = 0x%x, bit index = %d rc %d",
-		status0_reg, (rsrc_data->comp_grp_type + 6), rc);
+	CAM_DBG(CAM_ISP, "status_0_reg = 0x%x rc %d", status0_reg, rc);
 
-	if (rc == CAM_VFE_IRQ_STATUS_SUCCESS)
-		cam_vfe_bus_ver3_put_evt_payload(rsrc_data->common_data,
-			&evt_payload);
+	*comp_mask = rsrc_data->composite_mask;
 
 	return rc;
 }
@@ -1698,6 +1772,7 @@
 {
 	struct cam_vfe_bus_ver3_comp_grp_data *rsrc_data = NULL;
 	struct cam_vfe_soc_private *vfe_soc_private = soc_info->soc_private;
+	int ddr_type = 0;
 
 	rsrc_data = kzalloc(sizeof(struct cam_vfe_bus_ver3_comp_grp_data),
 		GFP_KERNEL);
@@ -1717,7 +1792,9 @@
 		rsrc_data->comp_grp_type != CAM_VFE_BUS_VER3_COMP_GRP_1)
 		rsrc_data->ubwc_static_ctrl = 0;
 	else {
-		if (of_fdt_get_ddrtype() == DDR_TYPE_LPDDR5)
+		ddr_type = of_fdt_get_ddrtype();
+		if ((ddr_type == DDR_TYPE_LPDDR5) ||
+			(ddr_type == DDR_TYPE_LPDDR5X))
 			rsrc_data->ubwc_static_ctrl =
 				vfe_soc_private->ubwc_static_ctrl[1];
 		else
@@ -1727,11 +1804,7 @@
 
 	list_add_tail(&comp_grp->list, &ver3_bus_priv->free_comp_grp);
 
-	comp_grp->start = cam_vfe_bus_ver3_start_comp_grp;
-	comp_grp->stop = cam_vfe_bus_ver3_stop_comp_grp;
 	comp_grp->top_half_handler = cam_vfe_bus_ver3_handle_comp_done_top_half;
-	comp_grp->bottom_half_handler =
-		cam_vfe_bus_ver3_handle_comp_done_bottom_half;
 	comp_grp->hw_intf = ver3_bus_priv->common_data.hw_intf;
 
 	return 0;
@@ -1793,6 +1866,7 @@
 	struct cam_vfe_bus_ver3_vfe_out_data   *rsrc_data = NULL;
 	uint32_t                                secure_caps = 0, mode;
 	enum cam_vfe_bus_ver3_comp_grp_type     comp_grp_id;
+	uint32_t                                client_done_mask = 0;
 
 	if (!bus_priv || !acquire_args) {
 		CAM_ERR(CAM_ISP, "Invalid Param");
@@ -1822,6 +1896,9 @@
 	}
 
 	rsrc_data = rsrc_node->res_priv;
+	rsrc_data->common_data->event_cb = acq_args->event_cb;
+	rsrc_data->priv = acq_args->priv;
+
 	secure_caps = cam_vfe_bus_ver3_can_be_secure(
 		rsrc_data->out_type);
 	mode = out_acquire_args->out_port_info->secure_mode;
@@ -1861,10 +1938,10 @@
 		rc = cam_vfe_bus_ver3_acquire_wm(ver3_bus_priv,
 			out_acquire_args->out_port_info,
 			acq_args->tasklet,
-			out_acquire_args->ctx,
 			vfe_out_res_id,
 			i,
 			&rsrc_data->wm_res[i],
+			&client_done_mask,
 			out_acquire_args->is_dual,
 			&comp_grp_id);
 		if (rc) {
@@ -1880,7 +1957,6 @@
 	rc = cam_vfe_bus_ver3_acquire_comp_grp(ver3_bus_priv,
 		out_acquire_args->out_port_info,
 		acq_args->tasklet,
-		out_acquire_args->ctx,
 		out_acquire_args->is_dual,
 		out_acquire_args->is_master,
 		out_acquire_args->dual_slave_core,
@@ -1894,6 +1970,8 @@
 		return rc;
 	}
 
+	cam_vfe_bus_ver3_add_wm_to_comp_grp(rsrc_data->comp_grp,
+		client_done_mask);
 
 	rsrc_node->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
 	out_acquire_args->rsrc_node = rsrc_node;
@@ -1984,6 +2062,9 @@
 	int rc = 0, i;
 	struct cam_vfe_bus_ver3_vfe_out_data  *rsrc_data = NULL;
 	struct cam_vfe_bus_ver3_common_data   *common_data = NULL;
+	uint32_t bus_irq_reg_mask[CAM_VFE_BUS_VER3_IRQ_MAX];
+	uint32_t rup_irq_reg_mask[CAM_VFE_BUS_VER3_IRQ_MAX];
+	uint32_t source_group = 0;
 
 	if (!vfe_out) {
 		CAM_ERR(CAM_ISP, "Invalid input");
@@ -1992,6 +2073,7 @@
 
 	rsrc_data = vfe_out->res_priv;
 	common_data = rsrc_data->common_data;
+	source_group = rsrc_data->source_group;
 
 	CAM_DBG(CAM_ISP, "Start resource index %d", rsrc_data->out_type);
 
@@ -2004,8 +2086,52 @@
 	for (i = 0; i < rsrc_data->num_wm; i++)
 		rc = cam_vfe_bus_ver3_start_wm(rsrc_data->wm_res[i]);
 
-	if (rsrc_data->comp_grp)
-		rc = cam_vfe_bus_ver3_start_comp_grp(rsrc_data->comp_grp);
+	memset(bus_irq_reg_mask, 0, sizeof(bus_irq_reg_mask));
+	rc = cam_vfe_bus_ver3_start_comp_grp(rsrc_data->comp_grp,
+		bus_irq_reg_mask);
+
+	vfe_out->irq_handle = cam_irq_controller_subscribe_irq(
+		common_data->bus_irq_controller,
+		CAM_IRQ_PRIORITY_1,
+		bus_irq_reg_mask,
+		vfe_out,
+		vfe_out->top_half_handler,
+		vfe_out->bottom_half_handler,
+		vfe_out->tasklet_info,
+		&tasklet_bh_api);
+
+	if (vfe_out->irq_handle < 1) {
+		CAM_ERR(CAM_ISP, "Subscribe IRQ failed for VFE out_res %d",
+			vfe_out->res_id);
+		vfe_out->irq_handle = 0;
+		return -EFAULT;
+	}
+
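+	/* Subscribe RUP once per source group; member outputs share it */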
+	if (!common_data->rup_irq_handle[source_group]) {
+		memset(rup_irq_reg_mask, 0, sizeof(rup_irq_reg_mask));
+		rup_irq_reg_mask[CAM_VFE_BUS_VER3_IRQ_REG0] |=
+			0x1 << source_group;
+
+		CAM_DBG(CAM_ISP, "bus_irq_mask_0 for rup 0x%x",
+			rup_irq_reg_mask[CAM_VFE_BUS_VER3_IRQ_REG0]);
+
+		common_data->rup_irq_handle[source_group] =
+			cam_irq_controller_subscribe_irq(
+				common_data->bus_irq_controller,
+				CAM_IRQ_PRIORITY_1,
+				rup_irq_reg_mask,
+				vfe_out,
+				cam_vfe_bus_ver3_handle_rup_top_half,
+				cam_vfe_bus_ver3_handle_rup_bottom_half,
+				vfe_out->tasklet_info,
+				&tasklet_bh_api);
+
+		if (common_data->rup_irq_handle[source_group] < 1) {
+			CAM_ERR(CAM_ISP, "Failed to subscribe RUP IRQ");
+			common_data->rup_irq_handle[source_group] = 0;
+			return -EFAULT;
+		}
+	}
 
 	vfe_out->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
 	return rc;
@@ -2016,6 +2142,7 @@
 {
 	int rc = 0, i;
 	struct cam_vfe_bus_ver3_vfe_out_data  *rsrc_data = NULL;
+	struct cam_vfe_bus_ver3_common_data   *common_data = NULL;
 
 	if (!vfe_out) {
 		CAM_ERR(CAM_ISP, "Invalid input");
@@ -2023,6 +2150,7 @@
 	}
 
 	rsrc_data = vfe_out->res_priv;
+	common_data = rsrc_data->common_data;
 
 	if (vfe_out->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE ||
 		vfe_out->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
@@ -2030,12 +2158,25 @@
 		return rc;
 	}
 
-	if (rsrc_data->comp_grp)
-		rc = cam_vfe_bus_ver3_stop_comp_grp(rsrc_data->comp_grp);
+	rc = cam_vfe_bus_ver3_stop_comp_grp(rsrc_data->comp_grp);
 
 	for (i = 0; i < rsrc_data->num_wm; i++)
 		rc = cam_vfe_bus_ver3_stop_wm(rsrc_data->wm_res[i]);
 
+	if (common_data->rup_irq_handle[rsrc_data->source_group]) {
+		rc = cam_irq_controller_unsubscribe_irq(
+			common_data->bus_irq_controller,
+			common_data->rup_irq_handle[rsrc_data->source_group]);
+		common_data->rup_irq_handle[rsrc_data->source_group] = 0;
+	}
+
+	if (vfe_out->irq_handle) {
+		rc = cam_irq_controller_unsubscribe_irq(
+			common_data->bus_irq_controller,
+			vfe_out->irq_handle);
+		vfe_out->irq_handle = 0;
+	}
+
 	vfe_out->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
 	return rc;
 }
@@ -2043,31 +2184,94 @@
 static int cam_vfe_bus_ver3_handle_vfe_out_done_top_half(uint32_t evt_id,
 	struct cam_irq_th_payload *th_payload)
 {
-	return -EPERM;
+	int32_t                                     rc;
+	int                                         i;
+	struct cam_isp_resource_node               *vfe_out = NULL;
+	struct cam_vfe_bus_ver3_vfe_out_data       *rsrc_data = NULL;
+	struct cam_vfe_bus_irq_evt_payload         *evt_payload;
+
+	vfe_out = th_payload->handler_priv;
+	if (!vfe_out) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No resource");
+		return -ENODEV;
+	}
+
+	rsrc_data = vfe_out->res_priv;
+
+	CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
+	CAM_DBG(CAM_ISP, "IRQ status_1 = 0x%x", th_payload->evt_status_arr[1]);
+
+	rc  = cam_vfe_bus_ver3_get_evt_payload(rsrc_data->common_data,
+		&evt_payload);
+
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"IRQ status_0 = 0x%x status_1 = 0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1]);
+
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	evt_payload->core_index = rsrc_data->common_data->core_index;
+	evt_payload->evt_id = evt_id;
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
 }
 
 static int cam_vfe_bus_ver3_handle_vfe_out_done_bottom_half(
 	void                *handler_priv,
 	void                *evt_payload_priv)
 {
-	int rc = -EINVAL;
+	int                                   rc = -EINVAL, num_out = 0, i = 0;
 	struct cam_isp_resource_node         *vfe_out = handler_priv;
 	struct cam_vfe_bus_ver3_vfe_out_data *rsrc_data = vfe_out->res_priv;
 	struct cam_vfe_bus_irq_evt_payload   *evt_payload = evt_payload_priv;
+	struct cam_isp_hw_event_info          evt_info;
+	void                                 *ctx = NULL;
+	uint32_t                              evt_id = 0, comp_mask = 0;
+	uint32_t                         out_list[CAM_VFE_BUS_VER3_VFE_OUT_MAX];
 
-	if (evt_payload->evt_id == CAM_ISP_HW_EVENT_REG_UPDATE) {
-		rc = cam_vfe_bus_ver3_handle_rup_bottom_half(
-			handler_priv, evt_payload_priv);
-		return rc;
-	}
-	/* We only handle composite buf done */
-	if (rsrc_data->comp_grp) {
-		rc = rsrc_data->comp_grp->bottom_half_handler(
-			rsrc_data->comp_grp, evt_payload_priv);
-	}
-
+	rc = cam_vfe_bus_ver3_handle_comp_done_bottom_half(
+		rsrc_data->comp_grp, evt_payload_priv, &comp_mask);
 	CAM_DBG(CAM_ISP, "vfe_out %d rc %d", rsrc_data->out_type, rc);
 
+	ctx = rsrc_data->priv;
+	memset(out_list, 0, sizeof(out_list));
+
+	switch (rc) {
+	case CAM_VFE_IRQ_STATUS_SUCCESS:
+		evt_id = evt_payload->evt_id;
+
+		evt_info.res_type = vfe_out->res_type;
+		evt_info.hw_idx   = vfe_out->hw_intf->hw_idx;
+
+		rc = cam_vfe_bus_ver3_get_comp_vfe_out_res_id_list(
+			comp_mask, out_list, &num_out,
+			rsrc_data->common_data->is_lite);
+		for (i = 0; i < num_out; i++) {
+			evt_info.res_id = out_list[i];
+			if (rsrc_data->common_data->event_cb)
+				rsrc_data->common_data->event_cb(ctx, evt_id,
+					(void *)&evt_info);
+		}
+		break;
+	default:
+		break;
+	}
+
+	cam_vfe_bus_ver3_put_evt_payload(rsrc_data->common_data, &evt_payload);
+
 	return rc;
 }
 
@@ -2109,14 +2313,16 @@
 	vfe_out->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
 	INIT_LIST_HEAD(&vfe_out->list);
 
-	rsrc_data->out_type    =
+	rsrc_data->source_group =
+		ver3_hw_info->vfe_out_hw_info[index].source_group;
+	rsrc_data->out_type     =
 		ver3_hw_info->vfe_out_hw_info[index].vfe_out_type;
-	rsrc_data->common_data = &ver3_bus_priv->common_data;
-	rsrc_data->max_width   =
+	rsrc_data->common_data  = &ver3_bus_priv->common_data;
+	rsrc_data->max_width    =
 		ver3_hw_info->vfe_out_hw_info[index].max_width;
-	rsrc_data->max_height  =
+	rsrc_data->max_height   =
 		ver3_hw_info->vfe_out_hw_info[index].max_height;
-	rsrc_data->secure_mode = CAM_SECURE_MODE_NON_SECURE;
+	rsrc_data->secure_mode  = CAM_SECURE_MODE_NON_SECURE;
 
 	vfe_out->start = cam_vfe_bus_ver3_start_vfe_out;
 	vfe_out->stop = cam_vfe_bus_ver3_stop_vfe_out;
@@ -2126,6 +2332,7 @@
 		cam_vfe_bus_ver3_handle_vfe_out_done_bottom_half;
 	vfe_out->process_cmd = cam_vfe_bus_ver3_process_cmd;
 	vfe_out->hw_intf = ver3_bus_priv->common_data.hw_intf;
+	vfe_out->irq_handle = 0;
 
 	return 0;
 }
@@ -2149,6 +2356,7 @@
 	vfe_out->top_half_handler = NULL;
 	vfe_out->bottom_half_handler = NULL;
 	vfe_out->hw_intf = NULL;
+	vfe_out->irq_handle = 0;
 
 	vfe_out->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
 	INIT_LIST_HEAD(&vfe_out->list);
@@ -2202,19 +2410,11 @@
 		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
 
 	evt_payload->core_index = bus_priv->common_data.core_index;
-	evt_payload->evt_id  = evt_id;
 
-	evt_payload->ctx = &bus_priv->common_data;
 	evt_payload->ccif_violation_status = cam_io_r_mb(
 		bus_priv->common_data.mem_base +
 		bus_priv->common_data.common_reg->ccif_violation_status);
 
-	evt_payload->overflow_status = cam_io_r_mb(
-		bus_priv->common_data.mem_base +
-		bus_priv->common_data.common_reg->overflow_status);
-	cam_io_w_mb(0x1, bus_priv->common_data.mem_base +
-		bus_priv->common_data.common_reg->overflow_status_clear);
-
 	evt_payload->image_size_violation_status = cam_io_r_mb(
 		bus_priv->common_data.mem_base +
 		bus_priv->common_data.common_reg->image_size_violation_status);
@@ -2224,18 +2424,19 @@
 	return rc;
 }
 
-static int cam_vfe_bus_ver3_err_irq_bottom_half(void *ctx_priv,
-	void *evt_payload_priv)
+static int cam_vfe_bus_ver3_err_irq_bottom_half(
+	void *handler_priv, void *evt_payload_priv)
 {
-	struct cam_vfe_bus_irq_evt_payload *evt_payload;
+	struct cam_vfe_bus_irq_evt_payload *evt_payload = evt_payload_priv;
+	struct cam_vfe_bus_ver3_priv *bus_priv = handler_priv;
 	struct cam_vfe_bus_ver3_common_data *common_data;
+	struct cam_isp_hw_event_info evt_info;
 	uint32_t val = 0, image_size_violation = 0, ccif_violation = 0;
 
-	if (!ctx_priv || !evt_payload_priv)
+	if (!handler_priv || !evt_payload_priv)
 		return -EINVAL;
 
-	evt_payload = evt_payload_priv;
-	common_data = evt_payload->ctx;
+	common_data = &bus_priv->common_data;
 
 	val = evt_payload->irq_reg_val[CAM_IFE_IRQ_BUS_VER3_REG_STATUS0];
 	image_size_violation = (val >> 31) & 0x1;
@@ -2245,6 +2446,50 @@
 	CAM_ERR(CAM_ISP, "image_size_violation %d ccif_violation %d",
 		image_size_violation, ccif_violation);
 
+	if (common_data->is_lite) {
+		if (image_size_violation) {
+			val = evt_payload->image_size_violation_status;
+
+			if (val & 0x01)
+				CAM_INFO(CAM_ISP,
+					"RDI 0 image size violation");
+
+			if (val & 0x02)
+				CAM_INFO(CAM_ISP,
+					"RDI 1 image size violation");
+
+			if (val & 0x04)
+				CAM_INFO(CAM_ISP,
+					"RDI 2 image size violation");
+
+			if (val & 0x08)
+				CAM_INFO(CAM_ISP,
+					"RDI 3 image size violation");
+		}
+
+		if (ccif_violation) {
+			val = evt_payload->ccif_violation_status;
+
+			if (val & 0x01)
+				CAM_INFO(CAM_ISP,
+					"RDI 0 ccif violation");
+
+			if (val & 0x02)
+				CAM_INFO(CAM_ISP,
+					"RDI 1 ccif violation");
+
+			if (val & 0x04)
+				CAM_INFO(CAM_ISP,
+					"RDI 2 ccif violation");
+
+			if (val & 0x08)
+				CAM_INFO(CAM_ISP,
+					"RDI 3 ccif violation");
+		}
+
+		goto end;
+	}
+
 	if (image_size_violation) {
 		val = evt_payload->image_size_violation_status;
 
@@ -2414,7 +2659,17 @@
 
 	}
 
+end:
 	cam_vfe_bus_ver3_put_evt_payload(common_data, &evt_payload);
+
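+	/* Report the violation; no single out resource owns it, ctx is NULL */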
+	evt_info.hw_idx = common_data->core_index;
+	evt_info.res_type = CAM_ISP_RESOURCE_VFE_OUT;
+	evt_info.res_id = CAM_VFE_BUS_VER3_VFE_OUT_MAX;
+	evt_info.err_type = CAM_VFE_IRQ_STATUS_VIOLATION;
+
+	if (common_data->event_cb)
+		common_data->event_cb(NULL, CAM_ISP_HW_EVENT_ERROR,
+			(void *)&evt_info);
 	return 0;
 }
 
@@ -2631,8 +2886,6 @@
 		else
 			loop_size = 1;
 
-
-
 		/* WM Image address */
 		for (k = 0; k < loop_size; k++) {
 			if (wm_data->en_ubwc)
@@ -3023,14 +3276,14 @@
 	void *init_hw_args, uint32_t arg_size)
 {
 	struct cam_vfe_bus_ver3_priv    *bus_priv = hw_priv;
-	uint32_t                         top_irq_reg_mask[2] = {0};
+	uint32_t                         top_irq_reg_mask[3] = {0};
 
 	if (!bus_priv) {
 		CAM_ERR(CAM_ISP, "Invalid args");
 		return -EINVAL;
 	}
 
-	top_irq_reg_mask[0] = (1 << 7);
+	top_irq_reg_mask[0] = (1 << bus_priv->top_irq_shift);
 
 	bus_priv->irq_handle = cam_irq_controller_subscribe_irq(
 		bus_priv->common_data.vfe_irq_controller,
@@ -3042,8 +3295,9 @@
 		NULL,
 		NULL);
 
-	if ((int)bus_priv->irq_handle <= 0) {
+	if (bus_priv->irq_handle < 1) {
 		CAM_ERR(CAM_ISP, "Failed to subscribe BUS IRQ");
+		bus_priv->irq_handle = 0;
 		return -EFAULT;
 	}
 
@@ -3058,35 +3312,19 @@
 			bus_priv->tasklet_info,
 			&tasklet_bh_api);
 
-		if ((int)bus_priv->error_irq_handle <= 0) {
+		if (bus_priv->error_irq_handle < 1) {
 			CAM_ERR(CAM_ISP, "Failed to subscribe BUS Error IRQ");
+			bus_priv->error_irq_handle = 0;
 			return -EFAULT;
 		}
 	}
 
-	if (bus_priv->tasklet_info != NULL) {
-		bus_priv->rup_irq_handle = cam_irq_controller_subscribe_irq(
-			bus_priv->common_data.bus_irq_controller,
-			CAM_IRQ_PRIORITY_0,
-			rup_irq_mask,
-			bus_priv,
-			cam_vfe_bus_ver3_handle_rup_top_half,
-			cam_ife_mgr_do_tasklet_reg_update,
-			bus_priv->tasklet_info,
-			&tasklet_bh_api);
-
-		if (bus_priv->rup_irq_handle <= 0) {
-			CAM_ERR(CAM_ISP, "Failed to subscribe RUP IRQ");
-			return -EFAULT;
-		}
-	}
-
-	// no clock gating at bus input
+	/* no clock gating at bus input */
 	CAM_INFO(CAM_ISP, "Overriding clock gating at bus input");
 	cam_io_w_mb(0x3FFFFFF, bus_priv->common_data.mem_base +
 		bus_priv->common_data.common_reg->cgc_ovd);
 
-	// BUS_WR_TEST_BUS_CTRL
+	/* BUS_WR_TEST_BUS_CTRL */
 	cam_io_w_mb(0x0, bus_priv->common_data.mem_base +
 		bus_priv->common_data.common_reg->test_bus_ctrl);
 
@@ -3108,10 +3346,6 @@
 		rc = cam_irq_controller_unsubscribe_irq(
 			bus_priv->common_data.bus_irq_controller,
 			bus_priv->error_irq_handle);
-		if (rc)
-			CAM_ERR(CAM_ISP,
-				"Failed to unsubscribe error irq rc=%d", rc);
-
 		bus_priv->error_irq_handle = 0;
 	}
 
@@ -3119,10 +3353,6 @@
 		rc = cam_irq_controller_unsubscribe_irq(
 			bus_priv->common_data.vfe_irq_controller,
 			bus_priv->irq_handle);
-		if (rc)
-			CAM_ERR(CAM_ISP,
-				"Failed to unsubscribe irq rc=%d", rc);
-
 		bus_priv->irq_handle = 0;
 	}
 
@@ -3175,11 +3405,6 @@
 			rc = cam_irq_controller_unsubscribe_irq(
 				bus_priv->common_data.bus_irq_controller,
 				bus_priv->error_irq_handle);
-			if (rc)
-				CAM_ERR(CAM_ISP,
-					"Failed to unsubscribe error irq rc=%d",
-					rc);
-
 			bus_priv->error_irq_handle = 0;
 		}
 		break;
@@ -3236,6 +3461,7 @@
 
 	bus_priv->num_client                     = ver3_hw_info->num_client;
 	bus_priv->num_out                        = ver3_hw_info->num_out;
+	bus_priv->top_irq_shift                  = ver3_hw_info->top_irq_shift;
 	bus_priv->common_data.num_sec_out        = 0;
 	bus_priv->common_data.secure_mode        = CAM_SECURE_MODE_NON_SECURE;
 	bus_priv->common_data.core_index         = soc_info->index;
@@ -3244,6 +3470,8 @@
 	bus_priv->common_data.hw_intf            = hw_intf;
 	bus_priv->common_data.vfe_irq_controller = vfe_irq_controller;
 	bus_priv->common_data.common_reg         = &ver3_hw_info->common_reg;
+	bus_priv->common_data.comp_done_shift    =
+		ver3_hw_info->comp_done_shift;
 
 	if (strnstr(soc_info->compatible, "lite",
 		strlen(soc_info->compatible)) != NULL)
@@ -3251,6 +3479,9 @@
 	else
 		bus_priv->common_data.is_lite = false;
 
+	for (i = 0; i < CAM_VFE_BUS_VER3_SRC_GRP_MAX; i++)
+		bus_priv->common_data.rup_irq_handle[i] = 0;
+
 	mutex_init(&bus_priv->common_data.bus_mutex);
 
 	rc = cam_irq_controller_init(drv_name, bus_priv->common_data.mem_base,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.h
index 4711d75..c5b4ab6 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.h
@@ -158,6 +158,7 @@
 	enum cam_vfe_bus_ver3_vfe_out_type  vfe_out_type;
 	uint32_t                            max_width;
 	uint32_t                            max_height;
+	uint32_t                            source_group;
 };
 
 /*
@@ -166,9 +167,11 @@
  * @Brief:            HW register info for entire Bus
  *
  * @common_reg:       Common register details
+ * @num_client:       Total number of write clients
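+ * @num_out:          Total number of supported VFE out resources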
  * @bus_client_reg:   Bus client register info
- * @comp_reg_grp:     Composite group register info
  * @vfe_out_hw_info:  VFE output capability
+ * @comp_done_shift:  Bit shift for the comp done bits in BUS IRQ status0
+ * @top_irq_shift:    Bit shift for the BUS WR IRQ in the top IRQ status
  */
 struct cam_vfe_bus_ver3_hw_info {
 	struct cam_vfe_bus_ver3_reg_offset_common common_reg;
@@ -178,6 +181,8 @@
 	uint32_t num_out;
 	struct cam_vfe_bus_ver3_vfe_out_hw_info
 		vfe_out_hw_info[CAM_VFE_BUS_VER3_VFE_OUT_MAX];
+	uint32_t comp_done_shift;
+	uint32_t top_irq_shift;
 };
 
 /*
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile
index 07d52e0..be4902b 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile
@@ -6,6 +6,7 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/include
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.c
index b79c6e6..da717a3 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/slab.h>
@@ -12,6 +12,8 @@
 #include "cam_vfe_soc.h"
 #include "cam_vfe_top.h"
 #include "cam_vfe_top_ver2.h"
+#include "cam_irq_controller.h"
+#include "cam_tasklet_util.h"
 #include "cam_vfe_camif_lite_ver2.h"
 #include "cam_debug_util.h"
 #include "cam_cdm_util.h"
@@ -24,8 +26,130 @@
 	struct cam_vfe_camif_lite_ver2_reg_data     *reg_data;
 	struct cam_hw_soc_info                      *soc_info;
 	enum cam_isp_hw_sync_mode                    sync_mode;
+
+	cam_hw_mgr_event_cb_func              event_cb;
+	void                                 *priv;
+	int                                   irq_err_handle;
+	int                                   irq_handle;
+	void                                 *vfe_irq_controller;
+	struct cam_vfe_top_irq_evt_payload
+		evt_payload[CAM_VFE_CAMIF_LITE_EVT_MAX];
+	struct list_head                      free_payload_list;
+	spinlock_t                            spin_lock;
 };
 
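+/*
+ * Free-list management for the top-half event payloads, protected by
+ * the mux-local spin lock.
+ */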
+static int cam_vfe_camif_lite_get_evt_payload(
+	struct cam_vfe_mux_camif_lite_data       *camif_lite_priv,
+	struct cam_vfe_top_irq_evt_payload    **evt_payload)
+{
+	int rc = 0;
+
+	spin_lock(&camif_lite_priv->spin_lock);
+	if (list_empty(&camif_lite_priv->free_payload_list)) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload");
+		rc = -ENODEV;
+		goto done;
+	}
+
+	*evt_payload = list_first_entry(&camif_lite_priv->free_payload_list,
+		struct cam_vfe_top_irq_evt_payload, list);
+	list_del_init(&(*evt_payload)->list);
+	rc = 0;
+done:
+	spin_unlock(&camif_lite_priv->spin_lock);
+	return rc;
+}
+
+static int cam_vfe_camif_lite_put_evt_payload(
+	struct cam_vfe_mux_camif_lite_data       *camif_lite_priv,
+	struct cam_vfe_top_irq_evt_payload    **evt_payload)
+{
+	unsigned long flags;
+
+	if (!camif_lite_priv) {
+		CAM_ERR(CAM_ISP, "Invalid param core_info NULL");
+		return -EINVAL;
+	}
+	if (*evt_payload == NULL) {
+		CAM_ERR(CAM_ISP, "No payload to put");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&camif_lite_priv->spin_lock, flags);
+	list_add_tail(&(*evt_payload)->list,
+		&camif_lite_priv->free_payload_list);
+	*evt_payload = NULL;
+	spin_unlock_irqrestore(&camif_lite_priv->spin_lock, flags);
+
+	CAM_DBG(CAM_ISP, "Done");
+	return 0;
+}
+
+static int cam_vfe_camif_lite_err_irq_top_half(
+	uint32_t                               evt_id,
+	struct cam_irq_th_payload             *th_payload)
+{
+	int32_t                                rc;
+	int                                    i;
+	struct cam_isp_resource_node          *camif_lite_node;
+	struct cam_vfe_mux_camif_lite_data    *camif_lite_priv;
+	struct cam_vfe_top_irq_evt_payload    *evt_payload;
+	bool                                   error_flag = false;
+
+	CAM_DBG(CAM_ISP, "IRQ status_0 = %x, IRQ status_1 = %x",
+		th_payload->evt_status_arr[0], th_payload->evt_status_arr[1]);
+
+	camif_lite_node = th_payload->handler_priv;
+	camif_lite_priv = camif_lite_node->res_priv;
+	/*
+	 * Need to handle the overflow condition here, otherwise an IRQ
+	 * storm will block everything.
+	 */
+	if (th_payload->evt_status_arr[1] || (th_payload->evt_status_arr[0] &
+		camif_lite_priv->reg_data->lite_err_irq_mask0)) {
+		CAM_ERR(CAM_ISP,
+			"CAMIF LITE ERR VFE:%d IRQ STATUS_0=0x%x STATUS_1=0x%x",
+			camif_lite_node->hw_intf->hw_idx,
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1]);
+		CAM_ERR(CAM_ISP, "Stopping further IRQ processing from VFE:%d",
+			camif_lite_node->hw_intf->hw_idx);
+		cam_irq_controller_disable_irq(
+			camif_lite_priv->vfe_irq_controller,
+			camif_lite_priv->irq_err_handle);
+		cam_irq_controller_clear_and_mask(evt_id,
+			camif_lite_priv->vfe_irq_controller);
+		error_flag = true;
+	}
+
+	rc  = cam_vfe_camif_lite_get_evt_payload(camif_lite_priv, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ STATUS_0=0x%x STATUS_1=0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1]);
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
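+	/* Stash violation status in the slot after the IRQ status regs */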
+	evt_payload->irq_reg_val[i] = cam_io_r(camif_lite_priv->mem_base +
+		camif_lite_priv->common_reg->violation_status);
+
+	if (error_flag)
+		CAM_INFO(CAM_ISP, "Violation status = 0x%x",
+			evt_payload->irq_reg_val[i]);
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	return rc;
+}
+
 static int cam_vfe_camif_lite_get_reg_update(
 	struct cam_isp_resource_node          *camif_lite_res,
 	void                                  *cmd_args,
@@ -94,6 +218,8 @@
 	acquire_data = (struct cam_vfe_acquire_args *)acquire_param;
 
 	camif_lite_data->sync_mode   = acquire_data->vfe_in.sync_mode;
+	camif_lite_data->event_cb    = acquire_data->event_cb;
+	camif_lite_data->priv        = acquire_data->priv;
 
 	CAM_DBG(CAM_ISP, "hw id:%d sync_mode=%d",
 		camif_lite_res->hw_intf->hw_idx,
@@ -106,6 +232,8 @@
 {
 	struct cam_vfe_mux_camif_lite_data   *rsrc_data;
 	uint32_t                              val = 0;
+	int                                   rc = 0;
+	uint32_t err_irq_mask[CAM_IFE_IRQ_REGISTERS_MAX];
 
 	if (!camif_lite_res) {
 		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
@@ -121,6 +249,11 @@
 	rsrc_data = (struct cam_vfe_mux_camif_lite_data *)
 		camif_lite_res->res_priv;
 
+	err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS0] =
+		rsrc_data->reg_data->lite_err_irq_mask0;
+	err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS1] =
+		rsrc_data->reg_data->lite_err_irq_mask1;
+
 	/* vfe core config */
 	val = cam_io_r_mb(rsrc_data->mem_base +
 		rsrc_data->common_reg->core_cfg);
@@ -151,14 +284,32 @@
 		camif_lite_res->hw_intf->hw_idx,
 		rsrc_data->reg_data->dual_pd_reg_update_cmd_data);
 
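+	/* Avoid double-subscribe if the error IRQ is already registered */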
+	if (!rsrc_data->irq_err_handle) {
+		rsrc_data->irq_err_handle = cam_irq_controller_subscribe_irq(
+			rsrc_data->vfe_irq_controller,
+			CAM_IRQ_PRIORITY_1,
+			err_irq_mask,
+			camif_lite_res,
+			cam_vfe_camif_lite_err_irq_top_half,
+			camif_lite_res->bottom_half_handler,
+			camif_lite_res->tasklet_info,
+			&tasklet_bh_api);
+		if (rsrc_data->irq_err_handle < 1) {
+			CAM_ERR(CAM_ISP, "Error IRQ handle subscribe failure");
+			rc = -ENOMEM;
+			rsrc_data->irq_err_handle = 0;
+		}
+	}
+
 	CAM_DBG(CAM_ISP, "Start Camif Lite IFE %d Done",
 		camif_lite_res->hw_intf->hw_idx);
-	return 0;
+	return rc;
 }
 
 static int cam_vfe_camif_lite_resource_stop(
 	struct cam_isp_resource_node             *camif_lite_res)
 {
+	struct cam_vfe_mux_camif_lite_data       *camif_lite_priv;
 	int                                       rc = 0;
 
 	if (!camif_lite_res) {
@@ -170,9 +321,25 @@
 		(camif_lite_res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE))
 		return 0;
 
+	camif_lite_priv = camif_lite_res->res_priv;
+
 	if (camif_lite_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
 		camif_lite_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
 
+	if (camif_lite_priv->irq_handle) {
+		cam_irq_controller_unsubscribe_irq(
+			camif_lite_priv->vfe_irq_controller,
+			camif_lite_priv->irq_handle);
+		camif_lite_priv->irq_handle = 0;
+	}
+
+	if (camif_lite_priv->irq_err_handle) {
+		cam_irq_controller_unsubscribe_irq(
+			camif_lite_priv->vfe_irq_controller,
+			camif_lite_priv->irq_err_handle);
+		camif_lite_priv->irq_err_handle = 0;
+	}
+
 	return rc;
 }
 
@@ -204,19 +371,49 @@
 static int cam_vfe_camif_lite_handle_irq_top_half(uint32_t evt_id,
 	struct cam_irq_th_payload *th_payload)
 {
-	return -EPERM;
+	int32_t                                rc;
+	int                                    i;
+	struct cam_isp_resource_node          *camif_lite_node;
+	struct cam_vfe_mux_camif_lite_data    *camif_lite_priv;
+	struct cam_vfe_top_irq_evt_payload    *evt_payload;
+
+	camif_lite_node = th_payload->handler_priv;
+	camif_lite_priv = camif_lite_node->res_priv;
+
+	CAM_DBG(CAM_ISP, "IRQ status_0 = %x", th_payload->evt_status_arr[0]);
+	CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]);
+
+	rc  = cam_vfe_camif_lite_get_evt_payload(camif_lite_priv, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ status0=0x%x status1=0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1]);
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
 }
 
 static int cam_vfe_camif_lite_handle_irq_bottom_half(
-	void                                    *handler_priv,
-	void                                    *evt_payload_priv)
+	void                                 *handler_priv,
+	void                                 *evt_payload_priv)
 {
-	int                                      ret = CAM_VFE_IRQ_STATUS_ERR;
-	struct cam_isp_resource_node            *camif_lite_node;
-	struct cam_vfe_mux_camif_lite_data      *camif_lite_priv;
-	struct cam_vfe_top_irq_evt_payload      *payload;
-	uint32_t                                 irq_status0;
-	uint32_t                                 irq_status1;
+	int                                   ret = CAM_VFE_IRQ_STATUS_MAX;
+	struct cam_isp_resource_node         *camif_lite_node;
+	struct cam_vfe_mux_camif_lite_data   *camif_lite_priv;
+	struct cam_vfe_top_irq_evt_payload   *payload;
+	struct cam_isp_hw_event_info          evt_info;
+	uint32_t                              irq_status0;
+	uint32_t                              irq_status1;
 
 	if (!handler_priv || !evt_payload_priv) {
 		CAM_ERR(CAM_ISP, "Invalid params");
@@ -225,55 +422,50 @@
 
 	camif_lite_node = handler_priv;
 	camif_lite_priv = camif_lite_node->res_priv;
 	payload         = evt_payload_priv;
 	irq_status0     = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
 	irq_status1     = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS1];
 
-	CAM_DBG(CAM_ISP, "event ID:%d", payload->evt_id);
-	CAM_DBG(CAM_ISP, "irq_status_0 = %x", irq_status0);
+	evt_info.hw_idx   = camif_lite_node->hw_intf->hw_idx;
+	evt_info.res_id   = camif_lite_node->res_id;
+	evt_info.res_type = camif_lite_node->res_type;
 
-	switch (payload->evt_id) {
-	case CAM_ISP_HW_EVENT_SOF:
-		if (irq_status0 &
-			camif_lite_priv->reg_data->lite_sof_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received SOF");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_EPOCH:
-		if (irq_status0 &
-			camif_lite_priv->reg_data->lite_epoch0_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received EPOCH");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_REG_UPDATE:
-		if (irq_status0 &
-			camif_lite_priv->reg_data->dual_pd_reg_upd_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received REG_UPDATE_ACK");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_EOF:
-		if (irq_status0 &
-			camif_lite_priv->reg_data->lite_eof_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received EOF\n");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_ERROR:
-		if (irq_status1 &
-			camif_lite_priv->reg_data->lite_error_irq_mask1) {
-			CAM_DBG(CAM_ISP, "Received ERROR\n");
-			ret = CAM_ISP_HW_ERROR_OVERFLOW;
-		} else {
-			ret = CAM_ISP_HW_ERROR_NONE;
-		}
-		break;
-	default:
-		break;
+	CAM_DBG(CAM_ISP, "irq_status_0 = 0x%x irq_status_1 = 0x%x",
+		irq_status0, irq_status1);
+
+	if (irq_status0 & camif_lite_priv->reg_data->lite_sof_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received SOF");
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
 	}
 
+	if (irq_status0 & camif_lite_priv->reg_data->lite_epoch0_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received EPOCH");
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+	}
+
+	if (irq_status0 & camif_lite_priv->reg_data->dual_pd_reg_upd_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received REG_UPDATE_ACK");
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+	}
+
+	if (irq_status0 & camif_lite_priv->reg_data->lite_eof_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received EOF\n");
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+	}
+
+	if ((irq_status0 & camif_lite_priv->reg_data->lite_err_irq_mask0) ||
+		(irq_status1 & camif_lite_priv->reg_data->lite_err_irq_mask1)) {
+		CAM_DBG(CAM_ISP, "Received ERROR\n");
+
+		if (camif_lite_priv->event_cb)
+			camif_lite_priv->event_cb(camif_lite_priv->priv,
+				CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info);
+
+		ret = CAM_VFE_IRQ_STATUS_OVERFLOW;
+	}
+
+	cam_vfe_camif_lite_put_evt_payload(camif_lite_priv, &payload);
+
 	CAM_DBG(CAM_ISP, "returning status = %d", ret);
 	return ret;
 }
@@ -282,11 +474,13 @@
 	struct cam_hw_intf            *hw_intf,
 	struct cam_hw_soc_info        *soc_info,
 	void                          *camif_lite_hw_info,
-	struct cam_isp_resource_node  *camif_lite_node)
+	struct cam_isp_resource_node  *camif_lite_node,
+	void                          *vfe_irq_controller)
 {
 	struct cam_vfe_mux_camif_lite_data       *camif_lite_priv = NULL;
 	struct cam_vfe_camif_lite_ver2_hw_info   *camif_lite_info =
 		camif_lite_hw_info;
+	int                                       i = 0;
 
 	camif_lite_priv = kzalloc(sizeof(*camif_lite_priv),
 		GFP_KERNEL);
@@ -295,13 +489,14 @@
 
 	camif_lite_node->res_priv = camif_lite_priv;
 
-	camif_lite_priv->mem_base         =
+	camif_lite_priv->mem_base           =
 		soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base;
-	camif_lite_priv->camif_lite_reg   = camif_lite_info->camif_lite_reg;
-	camif_lite_priv->common_reg       = camif_lite_info->common_reg;
-	camif_lite_priv->reg_data         = camif_lite_info->reg_data;
-	camif_lite_priv->hw_intf          = hw_intf;
-	camif_lite_priv->soc_info         = soc_info;
+	camif_lite_priv->camif_lite_reg     = camif_lite_info->camif_lite_reg;
+	camif_lite_priv->common_reg         = camif_lite_info->common_reg;
+	camif_lite_priv->reg_data           = camif_lite_info->reg_data;
+	camif_lite_priv->hw_intf            = hw_intf;
+	camif_lite_priv->soc_info           = soc_info;
+	camif_lite_priv->vfe_irq_controller = vfe_irq_controller;
 
 	camif_lite_node->init    = NULL;
 	camif_lite_node->deinit  = NULL;
@@ -313,14 +508,27 @@
 	camif_lite_node->bottom_half_handler =
 		cam_vfe_camif_lite_handle_irq_bottom_half;
 
+	spin_lock_init(&camif_lite_priv->spin_lock);
+	INIT_LIST_HEAD(&camif_lite_priv->free_payload_list);
+	for (i = 0; i < CAM_VFE_CAMIF_LITE_EVT_MAX; i++) {
+		INIT_LIST_HEAD(&camif_lite_priv->evt_payload[i].list);
+		list_add_tail(&camif_lite_priv->evt_payload[i].list,
+			&camif_lite_priv->free_payload_list);
+	}
+
 	return 0;
 }
 
 int cam_vfe_camif_lite_ver2_deinit(
 	struct cam_isp_resource_node  *camif_lite_node)
 {
-	struct cam_vfe_mux_camif_data *camif_lite_priv =
+	struct cam_vfe_mux_camif_lite_data *camif_lite_priv =
 		camif_lite_node->res_priv;
+	int                                 i = 0;
+
+	INIT_LIST_HEAD(&camif_lite_priv->free_payload_list);
+	for (i = 0; i < CAM_VFE_CAMIF_LITE_EVT_MAX; i++)
+		INIT_LIST_HEAD(&camif_lite_priv->evt_payload[i].list);
 
 	camif_lite_node->start = NULL;
 	camif_lite_node->stop  = NULL;
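
Note on the event-payload pool introduced across these mux modules: the IRQ
top halves run in interrupt context, where allocation is forbidden, so each
module preallocates a fixed pool of payloads (CAM_VFE_CAMIF_LITE_EVT_MAX /
CAM_VFE_CAMIF_EVT_MAX = 256) and threads them onto a spinlock-protected free
list at init. The get side runs in the top half, the put side at the end of
the tasklet bottom half; if the tasklet falls behind, get fails with -ENODEV
and the event is dropped with a rate-limited error instead of corrupting
state. A minimal sketch of the pattern follows; the demo_* names are
illustrative, not the driver's API.

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_payload {
	struct list_head list;
	u32 irq_reg_val[4];
};

struct demo_pool {
	spinlock_t lock;
	struct list_head free_list;
	struct demo_payload slots[256];
};

static void demo_pool_init(struct demo_pool *p)
{
	int i;

	spin_lock_init(&p->lock);
	INIT_LIST_HEAD(&p->free_list);
	for (i = 0; i < ARRAY_SIZE(p->slots); i++)
		list_add_tail(&p->slots[i].list, &p->free_list);
}

/* Top half, hardirq context: plain spin_lock is sufficient here. */
static struct demo_payload *demo_pool_get(struct demo_pool *p)
{
	struct demo_payload *pl = NULL;

	spin_lock(&p->lock);
	if (!list_empty(&p->free_list)) {
		pl = list_first_entry(&p->free_list,
			struct demo_payload, list);
		list_del_init(&pl->list);
	}
	spin_unlock(&p->lock);
	return pl;
}

/* Bottom half, tasklet context: must block the IRQ while holding it. */
static void demo_pool_put(struct demo_pool *p, struct demo_payload *pl)
{
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	list_add_tail(&pl->list, &p->free_list);
	spin_unlock_irqrestore(&p->lock, flags);
}
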
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.h
index 291e350..7813e55 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_VFE_CAMIF_LITE_VER2_H_
@@ -9,6 +9,8 @@
 #include "cam_isp_hw.h"
 #include "cam_vfe_top.h"
 
+#define CAM_VFE_CAMIF_LITE_EVT_MAX     256
+
 struct cam_vfe_camif_lite_ver2_reg {
 	uint32_t     camif_lite_cmd;
 	uint32_t     camif_lite_config;
@@ -25,8 +27,10 @@
 	uint32_t     lite_epoch0_irq_mask;
 	uint32_t     dual_pd_reg_upd_irq_mask;
 	uint32_t     lite_eof_irq_mask;
-	uint32_t     lite_error_irq_mask0;
-	uint32_t     lite_error_irq_mask1;
+	uint32_t     lite_err_irq_mask0;
+	uint32_t     lite_err_irq_mask1;
+	uint32_t     lite_subscribe_irq_mask0;
+	uint32_t     lite_subscribe_irq_mask1;
 	uint32_t     extern_reg_update_shift;
 	uint32_t     dual_pd_path_sel_shift;
 };
@@ -45,7 +49,8 @@
 	struct cam_hw_intf            *hw_intf,
 	struct cam_hw_soc_info        *soc_info,
 	void                          *camif_lite_hw_info,
-	struct cam_isp_resource_node  *camif_lite_node);
+	struct cam_isp_resource_node  *camif_lite_node,
+	void                          *vfe_irq_controller);
 
 int cam_vfe_camif_lite_ver2_deinit(
 	struct cam_isp_resource_node  *camif_node);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.c
index 67683cb..5153767 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.c
@@ -12,6 +12,8 @@
 #include "cam_vfe_soc.h"
 #include "cam_vfe_top.h"
 #include "cam_vfe_top_ver3.h"
+#include "cam_irq_controller.h"
+#include "cam_tasklet_util.h"
 #include "cam_vfe_camif_lite_ver3.h"
 #include "cam_debug_util.h"
 #include "cam_cdm_util.h"
@@ -24,8 +26,130 @@
 	struct cam_vfe_camif_lite_ver3_reg_data     *reg_data;
 	struct cam_hw_soc_info                      *soc_info;
 	enum cam_isp_hw_sync_mode                    sync_mode;
+
+	cam_hw_mgr_event_cb_func                     event_cb;
+	void                                        *priv;
+	int                                          irq_err_handle;
+	int                                          irq_handle;
+	void                                        *vfe_irq_controller;
+	struct list_head                             free_payload_list;
+	spinlock_t                                   spin_lock;
+	struct cam_vfe_top_irq_evt_payload
+		evt_payload[CAM_VFE_CAMIF_LITE_EVT_MAX];
 };
 
+static int cam_vfe_camif_lite_get_evt_payload(
+	struct cam_vfe_mux_camif_lite_data     *camif_lite_priv,
+	struct cam_vfe_top_irq_evt_payload    **evt_payload)
+{
+	int rc = 0;
+
+	spin_lock(&camif_lite_priv->spin_lock);
+	if (list_empty(&camif_lite_priv->free_payload_list)) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload");
+		rc = -ENODEV;
+		goto done;
+	}
+
+	*evt_payload = list_first_entry(&camif_lite_priv->free_payload_list,
+		struct cam_vfe_top_irq_evt_payload, list);
+	list_del_init(&(*evt_payload)->list);
+	rc = 0;
+done:
+	spin_unlock(&camif_lite_priv->spin_lock);
+	return rc;
+}
+
+static int cam_vfe_camif_lite_put_evt_payload(
+	struct cam_vfe_mux_camif_lite_data     *camif_lite_priv,
+	struct cam_vfe_top_irq_evt_payload    **evt_payload)
+{
+	unsigned long flags;
+
+	if (!camif_lite_priv) {
+		CAM_ERR(CAM_ISP, "Invalid param core_info NULL");
+		return -EINVAL;
+	}
+	if (*evt_payload == NULL) {
+		CAM_ERR(CAM_ISP, "No payload to put");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&camif_lite_priv->spin_lock, flags);
+	list_add_tail(&(*evt_payload)->list,
+		&camif_lite_priv->free_payload_list);
+	*evt_payload = NULL;
+	spin_unlock_irqrestore(&camif_lite_priv->spin_lock, flags);
+
+	CAM_DBG(CAM_ISP, "Done");
+	return 0;
+}
+
+static int cam_vfe_camif_lite_err_irq_top_half(
+	uint32_t                               evt_id,
+	struct cam_irq_th_payload             *th_payload)
+{
+	int32_t                                rc;
+	int                                    i;
+	struct cam_isp_resource_node          *camif_lite_node;
+	struct cam_vfe_mux_camif_lite_data    *camif_lite_priv;
+	struct cam_vfe_top_irq_evt_payload    *evt_payload;
+	bool                                   error_flag = false;
+
+	CAM_DBG(CAM_ISP, "IRQ status_0 = %x, IRQ status_2 = %x",
+		th_payload->evt_status_arr[0], th_payload->evt_status_arr[2]);
+
+	camif_lite_node = th_payload->handler_priv;
+	camif_lite_priv = camif_lite_node->res_priv;
+	/*
+	 *  need to handle overflow condition here, otherwise irq storm
+	 *  will block everything
+	 */
+	if (th_payload->evt_status_arr[2] || (th_payload->evt_status_arr[0] &
+		camif_lite_priv->reg_data->error_irq_mask0)) {
+		CAM_ERR(CAM_ISP,
+			"CAMIF Lite Err VFE:%d IRQ STATUS_0=0x%x STATUS_2=0x%x",
+			camif_lite_node->hw_intf->hw_idx,
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[2]);
+		CAM_ERR(CAM_ISP, "Stopping further IRQ processing from VFE:%d",
+			camif_lite_node->hw_intf->hw_idx);
+		cam_irq_controller_disable_irq(
+			camif_lite_priv->vfe_irq_controller,
+			camif_lite_priv->irq_err_handle);
+		cam_irq_controller_clear_and_mask(evt_id,
+			camif_lite_priv->vfe_irq_controller);
+		error_flag = true;
+	}
+
+	rc  = cam_vfe_camif_lite_get_evt_payload(camif_lite_priv, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ STATUS_0=0x%x STATUS_2=0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[2]);
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	evt_payload->irq_reg_val[i] = cam_io_r(camif_lite_priv->mem_base +
+		camif_lite_priv->common_reg->violation_status);
+
+	if (error_flag)
+		CAM_INFO(CAM_ISP, "Violation status = 0x%x",
+			evt_payload->irq_reg_val[i]);
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	return rc;
+}
+
+
 static int cam_vfe_camif_lite_get_reg_update(
 	struct cam_isp_resource_node          *camif_lite_res,
 	void                                  *cmd_args,
@@ -96,6 +220,8 @@
 	acquire_data = (struct cam_vfe_acquire_args *)acquire_param;
 
 	camif_lite_data->sync_mode   = acquire_data->vfe_in.sync_mode;
+	camif_lite_data->event_cb    = acquire_data->event_cb;
+	camif_lite_data->priv        = acquire_data->priv;
 
 	CAM_DBG(CAM_ISP, "hw id:%d sync_mode=%d",
 		camif_lite_res->hw_intf->hw_idx,
@@ -108,6 +234,9 @@
 {
 	struct cam_vfe_mux_camif_lite_data   *rsrc_data;
 	uint32_t                              val = 0;
+	int                                   rc = 0;
+	uint32_t err_irq_mask[CAM_IFE_IRQ_REGISTERS_MAX];
+	uint32_t irq_mask[CAM_IFE_IRQ_REGISTERS_MAX];
 
 	if (!camif_lite_res) {
 		CAM_ERR(CAM_ISP, "Invalid input arguments");
@@ -125,6 +254,10 @@
 	rsrc_data = (struct cam_vfe_mux_camif_lite_data *)
 		camif_lite_res->res_priv;
 
+	if (strnstr(rsrc_data->soc_info->compatible, "lite",
+		strlen(rsrc_data->soc_info->compatible)) != NULL)
+		goto skip_core_cfg;
+
 	/* vfe core config */
 	val = cam_io_r_mb(rsrc_data->mem_base +
 		rsrc_data->common_reg->core_cfg_0);
@@ -140,11 +273,12 @@
 	CAM_DBG(CAM_ISP, "hw id:%d core_cfg val:%d",
 		camif_lite_res->hw_intf->hw_idx, val);
 
-	/* epoch config with 20 line */
+	/* epoch config */
 	cam_io_w_mb(rsrc_data->reg_data->epoch_line_cfg,
 		rsrc_data->mem_base +
 		rsrc_data->camif_lite_reg->lite_epoch_irq);
 
+skip_core_cfg:
 	/* Enable Camif */
 	cam_io_w_mb(0x1,
 		rsrc_data->mem_base +
@@ -157,15 +291,57 @@
 		rsrc_data->mem_base +
 		rsrc_data->camif_lite_reg->reg_update_cmd);
 
-	CAM_DBG(CAM_ISP, "Start Camif Lite IFE %d Done",
+	memset(err_irq_mask, 0, sizeof(err_irq_mask));
+	memset(irq_mask, 0, sizeof(irq_mask));
+
+	err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS0] =
+		rsrc_data->reg_data->error_irq_mask0;
+	err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS2] =
+		rsrc_data->reg_data->error_irq_mask2;
+	irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS1] =
+		rsrc_data->reg_data->subscribe_irq_mask1;
+
+	if (!rsrc_data->irq_handle) {
+		rsrc_data->irq_handle = cam_irq_controller_subscribe_irq(
+			rsrc_data->vfe_irq_controller,
+			CAM_IRQ_PRIORITY_0,
+			irq_mask,
+			camif_lite_res,
+			camif_lite_res->top_half_handler,
+			camif_lite_res->bottom_half_handler,
+			camif_lite_res->tasklet_info,
+			&tasklet_bh_api);
+		if (rsrc_data->irq_handle < 1) {
+			CAM_ERR(CAM_ISP, "IRQ handle subscribe failure");
+			rc = -ENOMEM;
+			rsrc_data->irq_handle = 0;
+		}
+	}
+
+	if (!rsrc_data->irq_err_handle) {
+		rsrc_data->irq_err_handle = cam_irq_controller_subscribe_irq(
+			rsrc_data->vfe_irq_controller,
+			CAM_IRQ_PRIORITY_1,
+			err_irq_mask,
+			camif_lite_res,
+			cam_vfe_camif_lite_err_irq_top_half,
+			camif_lite_res->bottom_half_handler,
+			camif_lite_res->tasklet_info,
+			&tasklet_bh_api);
+		if (rsrc_data->irq_err_handle < 1) {
+			CAM_ERR(CAM_ISP, "Error IRQ handle subscribe failure");
+			rc = -ENOMEM;
+			rsrc_data->irq_err_handle = 0;
+		}
+	}
+
+	CAM_DBG(CAM_ISP, "Start CAMIF Lite VFE:%d Done",
 		camif_lite_res->hw_intf->hw_idx);
-	return 0;
+	return rc;
 }
 
 static int cam_vfe_camif_lite_resource_stop(
 	struct cam_isp_resource_node             *camif_lite_res)
 {
 	struct cam_vfe_mux_camif_lite_data       *rsrc_data;
+	int                                       rc = 0;
 
 	if (!camif_lite_res) {
 		CAM_ERR(CAM_ISP, "Invalid input arguments");
@@ -182,14 +358,27 @@
 		(struct cam_vfe_mux_camif_lite_data *)camif_lite_res->res_priv;
 
 	/* Disable Camif */
-	cam_io_w_mb(0x0,
-		rsrc_data->mem_base +
+	cam_io_w_mb(0x0, rsrc_data->mem_base +
 		rsrc_data->camif_lite_reg->lite_module_config);
 
 	if (camif_lite_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
 		camif_lite_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
 
-	return 0;
+	if (rsrc_data->irq_handle > 0) {
+		cam_irq_controller_unsubscribe_irq(
+			rsrc_data->vfe_irq_controller,
+			rsrc_data->irq_handle);
+		rsrc_data->irq_handle = 0;
+	}
+
+	if (rsrc_data->irq_err_handle > 0) {
+		cam_irq_controller_unsubscribe_irq(
+			rsrc_data->vfe_irq_controller,
+			rsrc_data->irq_err_handle);
+		rsrc_data->irq_err_handle = 0;
+	}
+
+	return rc;
 }
 
 static int cam_vfe_camif_lite_process_cmd(
@@ -220,17 +409,50 @@
 static int cam_vfe_camif_lite_handle_irq_top_half(uint32_t evt_id,
 	struct cam_irq_th_payload *th_payload)
 {
-	return -EPERM;
+	int32_t                                rc;
+	int                                    i;
+	struct cam_isp_resource_node          *camif_lite_node;
+	struct cam_vfe_mux_camif_lite_data    *camif_lite_priv;
+	struct cam_vfe_top_irq_evt_payload    *evt_payload;
+
+	camif_lite_node = th_payload->handler_priv;
+	camif_lite_priv = camif_lite_node->res_priv;
+
+	CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
+	CAM_DBG(CAM_ISP, "IRQ status_1 = 0x%x", th_payload->evt_status_arr[1]);
+	CAM_DBG(CAM_ISP, "IRQ status_2 = 0x%x", th_payload->evt_status_arr[2]);
+
+	rc  = cam_vfe_camif_lite_get_evt_payload(camif_lite_priv, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"IRQ status_0: 0x%x status_1: 0x%x status_2: 0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1],
+			th_payload->evt_status_arr[2]);
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
 }
 
 static int cam_vfe_camif_lite_handle_irq_bottom_half(
 	void                                    *handler_priv,
 	void                                    *evt_payload_priv)
 {
-	int                                      ret = CAM_VFE_IRQ_STATUS_ERR;
+	int                                      ret = CAM_VFE_IRQ_STATUS_MAX;
 	struct cam_isp_resource_node            *camif_lite_node;
 	struct cam_vfe_mux_camif_lite_data      *camif_lite_priv;
 	struct cam_vfe_top_irq_evt_payload      *payload;
+	struct cam_isp_hw_event_info             evt_info;
 	uint32_t                                 irq_status0;
 	uint32_t                                 irq_status1;
 	uint32_t                                 irq_status2;
@@ -243,52 +465,72 @@
 	camif_lite_node = handler_priv;
 	camif_lite_priv = camif_lite_node->res_priv;
 	payload         = evt_payload_priv;
+
 	irq_status0     = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
 	irq_status1     = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS1];
-	irq_status2     = payload->irq_reg_val[CAM_IFE_IRQ_VIOLATION_STATUS];
+	irq_status2     = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS2];
 
-	CAM_DBG(CAM_ISP, "event ID:%d", payload->evt_id);
+	evt_info.hw_idx   = camif_lite_node->hw_intf->hw_idx;
+	evt_info.res_id   = camif_lite_node->res_id;
+	evt_info.res_type = camif_lite_node->res_type;
+
 	CAM_DBG(CAM_ISP,
-		"irq_status_0 = 0x%x, irq_status_0 = 0x%x, irq_status_0 = 0x%x",
+		"irq_status_0 = 0x%x, irq_status_1 = 0x%x, irq_status_2 = 0x%x",
 		irq_status0, irq_status1, irq_status2);
 
-	switch (payload->evt_id) {
-	case CAM_ISP_HW_EVENT_SOF:
-		if (irq_status1 &
-			camif_lite_priv->reg_data->sof_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received SOF");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_EPOCH:
-		if (irq_status1 &
-			camif_lite_priv->reg_data->epoch0_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received EPOCH");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_EOF:
-		if (irq_status1 &
-			camif_lite_priv->reg_data->eof_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received EOF\n");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_ERROR:
-		if ((irq_status0 &
-			camif_lite_priv->reg_data->error_irq_mask0) ||
-			(irq_status2 &
-			camif_lite_priv->reg_data->error_irq_mask2)) {
-			CAM_ERR(CAM_ISP, "Received ERROR\n");
-			ret = CAM_ISP_HW_ERROR_OVERFLOW;
-		} else {
-			ret = CAM_ISP_HW_ERROR_NONE;
-		}
-		break;
-	default:
-		break;
+	if (irq_status1 & camif_lite_priv->reg_data->sof_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received SOF");
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+
+		if (camif_lite_priv->event_cb)
+			camif_lite_priv->event_cb(camif_lite_priv->priv,
+				CAM_ISP_HW_EVENT_SOF, (void *)&evt_info);
 	}
 
+	if (irq_status1 & camif_lite_priv->reg_data->epoch0_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received EPOCH");
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+
+		if (camif_lite_priv->event_cb)
+			camif_lite_priv->event_cb(camif_lite_priv->priv,
+				CAM_ISP_HW_EVENT_EPOCH, (void *)&evt_info);
+	}
+
+	if (irq_status1 & camif_lite_priv->reg_data->eof_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received EOF\n");
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+
+		if (camif_lite_priv->event_cb)
+			camif_lite_priv->event_cb(camif_lite_priv->priv,
+				CAM_ISP_HW_EVENT_EOF, (void *)&evt_info);
+	}
+
+	if (irq_status0 & camif_lite_priv->reg_data->error_irq_mask0) {
+		CAM_DBG(CAM_ISP, "Received VFE Overflow ERROR\n");
+
+		evt_info.err_type = CAM_VFE_IRQ_STATUS_OVERFLOW;
+
+		if (camif_lite_priv->event_cb)
+			camif_lite_priv->event_cb(camif_lite_priv->priv,
+				CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info);
+
+		ret = CAM_VFE_IRQ_STATUS_OVERFLOW;
+	}
+
+	if (irq_status2 & camif_lite_priv->reg_data->error_irq_mask2) {
+		CAM_DBG(CAM_ISP, "Received CAMIF Lite Violation ERROR\n");
+
+		evt_info.err_type = CAM_VFE_IRQ_STATUS_VIOLATION;
+
+		if (camif_lite_priv->event_cb)
+			camif_lite_priv->event_cb(camif_lite_priv->priv,
+				CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info);
+
+		ret = CAM_VFE_IRQ_STATUS_VIOLATION;
+	}
+
+	cam_vfe_camif_lite_put_evt_payload(camif_lite_priv, &payload);
+
 	CAM_DBG(CAM_ISP, "returning status = %d", ret);
 	return ret;
 }
@@ -297,12 +539,13 @@
 	struct cam_hw_intf            *hw_intf,
 	struct cam_hw_soc_info        *soc_info,
 	void                          *camif_lite_hw_info,
-	struct cam_isp_resource_node  *camif_lite_node)
+	struct cam_isp_resource_node  *camif_lite_node,
+	void                          *vfe_irq_controller)
 {
 	struct cam_vfe_mux_camif_lite_data       *camif_lite_priv = NULL;
 	struct cam_vfe_camif_lite_ver3_hw_info   *camif_lite_info =
 		camif_lite_hw_info;
-
+	int                                       i = 0;
 	CAM_DBG(CAM_ISP, "res id %d", camif_lite_node->res_id);
 
 	camif_lite_priv = kzalloc(sizeof(*camif_lite_priv),
@@ -319,6 +562,7 @@
 	camif_lite_priv->reg_data         = camif_lite_info->reg_data;
 	camif_lite_priv->hw_intf          = hw_intf;
 	camif_lite_priv->soc_info         = soc_info;
+	camif_lite_priv->vfe_irq_controller = vfe_irq_controller;
 
 	camif_lite_node->init    = NULL;
 	camif_lite_node->deinit  = NULL;
@@ -330,14 +574,27 @@
 	camif_lite_node->bottom_half_handler =
 		cam_vfe_camif_lite_handle_irq_bottom_half;
 
+	spin_lock_init(&camif_lite_priv->spin_lock);
+	INIT_LIST_HEAD(&camif_lite_priv->free_payload_list);
+	for (i = 0; i < CAM_VFE_CAMIF_LITE_EVT_MAX; i++) {
+		INIT_LIST_HEAD(&camif_lite_priv->evt_payload[i].list);
+		list_add_tail(&camif_lite_priv->evt_payload[i].list,
+			&camif_lite_priv->free_payload_list);
+	}
+
 	return 0;
 }
 
 int cam_vfe_camif_lite_ver3_deinit(
 	struct cam_isp_resource_node  *camif_lite_node)
 {
-	struct cam_vfe_mux_camif_data *camif_lite_priv =
+	struct cam_vfe_mux_camif_lite_data *camif_lite_priv =
 		camif_lite_node->res_priv;
+	int                                 i = 0;
+
+	INIT_LIST_HEAD(&camif_lite_priv->free_payload_list);
+	for (i = 0; i < CAM_VFE_CAMIF_LITE_EVT_MAX; i++)
+		INIT_LIST_HEAD(&camif_lite_priv->evt_payload[i].list);
 
 	camif_lite_node->start = NULL;
 	camif_lite_node->stop  = NULL;
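
The error top halves above share one safety-critical ordering: an overflow or
violation can re-assert as fast as it is acknowledged, so the handler first
disables its own error subscription, then clears and masks whatever is
latched, and only then hands a payload to the bottom half for reporting. A
condensed sketch of that containment step; the cam_irq_controller_*
signatures are inferred from the call sites in this patch (the real
prototypes live in cam_irq_controller.h), and the demo_* context struct is
illustrative.

#include <linux/types.h>

/*
 * Signatures inferred from the call sites in this patch; see
 * cam_irq_controller.h for the real prototypes.
 */
int cam_irq_controller_disable_irq(void *irq_controller, int handle);
int cam_irq_controller_clear_and_mask(u32 evt_id, void *irq_controller);

/* Illustrative stand-in for the relevant fields of the mux priv. */
struct demo_err_ctx {
	void *vfe_irq_controller;
	int irq_err_handle;
	u32 fatal_mask0;
};

/* Returns true when containment ran, mirroring error_flag above. */
static bool demo_contain_irq_storm(struct demo_err_ctx *ctx, u32 evt_id,
	u32 status0, u32 status2)
{
	if (!status2 && !(status0 & ctx->fatal_mask0))
		return false;

	/* Stop further deliveries first, then clear what is latched. */
	cam_irq_controller_disable_irq(ctx->vfe_irq_controller,
		ctx->irq_err_handle);
	cam_irq_controller_clear_and_mask(evt_id, ctx->vfe_irq_controller);
	return true;
}
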
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.h
index 45aaa11..5b7c577c 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.h
@@ -10,6 +10,7 @@
 #include "cam_vfe_top.h"
 
 #define CAM_VFE_RDI_VER2_MAX 4
+#define CAM_VFE_CAMIF_LITE_EVT_MAX 256
 
 struct cam_vfe_camif_lite_ver3_reg {
 	uint32_t     lite_hw_version;
@@ -35,6 +36,7 @@
 	uint32_t     eof_irq_mask;
 	uint32_t     error_irq_mask0;
 	uint32_t     error_irq_mask2;
+	uint32_t     subscribe_irq_mask1;
 	uint32_t     enable_diagnostic_hw;
 };
 
@@ -52,7 +54,8 @@
 	struct cam_hw_intf            *hw_intf,
 	struct cam_hw_soc_info        *soc_info,
 	void                          *camif_lite_hw_info,
-	struct cam_isp_resource_node  *camif_lite_node);
+	struct cam_isp_resource_node  *camif_lite_node,
+	void                          *vfe_irq_controller);
 
 int cam_vfe_camif_lite_ver3_deinit(
 	struct cam_isp_resource_node  *camif_node);
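
A convention shared by all four error top halves: the register loop fills
irq_reg_val[0..num_registers-1], then the violation status is read while
still in IRQ context and stored one slot past the last status register,
reusing the loop index after the loop exits. Reading it in the top half
matters because the status may already be cleared by the time the tasklet
runs; it is also why the debug print after the loop indexes with i rather
than a hard-coded slot. A sketch of just that step, with demo_ stand-ins for
the driver's names.

#include <linux/io.h>
#include <linux/types.h>

/*
 * Slots 0..num_regs-1 carry the latched status registers; the slot at
 * [num_regs] carries the violation status, read before the IRQ lines
 * are cleared so the bottom half never has to touch the hardware.
 */
static void demo_fill_payload(u32 *irq_reg_val, const u32 *status,
	int num_regs, void __iomem *mem_base, u32 violation_off)
{
	int i;

	for (i = 0; i < num_regs; i++)
		irq_reg_val[i] = status[i];

	irq_reg_val[i] = readl(mem_base + violation_off);
}
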
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
index 149d45e..27ea3af 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
@@ -12,6 +12,8 @@
 #include "cam_vfe_soc.h"
 #include "cam_vfe_top.h"
 #include "cam_vfe_top_ver2.h"
+#include "cam_irq_controller.h"
+#include "cam_tasklet_util.h"
 #include "cam_vfe_camif_ver2.h"
 #include "cam_debug_util.h"
 #include "cam_cdm_util.h"
@@ -27,6 +29,15 @@
 	struct cam_vfe_camif_reg_data               *reg_data;
 	struct cam_hw_soc_info                      *soc_info;
 
+	cam_hw_mgr_event_cb_func             event_cb;
+	void                                *priv;
+	int                                  irq_err_handle;
+	int                                  irq_handle;
+	void                                *vfe_irq_controller;
+	struct cam_vfe_top_irq_evt_payload   evt_payload[CAM_VFE_CAMIF_EVT_MAX];
+	struct list_head                     free_payload_list;
+	spinlock_t                           spin_lock;
+
 	enum cam_isp_hw_sync_mode          sync_mode;
 	uint32_t                           dsp_mode;
 	uint32_t                           pix_pattern;
@@ -39,6 +50,114 @@
 	uint32_t                           camif_debug;
 };
 
+static int cam_vfe_camif_get_evt_payload(
+	struct cam_vfe_mux_camif_data            *camif_priv,
+	struct cam_vfe_top_irq_evt_payload    **evt_payload)
+{
+	int rc = 0;
+
+	spin_lock(&camif_priv->spin_lock);
+	if (list_empty(&camif_priv->free_payload_list)) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload");
+		rc = -ENODEV;
+		goto done;
+	}
+
+	*evt_payload = list_first_entry(&camif_priv->free_payload_list,
+		struct cam_vfe_top_irq_evt_payload, list);
+	list_del_init(&(*evt_payload)->list);
+done:
+	spin_unlock(&camif_priv->spin_lock);
+	return rc;
+}
+
+static int cam_vfe_camif_put_evt_payload(
+	struct cam_vfe_mux_camif_data            *camif_priv,
+	struct cam_vfe_top_irq_evt_payload    **evt_payload)
+{
+	unsigned long flags;
+
+	if (!camif_priv) {
+		CAM_ERR(CAM_ISP, "Invalid param core_info NULL");
+		return -EINVAL;
+	}
+	if (*evt_payload == NULL) {
+		CAM_ERR(CAM_ISP, "No payload to put");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&camif_priv->spin_lock, flags);
+	list_add_tail(&(*evt_payload)->list, &camif_priv->free_payload_list);
+	*evt_payload = NULL;
+	spin_unlock_irqrestore(&camif_priv->spin_lock, flags);
+
+	CAM_DBG(CAM_ISP, "Done");
+	return 0;
+}
+
+static int cam_vfe_camif_err_irq_top_half(
+	uint32_t                               evt_id,
+	struct cam_irq_th_payload             *th_payload)
+{
+	int32_t                                rc;
+	int                                    i;
+	struct cam_isp_resource_node          *camif_node;
+	struct cam_vfe_mux_camif_data         *camif_priv;
+	struct cam_vfe_top_irq_evt_payload    *evt_payload;
+	bool                                   error_flag = false;
+
+	CAM_DBG(CAM_ISP, "IRQ status_0 = %x, IRQ status_1 = %x",
+		th_payload->evt_status_arr[0], th_payload->evt_status_arr[1]);
+
+	camif_node = th_payload->handler_priv;
+	camif_priv = camif_node->res_priv;
+	/*
+	 *  need to handle overflow condition here, otherwise irq storm
+	 *  will block everything
+	 */
+	if (th_payload->evt_status_arr[1] || (th_payload->evt_status_arr[0] &
+		camif_priv->reg_data->error_irq_mask0)) {
+		CAM_ERR(CAM_ISP,
+			"Camif Error: vfe:%d: IRQ STATUS_0=0x%x STATUS_1=0x%x",
+			camif_node->hw_intf->hw_idx,
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1]);
+		CAM_ERR(CAM_ISP, "Stopping further IRQ processing from vfe=%d",
+			camif_node->hw_intf->hw_idx);
+		cam_irq_controller_disable_irq(camif_priv->vfe_irq_controller,
+			camif_priv->irq_err_handle);
+		cam_irq_controller_clear_and_mask(evt_id,
+			camif_priv->vfe_irq_controller);
+		error_flag = true;
+	}
+
+	rc  = cam_vfe_camif_get_evt_payload(camif_priv, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ STATUS_0=0x%x STATUS_1=0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1]);
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	evt_payload->irq_reg_val[i] = cam_io_r(camif_priv->mem_base +
+		camif_priv->common_reg->violation_status);
+
+	if (error_flag)
+		CAM_INFO(CAM_ISP, "Violation status = 0x%x",
+			evt_payload->irq_reg_val[i]);
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	return rc;
+}
+
 static int cam_vfe_camif_validate_pix_pattern(uint32_t pattern)
 {
 	int rc;
@@ -135,6 +254,8 @@
 	camif_data->last_pixel  = acquire_data->vfe_in.in_port->left_stop;
 	camif_data->first_line  = acquire_data->vfe_in.in_port->line_start;
 	camif_data->last_line   = acquire_data->vfe_in.in_port->line_stop;
+	camif_data->event_cb    = acquire_data->event_cb;
+	camif_data->priv        = acquire_data->priv;
 
 	CAM_DBG(CAM_ISP, "hw id:%d pix_pattern:%d dsp_mode=%d",
 		camif_res->hw_intf->hw_idx,
@@ -194,18 +315,20 @@
 	}
 
 	return rc;
-
 }
 
 static int cam_vfe_camif_resource_start(
-	struct cam_isp_resource_node        *camif_res)
+	struct cam_isp_resource_node   *camif_res)
 {
-	struct cam_vfe_mux_camif_data       *rsrc_data;
-	uint32_t                             val = 0;
-	uint32_t                             epoch0_irq_mask;
-	uint32_t                             epoch1_irq_mask;
-	uint32_t                             computed_epoch_line_cfg;
-	struct cam_vfe_soc_private          *soc_private;
+	struct cam_vfe_mux_camif_data  *rsrc_data;
+	uint32_t                        val = 0;
+	uint32_t                        epoch0_irq_mask;
+	uint32_t                        epoch1_irq_mask;
+	uint32_t                        computed_epoch_line_cfg;
+	int                             rc = 0;
+	uint32_t                        err_irq_mask[CAM_IFE_IRQ_REGISTERS_MAX];
+	uint32_t                        irq_mask[CAM_IFE_IRQ_REGISTERS_MAX];
+	struct cam_vfe_soc_private     *soc_private;
 
 	if (!camif_res) {
 		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
@@ -219,6 +342,14 @@
 	}
 
 	rsrc_data = (struct cam_vfe_mux_camif_data  *)camif_res->res_priv;
+	err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS0] =
+		rsrc_data->reg_data->error_irq_mask0;
+	err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS1] =
+		rsrc_data->reg_data->error_irq_mask1;
+	irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS0] =
+		rsrc_data->reg_data->subscribe_irq_mask0;
+	irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS1] =
+		rsrc_data->reg_data->subscribe_irq_mask1;
 
 	soc_private = rsrc_data->soc_info->soc_private;
 
@@ -302,6 +433,40 @@
 			rsrc_data->camif_reg->vfe_diag_config);
 	}
 
+	if (!rsrc_data->irq_handle) {
+		rsrc_data->irq_handle = cam_irq_controller_subscribe_irq(
+			rsrc_data->vfe_irq_controller,
+			CAM_IRQ_PRIORITY_0,
+			irq_mask,
+			camif_res,
+			camif_res->top_half_handler,
+			camif_res->bottom_half_handler,
+			camif_res->tasklet_info,
+			&tasklet_bh_api);
+		if (rsrc_data->irq_handle < 1) {
+			CAM_ERR(CAM_ISP, "IRQ handle subscribe failure");
+			rc = -ENOMEM;
+			rsrc_data->irq_handle = 0;
+		}
+	}
+
+	if (!rsrc_data->irq_err_handle) {
+		rsrc_data->irq_err_handle = cam_irq_controller_subscribe_irq(
+			rsrc_data->vfe_irq_controller,
+			CAM_IRQ_PRIORITY_1,
+			err_irq_mask,
+			camif_res,
+			cam_vfe_camif_err_irq_top_half,
+			camif_res->bottom_half_handler,
+			camif_res->tasklet_info,
+			&tasklet_bh_api);
+		if (rsrc_data->irq_err_handle < 1) {
+			CAM_ERR(CAM_ISP, "Error IRQ handle subscribe failure");
+			rc = -ENOMEM;
+			rsrc_data->irq_err_handle = 0;
+		}
+	}
+
 	CAM_DBG(CAM_ISP, "Start Camif IFE %d Done", camif_res->hw_intf->hw_idx);
-	return 0;
+	return rc;
 }
@@ -455,6 +620,19 @@
 			camif_priv->camif_reg->vfe_diag_config);
 	}
 
+	if (camif_priv->irq_handle) {
+		cam_irq_controller_unsubscribe_irq(
+			camif_priv->vfe_irq_controller, camif_priv->irq_handle);
+		camif_priv->irq_handle = 0;
+	}
+
+	if (camif_priv->irq_err_handle) {
+		cam_irq_controller_unsubscribe_irq(
+			camif_priv->vfe_irq_controller,
+			camif_priv->irq_err_handle);
+		camif_priv->irq_err_handle = 0;
+	}
+
 	return rc;
 }
 
@@ -514,16 +692,46 @@
 static int cam_vfe_camif_handle_irq_top_half(uint32_t evt_id,
 	struct cam_irq_th_payload *th_payload)
 {
-	return -EPERM;
+	int32_t                                rc;
+	int                                    i;
+	struct cam_isp_resource_node          *camif_node;
+	struct cam_vfe_mux_camif_data         *camif_priv;
+	struct cam_vfe_top_irq_evt_payload    *evt_payload;
+
+	camif_node = th_payload->handler_priv;
+	camif_priv = camif_node->res_priv;
+
+	CAM_DBG(CAM_ISP, "IRQ status_0 = %x", th_payload->evt_status_arr[0]);
+	CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]);
+
+	rc  = cam_vfe_camif_get_evt_payload(camif_priv, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ status0=0x%x status1=0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1]);
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
 }
 
 static int cam_vfe_camif_handle_irq_bottom_half(void *handler_priv,
 	void *evt_payload_priv)
 {
-	int                                   ret = CAM_VFE_IRQ_STATUS_ERR;
+	int                                   ret = CAM_VFE_IRQ_STATUS_MAX;
 	struct cam_isp_resource_node         *camif_node;
 	struct cam_vfe_mux_camif_data        *camif_priv;
 	struct cam_vfe_top_irq_evt_payload   *payload;
+	struct cam_isp_hw_event_info          evt_info;
 	uint32_t                              irq_status0;
 	uint32_t                              irq_status1;
 	uint32_t                              val;
@@ -539,70 +747,104 @@
 	irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
 	irq_status1 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS1];
 
-	CAM_DBG(CAM_ISP, "event ID:%d", payload->evt_id);
-	CAM_DBG(CAM_ISP, "irq_status_0 = %x", irq_status0);
+	evt_info.hw_idx   = camif_node->hw_intf->hw_idx;
+	evt_info.res_id   = camif_node->res_id;
+	evt_info.res_type = camif_node->res_type;
 
-	switch (payload->evt_id) {
-	case CAM_ISP_HW_EVENT_SOF:
-		if (irq_status0 & camif_priv->reg_data->sof_irq_mask) {
-			if ((camif_priv->enable_sof_irq_debug) &&
-				(camif_priv->irq_debug_cnt <=
-				CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX)) {
-				CAM_INFO_RATE_LIMIT(CAM_ISP, "Received SOF");
+	CAM_DBG(CAM_ISP, "irq_status_0 = 0x%x irq_status_1 = 0x%x",
+		irq_status0, irq_status1);
 
-				camif_priv->irq_debug_cnt++;
-				if (camif_priv->irq_debug_cnt ==
-					CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX) {
-					camif_priv->enable_sof_irq_debug =
-						false;
-					camif_priv->irq_debug_cnt = 0;
-				}
-			} else {
-				CAM_DBG(CAM_ISP, "Received SOF");
+	if (irq_status0 & camif_priv->reg_data->sof_irq_mask) {
+		if ((camif_priv->enable_sof_irq_debug) &&
+			(camif_priv->irq_debug_cnt <=
+			CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX)) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP, "Received SOF");
+
+			camif_priv->irq_debug_cnt++;
+			if (camif_priv->irq_debug_cnt ==
+				CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX) {
+				camif_priv->enable_sof_irq_debug =
+					false;
+				camif_priv->irq_debug_cnt = 0;
 			}
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_EPOCH:
-		if (irq_status0 & camif_priv->reg_data->epoch0_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received EPOCH");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_REG_UPDATE:
-		if (irq_status0 & camif_priv->reg_data->reg_update_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received REG_UPDATE_ACK");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_EOF:
-		if (irq_status0 & camif_priv->reg_data->eof_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received EOF\n");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_ERROR:
-		if (irq_status1 & camif_priv->reg_data->error_irq_mask1) {
-			CAM_DBG(CAM_ISP, "Received ERROR\n");
-			ret = CAM_ISP_HW_ERROR_OVERFLOW;
-			cam_vfe_camif_reg_dump(camif_node->res_priv);
-		} else {
-			ret = CAM_ISP_HW_ERROR_NONE;
-		}
+		} else {
+			CAM_DBG(CAM_ISP, "Received SOF");
+		}
 
-		if (camif_priv->camif_debug &
-			CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) {
-			val = cam_io_r(camif_priv->mem_base +
-				camif_priv->camif_reg->vfe_diag_sensor_status);
-			CAM_DBG(CAM_ISP, "VFE_DIAG_SENSOR_STATUS: 0x%x",
-				camif_priv->mem_base, val);
-		}
-		break;
-	default:
-		break;
+		if (camif_priv->event_cb)
+			camif_priv->event_cb(camif_priv->priv,
+				CAM_ISP_HW_EVENT_SOF, (void *)&evt_info);
+
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
 	}
 
-	CAM_DBG(CAM_ISP, "returing status = %d", ret);
+	if (irq_status0 & camif_priv->reg_data->epoch0_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received EPOCH");
+
+		if (camif_priv->event_cb)
+			camif_priv->event_cb(camif_priv->priv,
+				CAM_ISP_HW_EVENT_EPOCH, (void *)&evt_info);
+
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+	}
+
+	if (irq_status0 & camif_priv->reg_data->reg_update_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received REG_UPDATE_ACK");
+
+		if (camif_priv->event_cb)
+			camif_priv->event_cb(camif_priv->priv,
+				CAM_ISP_HW_EVENT_REG_UPDATE, (void *)&evt_info);
+
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+	}
+
+	if (irq_status0 & camif_priv->reg_data->eof_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received EOF\n");
+
+		if (camif_priv->event_cb)
+			camif_priv->event_cb(camif_priv->priv,
+				CAM_ISP_HW_EVENT_EOF, (void *)&evt_info);
+
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+	}
+
+	if (irq_status0 & camif_priv->reg_data->error_irq_mask0) {
+		CAM_DBG(CAM_ISP, "Received ERROR\n");
+
+		if (camif_priv->event_cb)
+			camif_priv->event_cb(camif_priv->priv,
+				CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info);
+
+		CAM_INFO(CAM_ISP, "Violation status = %x",
+			payload->irq_reg_val[2]);
+
+		ret = CAM_VFE_IRQ_STATUS_OVERFLOW;
+		cam_vfe_camif_reg_dump(camif_node->res_priv);
+	}
+
+	if (irq_status1 & camif_priv->reg_data->error_irq_mask1) {
+		CAM_DBG(CAM_ISP, "Received ERROR\n");
+
+		if (camif_priv->event_cb)
+			camif_priv->event_cb(camif_priv->priv,
+				CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info);
+
+		CAM_INFO(CAM_ISP, "Violation status = %x",
+			payload->irq_reg_val[2]);
+
+		ret = CAM_VFE_IRQ_STATUS_OVERFLOW;
+		cam_vfe_camif_reg_dump(camif_node->res_priv);
+	}
+
+	if (camif_priv->camif_debug & CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) {
+		val = cam_io_r(camif_priv->mem_base +
+			camif_priv->camif_reg->vfe_diag_sensor_status);
+		CAM_DBG(CAM_ISP, "VFE_DIAG_SENSOR_STATUS: 0x%x",
+			camif_priv->mem_base, val);
+	}
+
+	cam_vfe_camif_put_evt_payload(camif_priv, &payload);
+
+	CAM_DBG(CAM_ISP, "returning status = %d", ret);
 	return ret;
 }
 
@@ -610,10 +852,12 @@
 	struct cam_hw_intf            *hw_intf,
 	struct cam_hw_soc_info        *soc_info,
 	void                          *camif_hw_info,
-	struct cam_isp_resource_node  *camif_node)
+	struct cam_isp_resource_node  *camif_node,
+	void                          *vfe_irq_controller)
 {
 	struct cam_vfe_mux_camif_data     *camif_priv = NULL;
 	struct cam_vfe_camif_ver2_hw_info *camif_info = camif_hw_info;
+	int                                i = 0;
 
 	camif_priv = kzalloc(sizeof(struct cam_vfe_mux_camif_data),
 		GFP_KERNEL);
@@ -630,6 +874,7 @@
 	camif_priv->reg_data    = camif_info->reg_data;
 	camif_priv->hw_intf     = hw_intf;
 	camif_priv->soc_info    = soc_info;
+	camif_priv->vfe_irq_controller = vfe_irq_controller;
 
 	camif_node->init    = cam_vfe_camif_resource_init;
 	camif_node->deinit  = cam_vfe_camif_resource_deinit;
@@ -639,6 +884,14 @@
 	camif_node->top_half_handler = cam_vfe_camif_handle_irq_top_half;
 	camif_node->bottom_half_handler = cam_vfe_camif_handle_irq_bottom_half;
 
+	spin_lock_init(&camif_priv->spin_lock);
+	INIT_LIST_HEAD(&camif_priv->free_payload_list);
+	for (i = 0; i < CAM_VFE_CAMIF_EVT_MAX; i++) {
+		INIT_LIST_HEAD(&camif_priv->evt_payload[i].list);
+		list_add_tail(&camif_priv->evt_payload[i].list,
+			&camif_priv->free_payload_list);
+	}
+
 	return 0;
 }
 
@@ -646,6 +899,11 @@
 	struct cam_isp_resource_node  *camif_node)
 {
 	struct cam_vfe_mux_camif_data *camif_priv = camif_node->res_priv;
+	int                            i = 0;
+
+	INIT_LIST_HEAD(&camif_priv->free_payload_list);
+	for (i = 0; i < CAM_VFE_CAMIF_EVT_MAX; i++)
+		INIT_LIST_HEAD(&camif_priv->evt_payload[i].list);
 
 	camif_node->start = NULL;
 	camif_node->stop  = NULL;
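
resource_start()/resource_stop() now own the IRQ subscription lifecycle:
start lazily subscribes (priority 0 for frame events through the node's own
top half, priority 1 for errors through the dedicated error top half), treats
any return below 1 as failure, and stop unsubscribes and zeroes the handles
so a later start can subscribe again. A minimal pairing under the same
conventions; demo_subscribe()/demo_unsubscribe() stand in for
cam_irq_controller_subscribe_irq()/_unsubscribe_irq(), whose real parameter
lists appear at the call sites above.

#include <linux/errno.h>

/*
 * Stand-ins for the controller subscribe/unsubscribe pair; like the
 * real calls, subscribe returns a positive handle on success.
 */
int demo_subscribe(void *controller);
int demo_unsubscribe(void *controller, int handle);

struct demo_rsrc {
	void *controller;
	int irq_handle;
};

static int demo_start(struct demo_rsrc *r)
{
	/* Idempotent: repeated starts keep the existing subscription. */
	if (!r->irq_handle) {
		r->irq_handle = demo_subscribe(r->controller);
		if (r->irq_handle < 1) {
			/*
			 * Reset the failed handle, or the next start
			 * would skip the re-subscribe attempt above.
			 */
			r->irq_handle = 0;
			return -ENOMEM;
		}
	}
	return 0;
}

static int demo_stop(struct demo_rsrc *r)
{
	if (r->irq_handle > 0) {
		demo_unsubscribe(r->controller, r->irq_handle);
		r->irq_handle = 0;
	}
	return 0;
}
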
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
index 2927f35..e1cbc94 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_VFE_CAMIF_VER2_H_
@@ -14,6 +14,8 @@
  */
 #define CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS      BIT(0)
 
+#define CAM_VFE_CAMIF_EVT_MAX                      256
+
 struct cam_vfe_camif_ver2_reg {
 	uint32_t     camif_cmd;
 	uint32_t     camif_config;
@@ -63,6 +65,8 @@
 	uint32_t     eof_irq_mask;
 	uint32_t     error_irq_mask0;
 	uint32_t     error_irq_mask1;
+	uint32_t     subscribe_irq_mask0;
+	uint32_t     subscribe_irq_mask1;
 
 	uint32_t     enable_diagnostic_hw;
 };
@@ -81,7 +85,8 @@
 	struct cam_hw_intf            *hw_intf,
 	struct cam_hw_soc_info        *soc_info,
 	void                          *camif_hw_info,
-	struct cam_isp_resource_node  *camif_node);
+	struct cam_isp_resource_node  *camif_node,
+	void                          *vfe_irq_controller);
 
 int cam_vfe_camif_ver2_deinit(
 	struct cam_isp_resource_node  *camif_node);
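
The new subscribe_irq_mask0/subscribe_irq_mask1 fields move the choice of
which status bits the frame-event subscription requests out of the mux source
and into each target's reg-data table, next to the existing per-event masks.
A hypothetical table entry, assuming this header is in scope; the field names
are real, the values purely illustrative.

static struct cam_vfe_camif_reg_data demo_camif_reg_data = {
	/* per-event and error masks elided */
	.subscribe_irq_mask0 = 0x00000017,	/* illustrative bit values */
	.subscribe_irq_mask1 = 0x00000000,
};
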
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c
index a14f4df..ae1b8e5 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c
@@ -12,6 +12,8 @@
 #include "cam_vfe_soc.h"
 #include "cam_vfe_top.h"
 #include "cam_vfe_top_ver3.h"
+#include "cam_irq_controller.h"
+#include "cam_tasklet_util.h"
 #include "cam_vfe_camif_ver3.h"
 #include "cam_debug_util.h"
 #include "cam_cdm_util.h"
@@ -28,6 +30,15 @@
 	struct cam_vfe_camif_ver3_reg_data          *reg_data;
 	struct cam_hw_soc_info                      *soc_info;
 
+	cam_hw_mgr_event_cb_func             event_cb;
+	void                                *priv;
+	int                                  irq_err_handle;
+	int                                  irq_handle;
+	void                                *vfe_irq_controller;
+	struct cam_vfe_top_irq_evt_payload   evt_payload[CAM_VFE_CAMIF_EVT_MAX];
+	struct list_head                     free_payload_list;
+	spinlock_t                           spin_lock;
+
 	enum cam_isp_hw_sync_mode          sync_mode;
 	uint32_t                           dsp_mode;
 	uint32_t                           pix_pattern;
@@ -40,6 +51,114 @@
 	uint32_t                           camif_debug;
 };
 
+static int cam_vfe_camif_ver3_get_evt_payload(
+	struct cam_vfe_mux_camif_ver3_data     *camif_priv,
+	struct cam_vfe_top_irq_evt_payload    **evt_payload)
+{
+	int rc = 0;
+
+	spin_lock(&camif_priv->spin_lock);
+	if (list_empty(&camif_priv->free_payload_list)) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload");
+		rc = -ENODEV;
+		goto done;
+	}
+
+	*evt_payload = list_first_entry(&camif_priv->free_payload_list,
+		struct cam_vfe_top_irq_evt_payload, list);
+	list_del_init(&(*evt_payload)->list);
+done:
+	spin_unlock(&camif_priv->spin_lock);
+	return rc;
+}
+
+static int cam_vfe_camif_ver3_put_evt_payload(
+	struct cam_vfe_mux_camif_ver3_data     *camif_priv,
+	struct cam_vfe_top_irq_evt_payload    **evt_payload)
+{
+	unsigned long flags;
+
+	if (!camif_priv) {
+		CAM_ERR(CAM_ISP, "Invalid param core_info NULL");
+		return -EINVAL;
+	}
+	if (*evt_payload == NULL) {
+		CAM_ERR(CAM_ISP, "No payload to put");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&camif_priv->spin_lock, flags);
+	list_add_tail(&(*evt_payload)->list, &camif_priv->free_payload_list);
+	*evt_payload = NULL;
+	spin_unlock_irqrestore(&camif_priv->spin_lock, flags);
+
+	CAM_DBG(CAM_ISP, "Done");
+	return 0;
+}
+
+static int cam_vfe_camif_ver3_err_irq_top_half(
+	uint32_t                               evt_id,
+	struct cam_irq_th_payload             *th_payload)
+{
+	int32_t                                rc;
+	int                                    i;
+	struct cam_isp_resource_node          *camif_node;
+	struct cam_vfe_mux_camif_ver3_data    *camif_priv;
+	struct cam_vfe_top_irq_evt_payload    *evt_payload;
+	bool                                   error_flag = false;
+
+	CAM_DBG(CAM_ISP, "IRQ status_0 = %x, IRQ status_2 = %x",
+		th_payload->evt_status_arr[0], th_payload->evt_status_arr[2]);
+
+	camif_node = th_payload->handler_priv;
+	camif_priv = camif_node->res_priv;
+	/*
+	 *  need to handle overflow condition here, otherwise irq storm
+	 *  will block everything
+	 */
+	if (th_payload->evt_status_arr[2] || (th_payload->evt_status_arr[0] &
+		camif_priv->reg_data->error_irq_mask0)) {
+		CAM_ERR(CAM_ISP,
+			"CAMIF Err VFE:%d: IRQ STATUS_0=0x%x STATUS_2=0x%x",
+			camif_node->hw_intf->hw_idx,
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[2]);
+		CAM_ERR(CAM_ISP, "Stopping further IRQ processing from VFE:%d",
+			camif_node->hw_intf->hw_idx);
+		cam_irq_controller_disable_irq(camif_priv->vfe_irq_controller,
+			camif_priv->irq_err_handle);
+		cam_irq_controller_clear_and_mask(evt_id,
+			camif_priv->vfe_irq_controller);
+		error_flag = true;
+	}
+
+	rc  = cam_vfe_camif_ver3_get_evt_payload(camif_priv, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ STATUS_0=0x%x STATUS_2=0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[2]);
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	evt_payload->irq_reg_val[i] = cam_io_r(camif_priv->mem_base +
+		camif_priv->common_reg->violation_status);
+
+	if (error_flag)
+		CAM_INFO(CAM_ISP, "Violation status = 0x%x",
+			evt_payload->irq_reg_val[i]);
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	return rc;
+}
+
 static int cam_vfe_camif_ver3_validate_pix_pattern(uint32_t pattern)
 {
 	int rc;
@@ -140,6 +259,8 @@
 	camif_data->last_pixel  = acquire_data->vfe_in.in_port->left_stop;
 	camif_data->first_line  = acquire_data->vfe_in.in_port->line_start;
 	camif_data->last_line   = acquire_data->vfe_in.in_port->line_stop;
+	camif_data->event_cb    = acquire_data->event_cb;
+	camif_data->priv        = acquire_data->priv;
 
 	CAM_DBG(CAM_ISP, "hw id:%d pix_pattern:%d dsp_mode=%d",
 		camif_res->hw_intf->hw_idx,
@@ -227,6 +348,9 @@
 	uint32_t                             epoch0_line_cfg;
 	uint32_t                             epoch1_line_cfg;
 	uint32_t                             computed_epoch_line_cfg;
+	int                                  rc = 0;
+	uint32_t                        err_irq_mask[CAM_IFE_IRQ_REGISTERS_MAX];
+	uint32_t                        irq_mask[CAM_IFE_IRQ_REGISTERS_MAX];
 	struct cam_vfe_soc_private          *soc_private;
 
 	if (!camif_res) {
@@ -240,7 +364,16 @@
 		return -EINVAL;
 	}
 
+	memset(err_irq_mask, 0, sizeof(err_irq_mask));
+	memset(irq_mask, 0, sizeof(irq_mask));
+
 	rsrc_data = (struct cam_vfe_mux_camif_ver3_data *)camif_res->res_priv;
+	err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS0] =
+		rsrc_data->reg_data->error_irq_mask0;
+	err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS2] =
+		rsrc_data->reg_data->error_irq_mask2;
+	irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS1] =
+		rsrc_data->reg_data->subscribe_irq_mask1;
 
 	soc_private = rsrc_data->soc_info->soc_private;
 
@@ -337,6 +470,41 @@
 			rsrc_data->common_reg->diag_config);
 	}
 
+	if (!rsrc_data->irq_handle) {
+		rsrc_data->irq_handle = cam_irq_controller_subscribe_irq(
+			rsrc_data->vfe_irq_controller,
+			CAM_IRQ_PRIORITY_0,
+			irq_mask,
+			camif_res,
+			camif_res->top_half_handler,
+			camif_res->bottom_half_handler,
+			camif_res->tasklet_info,
+			&tasklet_bh_api);
+		if (rsrc_data->irq_handle < 1) {
+			CAM_ERR(CAM_ISP, "IRQ handle subscribe failure");
+			rc = -ENOMEM;
+			rsrc_data->irq_handle = 0;
+		}
+	}
+
+	if (!rsrc_data->irq_err_handle) {
+		rsrc_data->irq_err_handle = cam_irq_controller_subscribe_irq(
+			rsrc_data->vfe_irq_controller,
+			CAM_IRQ_PRIORITY_1,
+			err_irq_mask,
+			camif_res,
+			cam_vfe_camif_ver3_err_irq_top_half,
+			camif_res->bottom_half_handler,
+			camif_res->tasklet_info,
+			&tasklet_bh_api);
+		if (rsrc_data->irq_err_handle < 1) {
+			CAM_ERR(CAM_ISP, "Error IRQ handle subscribe failure");
+			rc = -ENOMEM;
+			rsrc_data->irq_err_handle = 0;
+		}
+	}
+
+	CAM_DBG(CAM_ISP, "Start Camif IFE %d Done", camif_res->hw_intf->hw_idx);
-	return 0;
+	return rc;
 }
 
@@ -489,6 +657,19 @@
 			camif_priv->common_reg->diag_config);
 	}
 
+	if (camif_priv->irq_handle) {
+		cam_irq_controller_unsubscribe_irq(
+			camif_priv->vfe_irq_controller, camif_priv->irq_handle);
+		camif_priv->irq_handle = 0;
+	}
+
+	if (camif_priv->irq_err_handle) {
+		cam_irq_controller_unsubscribe_irq(
+			camif_priv->vfe_irq_controller,
+			camif_priv->irq_err_handle);
+		camif_priv->irq_err_handle = 0;
+	}
+
 	return rc;
 }
 
@@ -551,7 +732,39 @@
 static int cam_vfe_camif_ver3_handle_irq_top_half(uint32_t evt_id,
 	struct cam_irq_th_payload *th_payload)
 {
-	return -EPERM;
+	int32_t                                rc;
+	int                                    i;
+	struct cam_isp_resource_node          *camif_node;
+	struct cam_vfe_mux_camif_ver3_data    *camif_priv;
+	struct cam_vfe_top_irq_evt_payload    *evt_payload;
+
+	camif_node = th_payload->handler_priv;
+	camif_priv = camif_node->res_priv;
+
+	CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
+	CAM_DBG(CAM_ISP, "IRQ status_1 = 0x%x", th_payload->evt_status_arr[1]);
+	CAM_DBG(CAM_ISP, "IRQ status_2 = 0x%x", th_payload->evt_status_arr[2]);
+
+	rc  = cam_vfe_camif_ver3_get_evt_payload(camif_priv, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"IRQ status_0: 0x%x status_1 : 0x%x status_2: 0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1],
+			th_payload->evt_status_arr[2]);
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
 }
 
 static int cam_vfe_camif_ver3_handle_irq_bottom_half(void *handler_priv,
@@ -561,6 +774,7 @@
 	struct cam_isp_resource_node         *camif_node;
 	struct cam_vfe_mux_camif_ver3_data   *camif_priv;
 	struct cam_vfe_top_irq_evt_payload   *payload;
+	struct cam_isp_hw_event_info          evt_info;
 	uint32_t                              irq_status0;
 	uint32_t                              irq_status1;
 	uint32_t                              irq_status2;
@@ -578,66 +792,91 @@
 	payload = evt_payload_priv;
 	irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
 	irq_status1 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS1];
+	irq_status2 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS2];
 
-	CAM_DBG(CAM_ISP,
-		"evt_id:%d, irq_status0:0x%x, irq_status1:0x%x, irq_status2:0x%x",
-		payload->evt_id, irq_status0, irq_status1, irq_status2);
+	evt_info.hw_idx   = camif_node->hw_intf->hw_idx;
+	evt_info.res_id   = camif_node->res_id;
+	evt_info.res_type = camif_node->res_type;
 
-	switch (payload->evt_id) {
-	case CAM_ISP_HW_EVENT_SOF:
-		if (irq_status1 & camif_priv->reg_data->sof_irq_mask) {
-			if ((camif_priv->enable_sof_irq_debug) &&
-				(camif_priv->irq_debug_cnt <=
-				CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX)) {
-				CAM_INFO_RATE_LIMIT(CAM_ISP, "Received SOF");
+	if (irq_status1 & camif_priv->reg_data->sof_irq_mask) {
+		if ((camif_priv->enable_sof_irq_debug) &&
+			(camif_priv->irq_debug_cnt <=
+			CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX)) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP, "Received SOF");
 
-				camif_priv->irq_debug_cnt++;
-				if (camif_priv->irq_debug_cnt ==
-					CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX) {
-					camif_priv->enable_sof_irq_debug =
-						false;
-					camif_priv->irq_debug_cnt = 0;
-				}
-			} else {
-				CAM_DBG(CAM_ISP, "Received SOF");
+			camif_priv->irq_debug_cnt++;
+			if (camif_priv->irq_debug_cnt ==
+				CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX) {
+				camif_priv->enable_sof_irq_debug =
+					false;
+				camif_priv->irq_debug_cnt = 0;
 			}
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_EPOCH:
-		if (irq_status1 & camif_priv->reg_data->epoch0_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received EPOCH");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_EOF:
-		if (irq_status1 & camif_priv->reg_data->eof_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received EOF");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_ERROR:
-		if (irq_status2 & camif_priv->reg_data->error_irq_mask2) {
-			CAM_DBG(CAM_ISP, "Received ERROR");
-			ret = CAM_ISP_HW_ERROR_OVERFLOW;
-			cam_vfe_camif_ver3_reg_dump(camif_node->res_priv);
-		} else {
-			ret = CAM_ISP_HW_ERROR_NONE;
-		}
+		} else {
+			CAM_DBG(CAM_ISP, "Received SOF");
+		}
 
-		if (camif_priv->camif_debug &
-			CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) {
-			val = cam_io_r(camif_priv->mem_base +
-				camif_priv->common_reg->diag_sensor_status_0);
-			CAM_DBG(CAM_ISP, "VFE_DIAG_SENSOR_STATUS: 0x%x",
-				camif_priv->mem_base, val);
-		}
-		break;
-	default:
-		break;
+		if (camif_priv->event_cb)
+			camif_priv->event_cb(camif_priv->priv,
+				CAM_ISP_HW_EVENT_SOF, (void *)&evt_info);
+
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
 	}
 
-	CAM_DBG(CAM_ISP, "returing status = %d", ret);
+	if (irq_status1 & camif_priv->reg_data->epoch0_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received EPOCH");
+
+		if (camif_priv->event_cb)
+			camif_priv->event_cb(camif_priv->priv,
+				CAM_ISP_HW_EVENT_EPOCH, (void *)&evt_info);
+
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+	}
+
+	if (irq_status1 & camif_priv->reg_data->eof_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received EOF");
+
+		if (camif_priv->event_cb)
+			camif_priv->event_cb(camif_priv->priv,
+				CAM_ISP_HW_EVENT_EOF, (void *)&evt_info);
+
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+	}
+
+	if (irq_status0 & camif_priv->reg_data->error_irq_mask0) {
+		CAM_ERR(CAM_ISP, "Received VFE Overflow ERROR\n");
+
+		evt_info.err_type = CAM_VFE_IRQ_STATUS_OVERFLOW;
+
+		if (camif_priv->event_cb)
+			camif_priv->event_cb(camif_priv->priv,
+				CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info);
+
+		ret = CAM_VFE_IRQ_STATUS_OVERFLOW;
+		cam_vfe_camif_ver3_reg_dump(camif_node->res_priv);
+	}
+
+	if (irq_status2 & camif_priv->reg_data->error_irq_mask2) {
+		CAM_ERR(CAM_ISP, "Received CAMIF Violation ERROR\n");
+
+		evt_info.err_type = CAM_VFE_IRQ_STATUS_VIOLATION;
+
+		if (camif_priv->event_cb)
+			camif_priv->event_cb(camif_priv->priv,
+				CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info);
+
+		ret = CAM_VFE_IRQ_STATUS_VIOLATION;
+		cam_vfe_camif_ver3_reg_dump(camif_node->res_priv);
+	}
+
+	if (camif_priv->camif_debug & CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) {
+		val = cam_io_r(camif_priv->mem_base +
+			camif_priv->common_reg->diag_sensor_status_0);
+		CAM_DBG(CAM_ISP, "VFE_DIAG_SENSOR_STATUS: 0x%x", val);
+	}
+
+	cam_vfe_camif_ver3_put_evt_payload(camif_priv, &payload);
+
+	CAM_DBG(CAM_ISP, "returning status = %d", ret);
 	return ret;
 }
 
@@ -645,10 +884,12 @@
 	struct cam_hw_intf            *hw_intf,
 	struct cam_hw_soc_info        *soc_info,
 	void                          *camif_hw_info,
-	struct cam_isp_resource_node  *camif_node)
+	struct cam_isp_resource_node  *camif_node,
+	void                          *vfe_irq_controller)
 {
 	struct cam_vfe_mux_camif_ver3_data *camif_priv = NULL;
 	struct cam_vfe_camif_ver3_hw_info *camif_info = camif_hw_info;
+	int i = 0;
 
 	camif_priv = kzalloc(sizeof(struct cam_vfe_mux_camif_ver3_data),
 		GFP_KERNEL);
@@ -663,6 +904,7 @@
 	camif_priv->reg_data    = camif_info->reg_data;
 	camif_priv->hw_intf     = hw_intf;
 	camif_priv->soc_info    = soc_info;
+	camif_priv->vfe_irq_controller = vfe_irq_controller;
 
 	camif_node->init    = cam_vfe_camif_ver3_resource_init;
 	camif_node->deinit  = cam_vfe_camif_ver3_resource_deinit;
@@ -672,6 +914,13 @@
 	camif_node->top_half_handler = cam_vfe_camif_ver3_handle_irq_top_half;
 	camif_node->bottom_half_handler =
 		cam_vfe_camif_ver3_handle_irq_bottom_half;
+	spin_lock_init(&camif_priv->spin_lock);
+	INIT_LIST_HEAD(&camif_priv->free_payload_list);
+	for (i = 0; i < CAM_VFE_CAMIF_EVT_MAX; i++) {
+		INIT_LIST_HEAD(&camif_priv->evt_payload[i].list);
+		list_add_tail(&camif_priv->evt_payload[i].list,
+			&camif_priv->free_payload_list);
+	}
 
 	return 0;
 }
@@ -680,6 +929,7 @@
 	struct cam_isp_resource_node  *camif_node)
 {
 	struct cam_vfe_mux_camif_ver3_data *camif_priv;
+	int i = 0;
 
 	if (!camif_node) {
 		CAM_ERR(CAM_ISP, "Error, camif_node is NULL %pK", camif_node);
@@ -688,6 +938,12 @@
 
 	camif_priv = camif_node->res_priv;
 
+	INIT_LIST_HEAD(&camif_priv->free_payload_list);
+	for (i = 0; i < CAM_VFE_CAMIF_EVT_MAX; i++)
+		INIT_LIST_HEAD(&camif_priv->evt_payload[i].list);
+
 	camif_node->start = NULL;
 	camif_node->stop  = NULL;
 	camif_node->process_cmd = NULL;
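
The bottom half above no longer switches on payload->evt_id: it tests each mask bit in the latched status word and notifies event_cb once per event, so a single IRQ can fan out SOF, EPOCH and EOF in one pass before the payload goes back to the pool. For reference, that dispatch shape reduced to a standalone sketch (all names here are illustrative, not the driver's API):

/* Standalone sketch: fan out multiple events from one IRQ status word.
 * Sequential ifs matter because several bits can latch between reads. */
#include <stdio.h>
#include <stdint.h>

enum evt { EVT_SOF, EVT_EPOCH, EVT_EOF };

struct reg_data { uint32_t sof_mask, epoch_mask, eof_mask; };

typedef void (*event_cb_t)(void *priv, enum evt e);

static void dispatch(uint32_t status, const struct reg_data *rd,
		     event_cb_t cb, void *priv)
{
	if (status & rd->sof_mask)
		cb(priv, EVT_SOF);
	if (status & rd->epoch_mask)
		cb(priv, EVT_EPOCH);
	if (status & rd->eof_mask)
		cb(priv, EVT_EOF);
}

static void print_cb(void *priv, enum evt e)
{
	(void)priv;
	printf("event %d\n", e);
}

int main(void)
{
	struct reg_data rd = { .sof_mask = 1u << 0, .epoch_mask = 1u << 1,
			       .eof_mask = 1u << 2 };

	/* SOF and EOF latched in the same IRQ: both callbacks fire. */
	dispatch((1u << 0) | (1u << 2), &rd, print_cb, NULL);
	return 0;
}
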
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.h
index 70daa13..1292829 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.h
@@ -13,6 +13,7 @@
  * Debug values for camif module
  */
 #define CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS      BIT(0)
+#define CAM_VFE_CAMIF_EVT_MAX                      256
 
 struct cam_vfe_camif_ver3_pp_clc_reg {
 	uint32_t     hw_version;
@@ -52,10 +53,10 @@
 	uint32_t     sof_irq_mask;
 	uint32_t     epoch0_irq_mask;
 	uint32_t     epoch1_irq_mask;
-	uint32_t     reg_update_irq_mask;
 	uint32_t     eof_irq_mask;
 	uint32_t     error_irq_mask0;
 	uint32_t     error_irq_mask2;
+	uint32_t     subscribe_irq_mask1;
 
 	uint32_t     enable_diagnostic_hw;
 	uint32_t     pp_camif_cfg_en_shift;
@@ -76,7 +77,8 @@
 	struct cam_hw_intf            *hw_intf,
 	struct cam_hw_soc_info        *soc_info,
 	void                          *camif_hw_info,
-	struct cam_isp_resource_node  *camif_node);
+	struct cam_isp_resource_node  *camif_node,
+	void                          *vfe_irq_controller);
 
 int cam_vfe_camif_ver3_deinit(
 	struct cam_isp_resource_node  *camif_node);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_fe_ver1.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_fe_ver1.c
index 3a4036b..941523a 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_fe_ver1.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_fe_ver1.c
@@ -488,8 +488,9 @@
 {
 	int                                   ret = CAM_VFE_IRQ_STATUS_ERR;
 	struct cam_isp_resource_node         *fe_node;
-	struct cam_vfe_mux_fe_data        *fe_priv;
+	struct cam_vfe_mux_fe_data           *fe_priv;
 	struct cam_vfe_top_irq_evt_payload   *payload;
+	struct cam_isp_hw_event_info          evt_info;
 	uint32_t                              irq_status0;
 	uint32_t                              irq_status1;
 
@@ -504,59 +505,53 @@
 	irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
 	irq_status1 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS1];
 
-	CAM_DBG(CAM_ISP, "event ID:%d, irq_status_0 = 0x%x",
-			payload->evt_id, irq_status0);
+	evt_info.hw_idx = fe_node->hw_intf->hw_idx;
+	evt_info.res_id = fe_node->res_id;
+	evt_info.res_type = fe_node->res_type;
 
-	switch (payload->evt_id) {
-	case CAM_ISP_HW_EVENT_SOF:
-		if (irq_status0 & fe_priv->reg_data->sof_irq_mask) {
-			if ((fe_priv->enable_sof_irq_debug) &&
-				(fe_priv->irq_debug_cnt <=
-				CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX)) {
-				CAM_INFO_RATE_LIMIT(CAM_ISP, "Received SOF");
+	CAM_DBG(CAM_ISP, "irq_status_0 = 0x%x", irq_status0);
 
-				fe_priv->irq_debug_cnt++;
-				if (fe_priv->irq_debug_cnt ==
-					CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX) {
-					fe_priv->enable_sof_irq_debug =
-						false;
-					fe_priv->irq_debug_cnt = 0;
-				}
-			} else {
-				CAM_DBG(CAM_ISP, "Received SOF");
+	if (irq_status0 & fe_priv->reg_data->sof_irq_mask) {
+		if ((fe_priv->enable_sof_irq_debug) &&
+			(fe_priv->irq_debug_cnt <=
+			CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX)) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP, "Received SOF");
+
+			fe_priv->irq_debug_cnt++;
+			if (fe_priv->irq_debug_cnt ==
+				CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX) {
+				fe_priv->enable_sof_irq_debug =
+					false;
+				fe_priv->irq_debug_cnt = 0;
 			}
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_EPOCH:
-		if (irq_status0 & fe_priv->reg_data->epoch0_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received EPOCH");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_REG_UPDATE:
-		if (irq_status0 & fe_priv->reg_data->reg_update_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received REG_UPDATE_ACK");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_EOF:
-		if (irq_status0 & fe_priv->reg_data->eof_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received EOF\n");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_ERROR:
-		if (irq_status1 & fe_priv->reg_data->error_irq_mask1) {
-			CAM_DBG(CAM_ISP, "Received ERROR\n");
-			ret = CAM_ISP_HW_ERROR_OVERFLOW;
-			cam_vfe_fe_reg_dump(fe_node);
 		} else {
-			ret = CAM_ISP_HW_ERROR_NONE;
+			CAM_DBG(CAM_ISP, "Received SOF");
 		}
-		break;
-	default:
-		break;
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+	}
+
+	if (irq_status0 & fe_priv->reg_data->epoch0_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received EPOCH");
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+	}
+
+	if (irq_status0 & fe_priv->reg_data->reg_update_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received REG_UPDATE_ACK");
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+	}
+
+	if (irq_status0 & fe_priv->reg_data->eof_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received EOF\n");
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+	}
+
+	if (irq_status1 & fe_priv->reg_data->error_irq_mask1) {
+		CAM_DBG(CAM_ISP, "Received ERROR\n");
+		ret = CAM_ISP_HW_ERROR_OVERFLOW;
+		evt_info.err_type = CAM_VFE_IRQ_STATUS_OVERFLOW;
+		cam_vfe_fe_reg_dump(fe_node);
+	} else {
+		ret = CAM_ISP_HW_ERROR_NONE;
 	}
 
 	CAM_DBG(CAM_ISP, "returning status = %d", ret);
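
The SOF branch keeps the opt-in debug mode: the first CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX SOFs log at INFO, after which the mode disarms itself and falls back to the quiet DBG print. That counter logic, isolated as a runnable sketch (the threshold value is invented here):

/* Isolated sketch of the self-disarming SOF debug counter: log the first
 * N SOFs loudly, then drop back to quiet logging. N is invented. */
#include <stdio.h>
#include <stdbool.h>

#define SOF_DEBUG_CNT_MAX 3

struct sof_dbg { bool enable; int cnt; };

static void on_sof(struct sof_dbg *d)
{
	if (d->enable && d->cnt <= SOF_DEBUG_CNT_MAX) {
		printf("INFO: Received SOF (%d)\n", d->cnt);
		if (++d->cnt == SOF_DEBUG_CNT_MAX) {
			d->enable = false;   /* disarm after the burst */
			d->cnt = 0;
		}
	} else {
		printf("dbg: Received SOF\n");
	}
}

int main(void)
{
	struct sof_dbg d = { .enable = true, .cnt = 0 };
	for (int i = 0; i < 6; i++)
		on_sof(&d);
	return 0;
}
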
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
index 49079a8..0b230ce 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/slab.h>
@@ -8,20 +8,138 @@
 #include "cam_isp_hw_mgr_intf.h"
 #include "cam_isp_hw.h"
 #include "cam_vfe_hw_intf.h"
+#include "cam_vfe_top_ver2.h"
 #include "cam_io_util.h"
 #include "cam_debug_util.h"
 #include "cam_cdm_util.h"
+#include "cam_irq_controller.h"
+#include "cam_tasklet_util.h"
 
 struct cam_vfe_mux_rdi_data {
 	void __iomem                                *mem_base;
 	struct cam_hw_intf                          *hw_intf;
 	struct cam_vfe_top_ver2_reg_offset_common   *common_reg;
 	struct cam_vfe_rdi_ver2_reg                 *rdi_reg;
+	struct cam_vfe_rdi_common_reg_data          *rdi_common_reg_data;
 	struct cam_vfe_rdi_reg_data                 *reg_data;
 
+	cam_hw_mgr_event_cb_func              event_cb;
+	void                                 *priv;
+	int                                   irq_err_handle;
+	int                                   irq_handle;
+	void                                 *vfe_irq_controller;
+	struct cam_vfe_top_irq_evt_payload    evt_payload[CAM_VFE_RDI_EVT_MAX];
+	struct list_head                      free_payload_list;
+	spinlock_t                            spin_lock;
+
 	enum cam_isp_hw_sync_mode          sync_mode;
 };
 
+static int cam_vfe_rdi_get_evt_payload(
+	struct cam_vfe_mux_rdi_data              *rdi_priv,
+	struct cam_vfe_top_irq_evt_payload      **evt_payload)
+{
+	int rc = 0;
+
+	spin_lock(&rdi_priv->spin_lock);
+	if (list_empty(&rdi_priv->free_payload_list)) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload");
+		rc = -ENODEV;
+		goto done;
+	}
+
+	*evt_payload = list_first_entry(&rdi_priv->free_payload_list,
+		struct cam_vfe_top_irq_evt_payload, list);
+	list_del_init(&(*evt_payload)->list);
+	rc = 0;
+done:
+	spin_unlock(&rdi_priv->spin_lock);
+	return rc;
+}
+
+static int cam_vfe_rdi_put_evt_payload(
+	struct cam_vfe_mux_rdi_data              *rdi_priv,
+	struct cam_vfe_top_irq_evt_payload      **evt_payload)
+{
+	unsigned long flags;
+
+	if (!rdi_priv) {
+		CAM_ERR(CAM_ISP, "Invalid param rdi_priv NULL");
+		return -EINVAL;
+	}
+	if (*evt_payload == NULL) {
+		CAM_ERR(CAM_ISP, "No payload to put");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&rdi_priv->spin_lock, flags);
+	list_add_tail(&(*evt_payload)->list, &rdi_priv->free_payload_list);
+	*evt_payload = NULL;
+	spin_unlock_irqrestore(&rdi_priv->spin_lock, flags);
+
+	CAM_DBG(CAM_ISP, "Done");
+	return 0;
+}
+
+static int cam_vfe_rdi_err_irq_top_half(
+	uint32_t                               evt_id,
+	struct cam_irq_th_payload             *th_payload)
+{
+	int32_t                                rc;
+	int                                    i;
+	struct cam_isp_resource_node          *rdi_node;
+	struct cam_vfe_mux_rdi_data           *rdi_priv;
+	struct cam_vfe_top_irq_evt_payload    *evt_payload;
+	bool                                   error_flag = false;
+
+	CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]);
+
+	rdi_node = th_payload->handler_priv;
+	rdi_priv = rdi_node->res_priv;
+	/*
+	 *  need to handle overflow condition here, otherwise irq storm
+	 *  will block everything
+	 */
+	if (th_payload->evt_status_arr[1]) {
+		CAM_ERR(CAM_ISP,
+			"RDI Error: vfe:%d: STATUS_1=0x%x",
+			rdi_node->hw_intf->hw_idx,
+			th_payload->evt_status_arr[1]);
+		CAM_ERR(CAM_ISP, "Stopping further IRQ processing from vfe=%d",
+			rdi_node->hw_intf->hw_idx);
+		cam_irq_controller_disable_irq(rdi_priv->vfe_irq_controller,
+			rdi_priv->irq_err_handle);
+		cam_irq_controller_clear_and_mask(evt_id,
+			rdi_priv->vfe_irq_controller);
+		error_flag = true;
+	}
+
+	rc  = cam_vfe_rdi_get_evt_payload(rdi_priv, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "STATUS_1=0x%x",
+			th_payload->evt_status_arr[1]);
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	evt_payload->irq_reg_val[i] = cam_io_r(rdi_priv->mem_base +
+			rdi_priv->common_reg->violation_status);
+
+	if (error_flag)
+		CAM_INFO(CAM_ISP, "Violation status = 0x%x",
+			evt_payload->irq_reg_val[i]);
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	return rc;
+}
+
 static int cam_vfe_rdi_get_reg_update(
 	struct cam_isp_resource_node  *rdi_res,
 	void *cmd_args, uint32_t arg_size)
@@ -80,6 +198,8 @@
 	rdi_data     = (struct cam_vfe_mux_rdi_data *)rdi_res->res_priv;
 	acquire_data = (struct cam_vfe_acquire_args *)acquire_param;
 
+	rdi_data->event_cb    = acquire_data->event_cb;
+	rdi_data->priv        = acquire_data->priv;
 	rdi_data->sync_mode   = acquire_data->vfe_in.sync_mode;
 
 	return 0;
@@ -90,6 +210,8 @@
 {
 	struct cam_vfe_mux_rdi_data   *rsrc_data;
 	int                            rc = 0;
+	uint32_t                       err_irq_mask[CAM_IFE_IRQ_REGISTERS_MAX];
+	uint32_t                       irq_mask[CAM_IFE_IRQ_REGISTERS_MAX];
 
 	if (!rdi_res) {
 		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
@@ -103,15 +225,61 @@
 	}
 
 	rsrc_data = (struct cam_vfe_mux_rdi_data  *)rdi_res->res_priv;
+	err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS0] =
+		rsrc_data->rdi_common_reg_data->error_irq_mask0;
+	err_irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS1] =
+		rsrc_data->rdi_common_reg_data->error_irq_mask1;
+	irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS0] =
+		rsrc_data->rdi_common_reg_data->subscribe_irq_mask0;
+	irq_mask[CAM_IFE_IRQ_CAMIF_REG_STATUS1] =
+		rsrc_data->rdi_common_reg_data->subscribe_irq_mask1;
+
 	rdi_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
 
 	/* Reg Update */
 	cam_io_w_mb(rsrc_data->reg_data->reg_update_cmd_data,
 		rsrc_data->mem_base + rsrc_data->rdi_reg->reg_update_cmd);
 
+	if (!rsrc_data->irq_err_handle) {
+		rsrc_data->irq_err_handle = cam_irq_controller_subscribe_irq(
+			rsrc_data->vfe_irq_controller,
+			CAM_IRQ_PRIORITY_1,
+			err_irq_mask,
+			rdi_res,
+			cam_vfe_rdi_err_irq_top_half,
+			rdi_res->bottom_half_handler,
+			rdi_res->tasklet_info,
+			&tasklet_bh_api);
+		if (rsrc_data->irq_err_handle < 1) {
+			CAM_ERR(CAM_ISP, "Error IRQ handle subscribe failure");
+			rc = -ENOMEM;
+			rsrc_data->irq_err_handle = 0;
+		}
+	}
+
+	if (!rdi_res->rdi_only_ctx)
+		goto end;
+
+	if (!rsrc_data->irq_handle) {
+		rsrc_data->irq_handle = cam_irq_controller_subscribe_irq(
+			rsrc_data->vfe_irq_controller,
+			CAM_IRQ_PRIORITY_0,
+			irq_mask,
+			rdi_res,
+			rdi_res->top_half_handler,
+			rdi_res->bottom_half_handler,
+			rdi_res->tasklet_info,
+			&tasklet_bh_api);
+		if (rsrc_data->irq_handle < 1) {
+			CAM_ERR(CAM_ISP, "IRQ handle subscribe failure");
+			rc = -ENOMEM;
+			rsrc_data->irq_handle = 0;
+		}
+	}
+
 	CAM_DBG(CAM_ISP, "Start RDI %d",
 		rdi_res->res_id - CAM_ISP_HW_VFE_IN_RDI0);
-
+end:
 	return rc;
 }
 
@@ -119,7 +287,7 @@
 static int cam_vfe_rdi_resource_stop(
 	struct cam_isp_resource_node        *rdi_res)
 {
-	struct cam_vfe_mux_rdi_data           *rdi_priv;
+	struct cam_vfe_mux_rdi_data         *rdi_priv;
 	int rc = 0;
 
 	if (!rdi_res) {
@@ -136,6 +304,17 @@
 	if (rdi_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
 		rdi_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
 
+	if (rdi_priv->irq_handle) {
+		cam_irq_controller_unsubscribe_irq(
+			rdi_priv->vfe_irq_controller, rdi_priv->irq_handle);
+		rdi_priv->irq_handle = 0;
+	}
+
+	if (rdi_priv->irq_err_handle) {
+		cam_irq_controller_unsubscribe_irq(
+			rdi_priv->vfe_irq_controller, rdi_priv->irq_err_handle);
+		rdi_priv->irq_err_handle = 0;
+	}
 
 	return rc;
 }
@@ -167,7 +346,36 @@
 static int cam_vfe_rdi_handle_irq_top_half(uint32_t evt_id,
 	struct cam_irq_th_payload *th_payload)
 {
-	return -EPERM;
+	int32_t                                rc;
+	int                                    i;
+	struct cam_isp_resource_node          *rdi_node;
+	struct cam_vfe_mux_rdi_data           *rdi_priv;
+	struct cam_vfe_top_irq_evt_payload    *evt_payload;
+
+	rdi_node = th_payload->handler_priv;
+	rdi_priv = rdi_node->res_priv;
+
+	CAM_DBG(CAM_ISP, "IRQ status_0 = %x", th_payload->evt_status_arr[0]);
+	CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]);
+
+	rc  = cam_vfe_rdi_get_evt_payload(rdi_priv, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ status0=0x%x status1=0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1]);
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
 }
 
 static int cam_vfe_rdi_handle_irq_bottom_half(void *handler_priv,
@@ -177,6 +385,7 @@
 	struct cam_isp_resource_node        *rdi_node;
 	struct cam_vfe_mux_rdi_data         *rdi_priv;
 	struct cam_vfe_top_irq_evt_payload  *payload;
+	struct cam_isp_hw_event_info         evt_info;
 	uint32_t                             irq_status0;
 
 	if (!handler_priv || !evt_payload_priv) {
@@ -187,28 +396,34 @@
 	rdi_node = handler_priv;
 	rdi_priv = rdi_node->res_priv;
 	payload = evt_payload_priv;
+
 	irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
 
-	CAM_DBG(CAM_ISP, "event ID:%d", payload->evt_id);
+	evt_info.hw_idx   = rdi_node->hw_intf->hw_idx;
+	evt_info.res_id   = rdi_node->res_id;
+	evt_info.res_type = rdi_node->res_type;
+
 	CAM_DBG(CAM_ISP, "irq_status_0 = %x", irq_status0);
 
-	switch (payload->evt_id) {
-	case CAM_ISP_HW_EVENT_SOF:
-		if (irq_status0 & rdi_priv->reg_data->sof_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received SOF");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	case CAM_ISP_HW_EVENT_REG_UPDATE:
-		if (irq_status0 & rdi_priv->reg_data->reg_update_irq_mask) {
-			CAM_DBG(CAM_ISP, "Received REG UPDATE");
-			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
-		}
-		break;
-	default:
-		break;
+	if (irq_status0 & rdi_priv->reg_data->sof_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received SOF");
+
+		if (rdi_priv->event_cb)
+			rdi_priv->event_cb(rdi_priv->priv,
+				CAM_ISP_HW_EVENT_SOF, (void *)&evt_info);
+
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+	} else if (irq_status0 & rdi_priv->reg_data->reg_update_irq_mask) {
+		CAM_DBG(CAM_ISP, "Received REG UPDATE");
+
+		if (rdi_priv->event_cb)
+			rdi_priv->event_cb(rdi_priv->priv,
+				CAM_ISP_HW_EVENT_REG_UPDATE, (void *)&evt_info);
+
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
 	}
 
+	cam_vfe_rdi_put_evt_payload(rdi_priv, &payload);
 	CAM_DBG(CAM_ISP, "returning status = %d", ret);
 	return ret;
 }
@@ -217,10 +432,12 @@
 	struct cam_hw_intf            *hw_intf,
 	struct cam_hw_soc_info        *soc_info,
 	void                          *rdi_hw_info,
-	struct cam_isp_resource_node  *rdi_node)
+	struct cam_isp_resource_node  *rdi_node,
+	void                          *vfe_irq_controller)
 {
 	struct cam_vfe_mux_rdi_data     *rdi_priv = NULL;
 	struct cam_vfe_rdi_ver2_hw_info *rdi_info = rdi_hw_info;
+	int                              i = 0;
 
 	rdi_priv = kzalloc(sizeof(struct cam_vfe_mux_rdi_data),
 			GFP_KERNEL);
@@ -235,6 +452,8 @@
 	rdi_priv->hw_intf    = hw_intf;
 	rdi_priv->common_reg = rdi_info->common_reg;
 	rdi_priv->rdi_reg    = rdi_info->rdi_reg;
+	rdi_priv->vfe_irq_controller  = vfe_irq_controller;
+	rdi_priv->rdi_common_reg_data = rdi_info->common_reg_data;
 
 	switch (rdi_node->res_id) {
 	case CAM_ISP_HW_VFE_IN_RDI0:
@@ -265,6 +484,14 @@
 	rdi_node->top_half_handler = cam_vfe_rdi_handle_irq_top_half;
 	rdi_node->bottom_half_handler = cam_vfe_rdi_handle_irq_bottom_half;
 
+	spin_lock_init(&rdi_priv->spin_lock);
+	INIT_LIST_HEAD(&rdi_priv->free_payload_list);
+	for (i = 0; i < CAM_VFE_RDI_EVT_MAX; i++) {
+		INIT_LIST_HEAD(&rdi_priv->evt_payload[i].list);
+		list_add_tail(&rdi_priv->evt_payload[i].list,
+			&rdi_priv->free_payload_list);
+	}
+
 	return 0;
 err_init:
 	kfree(rdi_priv);
@@ -275,6 +502,11 @@
 	struct cam_isp_resource_node  *rdi_node)
 {
 	struct cam_vfe_mux_rdi_data *rdi_priv = rdi_node->res_priv;
+	int                          i = 0;
+
+	INIT_LIST_HEAD(&rdi_priv->free_payload_list);
+	for (i = 0; i < CAM_VFE_RDI_EVT_MAX; i++)
+		INIT_LIST_HEAD(&rdi_priv->evt_payload[i].list);
 
 	rdi_node->start = NULL;
 	rdi_node->stop  = NULL;
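
cam_vfe_rdi_get_evt_payload()/cam_vfe_rdi_put_evt_payload() above keep a fixed array of payloads threaded on a free list under a spinlock, so the IRQ top half never allocates memory; when the list runs dry the IRQ is dropped with a rate-limited error. A compact userspace model of the same pool (a pthread mutex stands in for the kernel spinlock; names are illustrative):

/* Userspace model of the fixed event-payload pool used by the top half.
 * A pthread mutex stands in for the kernel spinlock. */
#include <pthread.h>
#include <stdio.h>

#define EVT_MAX 4

struct payload {
	struct payload *next;   /* singly linked free list */
	unsigned int irq_status;
};

static struct payload pool[EVT_MAX];
static struct payload *free_list;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void pool_init(void)
{
	for (int i = 0; i < EVT_MAX; i++) {
		pool[i].next = free_list;
		free_list = &pool[i];
	}
}

static struct payload *get_payload(void)
{
	pthread_mutex_lock(&lock);
	struct payload *p = free_list;
	if (p)
		free_list = p->next;    /* empty list -> NULL, caller drops IRQ */
	pthread_mutex_unlock(&lock);
	return p;
}

static void put_payload(struct payload *p)
{
	pthread_mutex_lock(&lock);
	p->next = free_list;            /* bottom half returns it when done */
	free_list = p;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pool_init();
	struct payload *p = get_payload();
	if (p) {
		p->irq_status = 0x1;
		printf("got payload, status 0x%x\n", p->irq_status);
		put_payload(p);
	}
	return 0;
}
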
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h
index 797ed55..c570e84 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_VFE_RDI_H_
@@ -11,19 +11,30 @@
 
 #define CAM_VFE_RDI_VER2_MAX  4
 
+#define CAM_VFE_RDI_EVT_MAX   256
+
 struct cam_vfe_rdi_ver2_reg {
 	uint32_t     reg_update_cmd;
 };
 
+struct cam_vfe_rdi_common_reg_data {
+	uint32_t     subscribe_irq_mask0;
+	uint32_t     subscribe_irq_mask1;
+	uint32_t     error_irq_mask0;
+	uint32_t     error_irq_mask1;
+	uint32_t     error_irq_mask2;
+	uint32_t     rdi_frame_drop_mask;
+};
+
 struct cam_vfe_rdi_reg_data {
 	uint32_t     reg_update_cmd_data;
 	uint32_t     sof_irq_mask;
 	uint32_t     reg_update_irq_mask;
 };
 struct cam_vfe_rdi_ver2_hw_info {
 	struct cam_vfe_top_ver2_reg_offset_common  *common_reg;
 	struct cam_vfe_rdi_ver2_reg                *rdi_reg;
+	struct cam_vfe_rdi_common_reg_data         *common_reg_data;
 	struct cam_vfe_rdi_reg_data  *reg_data[CAM_VFE_RDI_VER2_MAX];
 };
 
@@ -35,7 +46,8 @@
 	struct cam_hw_intf            *hw_intf,
 	struct cam_hw_soc_info        *soc_info,
 	void                          *rdi_hw_info,
-	struct cam_isp_resource_node  *rdi_node);
+	struct cam_isp_resource_node  *rdi_node,
+	void                          *vfe_irq_controller);
 
 int cam_vfe_rdi_ver2_deinit(
 	struct cam_isp_resource_node  *rdi_node);
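
With cam_vfe_rdi_common_reg_data carrying separate subscribe and error masks, resource_start now subscribes an error handler (and, for rdi-only contexts, a normal handler) while resource_stop unsubscribes both, using a zero handle to mean "not subscribed". The handle lifecycle, sketched standalone with a toy controller (illustrative names; the real code calls cam_irq_controller_subscribe_irq/unsubscribe_irq):

/* Sketch of the subscribe/unsubscribe handle lifecycle used by start/stop. */
#include <stdio.h>

static int next_handle = 1;

static int subscribe_irq(unsigned int mask)
{
	printf("subscribed mask 0x%x -> handle %d\n", mask, next_handle);
	return next_handle++;            /* valid handles are >= 1 */
}

static void unsubscribe_irq(int handle)
{
	printf("unsubscribed handle %d\n", handle);
}

struct rdi_res { int irq_handle, irq_err_handle; };

static void rdi_start(struct rdi_res *r)
{
	if (!r->irq_err_handle)          /* 0 means "not subscribed yet" */
		r->irq_err_handle = subscribe_irq(0xf000);
	if (!r->irq_handle)
		r->irq_handle = subscribe_irq(0x000f);
}

static void rdi_stop(struct rdi_res *r)
{
	if (r->irq_handle) {
		unsubscribe_irq(r->irq_handle);
		r->irq_handle = 0;
	}
	if (r->irq_err_handle) {
		unsubscribe_irq(r->irq_err_handle);
		r->irq_err_handle = 0;
	}
}

int main(void)
{
	struct rdi_res r = { 0, 0 };
	rdi_start(&r);
	rdi_start(&r);   /* second start is a no-op thanks to the guard */
	rdi_stop(&r);
	return 0;
}
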
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c
index 287a10e..077f890 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c
@@ -12,18 +12,19 @@
 	struct cam_hw_soc_info        *soc_info,
 	struct cam_hw_intf            *hw_intf,
 	void                          *top_hw_info,
-	struct cam_vfe_top            **vfe_top)
+	void                          *vfe_irq_controller,
+	struct cam_vfe_top           **vfe_top)
 {
 	int rc = -EINVAL;
 
 	switch (top_version) {
 	case CAM_VFE_TOP_VER_2_0:
 		rc = cam_vfe_top_ver2_init(soc_info, hw_intf, top_hw_info,
-			vfe_top);
+			vfe_irq_controller, vfe_top);
 		break;
 	case CAM_VFE_TOP_VER_3_0:
 		rc = cam_vfe_top_ver3_init(soc_info, hw_intf, top_hw_info,
-			vfe_top);
+			vfe_irq_controller, vfe_top);
 		break;
 	default:
 		CAM_ERR(CAM_ISP, "Error! Unsupported Version %x", top_version);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
index 535cf71..3787fa1 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
@@ -703,6 +703,7 @@
 	struct cam_hw_soc_info                 *soc_info,
 	struct cam_hw_intf                     *hw_intf,
 	void                                   *top_hw_info,
+	void                                   *vfe_irq_controller,
 	struct cam_vfe_top                    **vfe_top_ptr)
 {
 	int i, j, rc = 0;
@@ -760,7 +761,7 @@
 
 			rc = cam_vfe_camif_ver2_init(hw_intf, soc_info,
 				&ver2_hw_info->camif_hw_info,
-				&top_priv->mux_rsrc[i]);
+				&top_priv->mux_rsrc[i], vfe_irq_controller);
 			if (rc)
 				goto deinit_resources;
 		} else if (ver2_hw_info->mux_type[i] ==
@@ -777,7 +778,7 @@
 
 			rc = cam_vfe_camif_lite_ver2_init(hw_intf, soc_info,
 				&ver2_hw_info->camif_lite_hw_info,
-				&top_priv->mux_rsrc[i]);
+				&top_priv->mux_rsrc[i], vfe_irq_controller);
 
 			if (rc)
 				goto deinit_resources;
@@ -796,7 +797,7 @@
 
 			rc = cam_vfe_rdi_ver2_init(hw_intf, soc_info,
 				&ver2_hw_info->rdi_hw_info,
-				&top_priv->mux_rsrc[i]);
+				&top_priv->mux_rsrc[i], vfe_irq_controller);
 			if (rc)
 				goto deinit_resources;
 		} else if (ver2_hw_info->mux_type[i] ==
@@ -845,11 +846,9 @@
 				&top_priv->mux_rsrc[i]))
 				CAM_ERR(CAM_ISP, "Camif lite deinit failed");
 		} else if (ver2_hw_info->mux_type[i] ==
-			CAM_ISP_HW_VFE_IN_RDI0) {
-			if (cam_vfe_rdi_ver2_init(hw_intf, soc_info,
-				&ver2_hw_info->rdi_hw_info,
-				&top_priv->mux_rsrc[i]))
-				CAM_ERR(CAM_ISP, "RDI deinit failed");
+			CAM_VFE_IN_RD_VER_1_0) {
+			if (cam_vfe_fe_ver1_deinit(&top_priv->mux_rsrc[i]))
+				CAM_ERR(CAM_ISP, "VFE FE deinit failed");
 		} else {
 			if (cam_vfe_rdi_ver2_deinit(&top_priv->mux_rsrc[i]))
 				CAM_ERR(CAM_ISP, "RDI Deinit failed");
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h
index aeaa73d..82e30b4 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_VFE_TOP_VER2_H_
@@ -56,7 +56,8 @@
 int cam_vfe_top_ver2_init(struct cam_hw_soc_info     *soc_info,
 	struct cam_hw_intf                           *hw_intf,
 	void                                         *top_hw_info,
-	struct cam_vfe_top                          **vfe_top);
+	void                                         *vfe_irq_controller,
+	struct cam_vfe_top                          **vfe_top_ptr);
 
 int cam_vfe_top_ver2_deinit(struct cam_vfe_top      **vfe_top);
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.c
index 6ff7848..3f0799e 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.c
@@ -13,8 +13,10 @@
 #include "cam_cpas_api.h"
 #include "cam_vfe_soc.h"
 
-#define CAM_VFE_HW_RESET_HW_AND_REG_VAL  0x00000003
-#define CAM_VFE_HW_RESET_HW_VAL          0x007F0000
+#define CAM_VFE_HW_RESET_HW_AND_REG_VAL       0x00000003
+#define CAM_VFE_HW_RESET_HW_VAL               0x007F0000
+#define CAM_VFE_LITE_HW_RESET_AND_REG_VAL     0x00000002
+#define CAM_VFE_LITE_HW_RESET_HW_VAL          0x0000003D
 #define CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES 3
 
 struct cam_vfe_top_ver3_common_data {
@@ -436,20 +438,33 @@
 		return -EINVAL;
 	}
 
+	soc_info = top_priv->common_data.soc_info;
+	reg_common = top_priv->common_data.common_reg;
+
 	switch (*reset_reg_args) {
 	case CAM_VFE_HW_RESET_HW_AND_REG:
-		reset_reg_val = CAM_VFE_HW_RESET_HW_AND_REG_VAL;
+		if (strnstr(soc_info->compatible, "lite",
+			strlen(soc_info->compatible)) == NULL)
+			reset_reg_val = CAM_VFE_HW_RESET_HW_AND_REG_VAL;
+		else
+			reset_reg_val = CAM_VFE_LITE_HW_RESET_AND_REG_VAL;
 		break;
 	default:
-		reset_reg_val = CAM_VFE_HW_RESET_HW_VAL;
+		if (strnstr(soc_info->compatible, "lite",
+			strlen(soc_info->compatible)) == NULL)
+			reset_reg_val = CAM_VFE_HW_RESET_HW_VAL;
+		else
+			reset_reg_val = CAM_VFE_LITE_HW_RESET_HW_VAL;
 		break;
 	}
 	/* override due to hw limitation */
-	reset_reg_val = CAM_VFE_HW_RESET_HW_AND_REG_VAL;
+	if (strnstr(soc_info->compatible, "lite",
+		strlen(soc_info->compatible)) == NULL)
+		reset_reg_val = CAM_VFE_HW_RESET_HW_AND_REG_VAL;
+	else
+		reset_reg_val = CAM_VFE_LITE_HW_RESET_AND_REG_VAL;
 
-	CAM_DBG(CAM_ISP, "reset reg value: %x", reset_reg_val);
-	soc_info = top_priv->common_data.soc_info;
-	reg_common = top_priv->common_data.common_reg;
+	CAM_DBG(CAM_ISP, "reset reg value: 0x%x", reset_reg_val);
 
 	/* Mask All the IRQs except RESET */
 	if (strnstr(soc_info->compatible, "lite",
@@ -711,6 +726,7 @@
 	struct cam_hw_soc_info                 *soc_info,
 	struct cam_hw_intf                     *hw_intf,
 	void                                   *top_hw_info,
+	void                                   *vfe_irq_controller,
 	struct cam_vfe_top                    **vfe_top_ptr)
 {
 	int i, j, rc = 0;
@@ -768,7 +784,7 @@
 
 			rc = cam_vfe_camif_ver3_init(hw_intf, soc_info,
 				&ver3_hw_info->camif_hw_info,
-				&top_priv->mux_rsrc[i]);
+				&top_priv->mux_rsrc[i], vfe_irq_controller);
 			if (rc)
 				goto deinit_resources;
 		} else if (ver3_hw_info->mux_type[i] ==
@@ -779,7 +795,7 @@
 
 			rc = cam_vfe_camif_lite_ver3_init(hw_intf, soc_info,
 				&ver3_hw_info->pdlib_hw_info,
-				&top_priv->mux_rsrc[i]);
+				&top_priv->mux_rsrc[i], vfe_irq_controller);
 			if (rc)
 				goto deinit_resources;
 		} else if (ver3_hw_info->mux_type[i] ==
@@ -801,7 +817,7 @@
 
 			rc = cam_vfe_camif_lite_ver3_init(hw_intf, soc_info,
 				ver3_hw_info->rdi_hw_info[j++],
-				&top_priv->mux_rsrc[i]);
+				&top_priv->mux_rsrc[i], vfe_irq_controller);
 			if (rc)
 				goto deinit_resources;
 		} else if (ver3_hw_info->mux_type[i] ==
@@ -812,7 +828,7 @@
 
 			rc = cam_vfe_camif_lite_ver3_init(hw_intf, soc_info,
 				&ver3_hw_info->lcr_hw_info,
-				&top_priv->mux_rsrc[i]);
+				&top_priv->mux_rsrc[i], vfe_irq_controller);
 			if (rc)
 				goto deinit_resources;
 		} else {
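
cam_vfe_top_ver3 now derives its reset values from the DT compatible string, since VFE-lite exposes fewer reset bits than full VFE. The selection reduces to a substring probe; sketched below with strstr standing in for the kernel's strnstr and the values copied from the defines above:

/* Select the reset register value by DT compatible string, as the ver3
 * top code does; strstr stands in for the kernel's strnstr. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define CAM_VFE_HW_RESET_HW_AND_REG_VAL     0x00000003
#define CAM_VFE_LITE_HW_RESET_AND_REG_VAL   0x00000002

static uint32_t reset_val(const char *compatible)
{
	if (strstr(compatible, "lite") == NULL)
		return CAM_VFE_HW_RESET_HW_AND_REG_VAL;
	return CAM_VFE_LITE_HW_RESET_AND_REG_VAL;
}

int main(void)
{
	printf("vfe:      0x%x\n", reset_val("qcom,vfe480"));
	printf("vfe-lite: 0x%x\n", reset_val("qcom,vfe-lite480"));
	return 0;
}
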
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h
index 03630f0..a83048d 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h
@@ -52,6 +52,7 @@
 int cam_vfe_top_ver3_init(struct cam_hw_soc_info     *soc_info,
 	struct cam_hw_intf                           *hw_intf,
 	void                                         *top_hw_info,
+	void                                         *vfe_irq_controller,
 	struct cam_vfe_top                          **vfe_top);
 
 int cam_vfe_top_ver3_deinit(struct cam_vfe_top      **vfe_top);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h
index 622ca64..a5236e5 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h
@@ -34,7 +34,8 @@
 	struct cam_hw_soc_info        *soc_info,
 	struct cam_hw_intf            *hw_intf,
 	void                          *top_hw_info,
-	struct cam_vfe_top            **vfe_top);
+	void                          *vfe_irq_controller,
+	struct cam_vfe_top           **vfe_top);
 
 int cam_vfe_top_deinit(uint32_t        top_version,
 	struct cam_vfe_top           **vfe_top);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/Makefile
index 66d5fad..6925b3a 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/Makefile
@@ -9,3 +9,4 @@
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_eeprom/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_ois/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_flash/
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_2_1_hwreg.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_2_1_hwreg.h
index bbf2798..75e993b 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_2_1_hwreg.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_2_1_hwreg.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_CSIPHY_1_2_1_HWREG_H_
@@ -15,7 +15,7 @@
 	.csiphy_common_array_size = 4,
 	.csiphy_reset_array_size = 4,
 	.csiphy_2ph_config_array_size = 20,
-	.csiphy_3ph_config_array_size = 32,
+	.csiphy_3ph_config_array_size = 31,
 	.csiphy_2ph_clock_lane = 0x1,
 	.csiphy_2ph_combo_ck_ln = 0x10,
 };
@@ -308,7 +308,6 @@
 		{0x01CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0164, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x01DC, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0984, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x09B0, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0800, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
@@ -317,10 +316,10 @@
 		{0x0A90, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0A94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0A98, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0A90, 0x17, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0A94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0A98, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0A8C, 0xBF, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A90, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A94, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A98, 0x1A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A8C, 0xAF, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0368, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x036C, 0x25, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0304, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
@@ -342,8 +341,7 @@
 		{0x03CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0364, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x03DC, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0A84, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0AB0, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0AB0, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0800, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
 	{
@@ -351,10 +349,10 @@
 		{0x0B90, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0B94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0B98, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0B90, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0B94, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0B98, 0x1C, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0B8C, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B90, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B94, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B98, 0x1A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B8C, 0xAF, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0568, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x056C, 0x25, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0504, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
@@ -376,8 +374,7 @@
 		{0x05CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0564, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x05DC, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0B84, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
-		{0x0BB0, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0BB0, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
 		{0x0800, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS},
 	},
 };
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
index f71f53d..3cef9c6 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
@@ -1375,7 +1375,7 @@
 	vfree(e_ctrl->cal_data.map);
 	e_ctrl->cal_data.num_data = 0;
 	e_ctrl->cal_data.num_map = 0;
-	e_ctrl->cam_eeprom_state = CAM_EEPROM_INIT;
+	e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE;
 release_buf:
 	if (cam_mem_put_cpu_buf(dev_config.packet_handle))
 		CAM_WARN(CAM_EEPROM, "Put cpu buffer failed : 0x%llx",
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
index 10673d3..f2e973e 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/module.h>
@@ -1366,6 +1366,42 @@
 }
 EXPORT_SYMBOL(cam_smmu_dealloc_qdss);
 
+int cam_smmu_get_io_region_info(int32_t smmu_hdl,
+	dma_addr_t *iova, size_t *len)
+{
+	int32_t idx;
+
+	if (!iova || !len || (smmu_hdl == HANDLE_INIT)) {
+		CAM_ERR(CAM_SMMU, "Error: Input args are invalid");
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, smmu_hdl);
+		return -EINVAL;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].io_support) {
+		CAM_ERR(CAM_SMMU,
+			"I/O memory not supported for this SMMU handle");
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	*iova = iommu_cb_set.cb_info[idx].io_info.iova_start;
+	*len = iommu_cb_set.cb_info[idx].io_info.iova_len;
+
+	CAM_DBG(CAM_SMMU,
+		"I/O area for hdl = %x start addr = %pK len = %zu",
+		smmu_hdl, *iova, *len);
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+
+	return 0;
+}
+
 int cam_smmu_get_region_info(int32_t smmu_hdl,
 	enum cam_smmu_region_id region_id,
 	struct cam_smmu_region_info *region_info)
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
index 2cc91c7..eab27bb 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_SMMU_API_H_
@@ -381,4 +381,16 @@
  */
 int cam_smmu_dealloc_qdss(int32_t smmu_hdl);
 
+/**
+ * @brief Get start addr & len of I/O region for a given cb
+ *
+ * @param smmu_hdl: SMMU handle identifying the context bank
+ * @param iova: IOVA address of allocated I/O region
+ * @param len: Length of allocated I/O memory
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_get_io_region_info(int32_t smmu_hdl,
+	dma_addr_t *iova, size_t *len);
+
 #endif /* _CAM_SMMU_API_H_ */
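
cam_smmu_get_io_region_info() follows the usual out-parameter contract: zero on success with *iova and *len filled in, negative errno otherwise. A caller-shape sketch, with a stub faking the SMMU driver so it compiles standalone (the stub's region values are made up):

/* Caller-shape sketch for cam_smmu_get_io_region_info(); the stub below
 * fakes the SMMU driver so the example runs standalone. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <errno.h>

typedef uint64_t dma_addr_t;
#define HANDLE_INIT 0

/* Stub with the same contract as the real API: 0 on success, -errno on error. */
static int cam_smmu_get_io_region_info(int32_t smmu_hdl,
				       dma_addr_t *iova, size_t *len)
{
	if (!iova || !len || smmu_hdl == HANDLE_INIT)
		return -EINVAL;
	*iova = 0xd8000000;      /* made-up region for the sketch */
	*len  = 0x10000000;
	return 0;
}

int main(void)
{
	dma_addr_t iova;
	size_t len;
	int rc = cam_smmu_get_io_region_info(42, &iova, &len);

	if (rc) {
		fprintf(stderr, "no io region: %d\n", rc);
		return 1;
	}
	printf("io region: iova=0x%llx len=%zu\n",
	       (unsigned long long)iova, len);
	return 0;
}
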
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
index f039cda..eb4e1f9 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
@@ -1718,3 +1718,25 @@
 
 	return 0;
 }
+
+uint32_t cam_soc_util_get_vote_level(struct cam_hw_soc_info *soc_info,
+	uint64_t clock_rate)
+{
+	int i = 0;
+
+	if (!clock_rate)
+		return CAM_SVS_VOTE;
+
+	for (i = 0; i < CAM_MAX_VOTE; i++) {
+		if (soc_info->clk_level_valid[i] &&
+			soc_info->clk_rate[i][soc_info->src_clk_idx] >=
+			clock_rate) {
+			CAM_DBG(CAM_UTIL,
+				"Clock rate %llu, selected clock level %d",
+				clock_rate, i);
+			return i;
+		}
+	}
+
+	return CAM_TURBO_VOTE;
+}
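
cam_soc_util_get_vote_level() is a first-fit scan over the per-level src-clock rate table: rate 0 defaults to SVS, the first valid level whose rate covers the request wins, and TURBO is the fallback when nothing fits. The same scan modeled standalone (level values and the rate table are invented for the sketch):

/* First-fit vote-level scan mirroring cam_soc_util_get_vote_level();
 * the enum values and rate table below are invented. */
#include <stdio.h>
#include <stdint.h>

enum { CAM_SVS_VOTE = 1, CAM_TURBO_VOTE = 3, CAM_MAX_VOTE = 4 };

static const uint64_t clk_rate[CAM_MAX_VOTE] = {
	19200000, 300000000, 480000000, 600000000
};

static uint32_t get_vote_level(uint64_t rate)
{
	if (!rate)
		return CAM_SVS_VOTE;          /* no request -> nominal default */

	for (int i = 0; i < CAM_MAX_VOTE; i++)
		if (clk_rate[i] >= rate)
			return i;             /* first level that covers it */

	return CAM_TURBO_VOTE;                /* nothing fits -> max level */
}

int main(void)
{
	printf("level for 400 MHz: %u\n", get_vote_level(400000000));
	printf("level for 0 Hz:    %u\n", get_vote_level(0));
	return 0;
}
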
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
index 76ce930..dc45059 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
@@ -36,8 +36,10 @@
 #define CAM_SOC_MAX_CLK             32
 
 /* DDR device types */
-#define DDR_TYPE_LPDDR4       6
-#define DDR_TYPE_LPDDR5       9
+#define DDR_TYPE_LPDDR4        6
+#define DDR_TYPE_LPDDR4X       7
+#define DDR_TYPE_LPDDR5        8
+#define DDR_TYPE_LPDDR5X       9
 
 /**
  * enum cam_vote_level - Enum for voting level
@@ -631,4 +633,7 @@
 int cam_soc_util_clk_enable_default(struct cam_hw_soc_info *soc_info,
 	enum cam_vote_level clk_level);
 
+uint32_t cam_soc_util_get_vote_level(struct cam_hw_soc_info *soc_info,
+	uint64_t clock_rate);
+
 #endif /* _CAM_SOC_UTIL_H_ */
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi.c b/drivers/media/platform/msm/cvp/cvp_hfi.c
index 39cb5b3..1cea177 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi.c
+++ b/drivers/media/platform/msm/cvp/cvp_hfi.c
@@ -26,7 +26,6 @@
 #include <linux/soc/qcom/smem.h>
 #include <soc/qcom/subsystem_restart.h>
 #include <linux/dma-mapping.h>
-#include <linux/fastcvpd.h>
 #include "hfi_packetization.h"
 #include "msm_cvp_debug.h"
 #include "cvp_core_hfi.h"
@@ -249,17 +248,6 @@
 		}
 		break;
 	}
-	case HFI_CMD_SESSION_REGISTER_BUFFERS:
-	{
-		struct hfi_cmd_session_register_buffers_packet *pkt =
-			(struct hfi_cmd_session_register_buffers_packet *)
-			packet;
-		struct hfi_buffer_mapping_type *buf =
-			(struct hfi_buffer_mapping_type *)pkt->buffer;
-		for (i = 0; i < pkt->num_buffers; i++)
-			buf[i].device_addr -= fw_bias;
-		break;
-	}
 	default:
 		break;
 	}
@@ -328,7 +316,7 @@
 	}
 
 	dprintk(CVP_DBG, "%s: suspend dsp\n", __func__);
-	rc = fastcvpd_video_suspend(flags);
+	rc = cvp_dsp_suspend(flags);
 	if (rc) {
 		dprintk(CVP_ERR, "%s: dsp suspend failed with error %d\n",
 			__func__, rc);
@@ -353,7 +341,7 @@
 	}
 
 	dprintk(CVP_DBG, "%s: resume dsp\n", __func__);
-	rc = fastcvpd_video_resume(flags);
+	rc = cvp_dsp_resume(flags);
 	if (rc) {
 		dprintk(CVP_ERR,
 			"%s: dsp resume failed with error %d\n",
@@ -379,7 +367,7 @@
 	}
 
 	dprintk(CVP_DBG, "%s: shutdown dsp\n", __func__);
-	rc = fastcvpd_video_shutdown(flags);
+	rc = cvp_dsp_shutdown(flags);
 	if (rc) {
 		dprintk(CVP_ERR,
 			"%s: dsp shutdown failed with error %d\n",
@@ -2602,76 +2590,6 @@
 	return rc;
 }
 
-static int venus_hfi_session_register_buffer(void *sess,
-		struct cvp_register_buffer *buffer)
-{
-	int rc = 0;
-	u8 packet[CVP_IFACEQ_VAR_LARGE_PKT_SIZE];
-	struct hfi_cmd_session_register_buffers_packet *pkt;
-	struct hal_session *session = sess;
-	struct venus_hfi_device *device;
-
-	if (!session || !session->device || !buffer) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-	device = session->device;
-
-	mutex_lock(&device->lock);
-	if (!__is_session_valid(device, session, __func__)) {
-		rc = -EINVAL;
-		goto exit;
-	}
-	pkt = (struct hfi_cmd_session_register_buffers_packet *)packet;
-	rc = call_hfi_pkt_op(device, session_register_buffer, pkt,
-			session, buffer);
-	if (rc) {
-		dprintk(CVP_ERR, "%s: failed to create packet\n", __func__);
-		goto exit;
-	}
-	if (__iface_cmdq_write(session->device, pkt))
-		rc = -ENOTEMPTY;
-exit:
-	mutex_unlock(&device->lock);
-
-	return rc;
-}
-
-static int venus_hfi_session_unregister_buffer(void *sess,
-		struct cvp_unregister_buffer *buffer)
-{
-	int rc = 0;
-	u8 packet[CVP_IFACEQ_VAR_LARGE_PKT_SIZE];
-	struct hfi_cmd_session_unregister_buffers_packet *pkt;
-	struct hal_session *session = sess;
-	struct venus_hfi_device *device;
-
-	if (!session || !session->device || !buffer) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-	device = session->device;
-
-	mutex_lock(&device->lock);
-	if (!__is_session_valid(device, session, __func__)) {
-		rc = -EINVAL;
-		goto exit;
-	}
-	pkt = (struct hfi_cmd_session_unregister_buffers_packet *)packet;
-	rc = call_hfi_pkt_op(device, session_unregister_buffer, pkt,
-			session, buffer);
-	if (rc) {
-		dprintk(CVP_ERR, "%s: failed to create packet\n", __func__);
-		goto exit;
-	}
-	if (__iface_cmdq_write(session->device, pkt))
-		rc = -ENOTEMPTY;
-exit:
-	mutex_unlock(&device->lock);
-
-	return rc;
-}
-
 static int venus_hfi_session_start(void *session)
 {
 	dprintk(CVP_ERR, "Deprecated function %s\n", __func__);
@@ -2720,11 +2638,121 @@
 	return rc;
 }
 
-static int venus_hfi_session_cvp_dfs_config(void *sess,
-		struct msm_cvp_dfsconfig *dfs_config)
+static int venus_hfi_session_cvp_dme_config(void *sess,
+		struct msm_cvp_internal_dmeconfig *dme_config)
 {
 	int rc = 0;
-	struct hfi_cmd_session_cvp_dfs_config pkt;
+	struct hfi_cmd_session_cvp_dme_config_packet pkt;
+	struct hal_session *session = sess;
+	struct venus_hfi_device *device;
+
+	if (!session || !session->device) {
+		dprintk(CVP_ERR, "invalid session\n");
+		return -ENODEV;
+	}
+
+	device = session->device;
+	mutex_lock(&device->lock);
+
+	if (!__is_session_valid(device, session, __func__)) {
+		rc = -EINVAL;
+		goto dme_config_err;
+	}
+	rc = call_hfi_pkt_op(device, session_cvp_dme_config,
+			&pkt, session, dme_config);
+	if (rc) {
+		dprintk(CVP_ERR,
+				"Failed to create DME config pkt\n");
+		goto dme_config_err;
+	}
+
+	if (__iface_cmdq_write(session->device, &pkt))
+		rc = -ENOTEMPTY;
+	dprintk(CVP_DBG, "%s: done calling __iface_cmdq_write\n", __func__);
+dme_config_err:
+	mutex_unlock(&device->lock);
+	return rc;
+}
+
+static int venus_hfi_session_cvp_dme_frame(void *sess,
+				struct msm_cvp_internal_dmeframe *dme_frame)
+{
+	int rc = 0;
+	struct hfi_cmd_session_cvp_dme_frame_packet pkt;
+	struct hal_session *session = sess;
+	struct venus_hfi_device *device;
+
+	if (!session || !session->device) {
+		dprintk(CVP_ERR, "invalid session\n");
+		return -ENODEV;
+	}
+
+	device = session->device;
+	mutex_lock(&device->lock);
+
+	if (!__is_session_valid(device, session, __func__)) {
+		rc = -EINVAL;
+		goto dme_frame_err;
+	}
+	rc = call_hfi_pkt_op(device, session_cvp_dme_frame,
+			&pkt, session, dme_frame);
+	if (rc) {
+		dprintk(CVP_ERR,
+				"Failed to create DME frame pkt\n");
+		goto dme_frame_err;
+	}
+
+	if (__iface_cmdq_write(session->device, &pkt))
+		rc = -ENOTEMPTY;
+	dprintk(CVP_DBG, "%s: done calling __iface_cmdq_write\n", __func__);
+dme_frame_err:
+	mutex_unlock(&device->lock);
+	return rc;
+}
+
+
+static int venus_hfi_session_cvp_persist(void *sess,
+		struct msm_cvp_internal_persist_cmd *pbuf_cmd)
+{
+	int rc = 0;
+	struct hfi_cmd_session_cvp_persist_packet pkt;
+	struct hal_session *session = sess;
+	struct venus_hfi_device *device;
+
+	if (!session || !session->device) {
+		dprintk(CVP_ERR, "invalid session\n");
+		return -ENODEV;
+	}
+
+	device = session->device;
+	mutex_lock(&device->lock);
+
+	if (!__is_session_valid(device, session, __func__)) {
+		rc = -EINVAL;
+		goto persist_err;
+	}
+	rc = call_hfi_pkt_op(device, session_cvp_persist,
+			&pkt, session, pbuf_cmd);
+	if (rc) {
+		dprintk(CVP_ERR,
+				"Failed to create persist pkt\n");
+		goto persist_err;
+	}
+
+	if (__iface_cmdq_write(session->device, &pkt))
+		rc = -ENOTEMPTY;
+
+	dprintk(CVP_DBG, "%s: done calling __iface_cmdq_write\n", __func__);
+persist_err:
+	mutex_unlock(&device->lock);
+	return rc;
+}
+
+static int venus_hfi_session_cvp_dfs_config(void *sess,
+		struct msm_cvp_internal_dfsconfig *dfs_config)
+{
+	int rc = 0;
+	struct hfi_cmd_session_cvp_dfs_config_packet pkt;
 	struct hal_session *session = sess;
 	struct venus_hfi_device *device;
 
@@ -2760,10 +2788,10 @@
 }
 
 static int venus_hfi_session_cvp_dfs_frame(void *sess,
-				struct msm_cvp_dfsframe *dfs_frame)
+				struct msm_cvp_internal_dfsframe *dfs_frame)
 {
 	int rc = 0;
-	struct hfi_cmd_session_cvp_dfs_frame pkt;
+	struct hfi_cmd_session_cvp_dfs_frame_packet pkt;
 	struct hal_session *session = sess;
 	struct venus_hfi_device *device;
 
@@ -2796,12 +2824,6 @@
 	return rc;
 }
 
-static int venus_hfi_session_cvp_send_cmd(void *sess,
-	struct cvp_frame_data *input_frame)
-{
-	return 0;
-}
-
 static int venus_hfi_session_get_buf_req(void *sess)
 {
 	struct hfi_cmd_session_get_property_packet pkt;
@@ -3205,6 +3227,80 @@
 	return NULL;
 }
 
+#define _INVALID_MSG_ "Unrecognized MSG (%#x) session (%pK), discarding\n"
+#define _INVALID_STATE_ "Ignore responses from %d to %d invalid state\n"
+#define _DEVFREQ_FAIL_ "Failed to add devfreq device bus %s governor %s: %d\n"
+
+static void process_system_msg(struct msm_cvp_cb_info *info,
+		struct venus_hfi_device *device,
+		void *raw_packet)
+{
+	struct cvp_hal_sys_init_done sys_init_done = {0};
+
+	switch (info->response_type) {
+	case HAL_SYS_ERROR:
+		__process_sys_error(device);
+		break;
+	case HAL_SYS_RELEASE_RESOURCE_DONE:
+		dprintk(CVP_DBG, "Received SYS_RELEASE_RESOURCE\n");
+		break;
+	case HAL_SYS_INIT_DONE:
+		dprintk(CVP_DBG, "Received SYS_INIT_DONE\n");
+
+		sys_init_done.capabilities =
+			device->sys_init_capabilities;
+		cvp_hfi_process_sys_init_done_prop_read(
+			(struct hfi_msg_sys_init_done_packet *)
+				raw_packet, &sys_init_done);
+		info->response.cmd.data.sys_init_done = sys_init_done;
+		break;
+	default:
+		break;
+	}
+}
+
+
+static void **get_session_id(struct msm_cvp_cb_info *info)
+{
+	void **session_id = NULL;
+
+	/* For session-related packets, validate session */
+	switch (info->response_type) {
+	case HAL_SESSION_INIT_DONE:
+	case HAL_SESSION_END_DONE:
+	case HAL_SESSION_ABORT_DONE:
+	case HAL_SESSION_STOP_DONE:
+	case HAL_SESSION_FLUSH_DONE:
+	case HAL_SESSION_SET_BUFFER_DONE:
+	case HAL_SESSION_SUSPEND_DONE:
+	case HAL_SESSION_RESUME_DONE:
+	case HAL_SESSION_SET_PROP_DONE:
+	case HAL_SESSION_GET_PROP_DONE:
+	case HAL_SESSION_RELEASE_BUFFER_DONE:
+	case HAL_SESSION_REGISTER_BUFFER_DONE:
+	case HAL_SESSION_UNREGISTER_BUFFER_DONE:
+	case HAL_SESSION_DFS_CONFIG_CMD_DONE:
+	case HAL_SESSION_DME_CONFIG_CMD_DONE:
+	case HAL_SESSION_DFS_FRAME_CMD_DONE:
+	case HAL_SESSION_DME_FRAME_CMD_DONE:
+	case HAL_SESSION_PERSIST_CMD_DONE:
+	case HAL_SESSION_PROPERTY_INFO:
+		session_id = &info->response.cmd.session_id;
+		break;
+	case HAL_SESSION_ERROR:
+		session_id = &info->response.data.session_id;
+		break;
+	case HAL_SESSION_EVENT_CHANGE:
+		session_id = &info->response.event.session_id;
+		break;
+	case HAL_RESPONSE_UNUSED:
+	default:
+		session_id = NULL;
+		break;
+	}
+	return session_id;
+}
+
 static int __response_handler(struct venus_hfi_device *device)
 {
 	struct msm_cvp_cb_info *packets;
@@ -3249,7 +3345,6 @@
 	while (!__iface_msgq_read(device, raw_packet)) {
 		void **session_id = NULL;
 		struct msm_cvp_cb_info *info = &packets[packet_count++];
-		struct cvp_hal_sys_init_done sys_init_done = {0};
 		int rc = 0;
 
 		rc = cvp_hfi_process_msg_packet(device->device_id,
@@ -3262,59 +3357,9 @@
 		}
 
 		/* Process the packet types that we're interested in */
-		switch (info->response_type) {
-		case HAL_SYS_ERROR:
-			__process_sys_error(device);
-			break;
-		case HAL_SYS_RELEASE_RESOURCE_DONE:
-			dprintk(CVP_DBG, "Received SYS_RELEASE_RESOURCE\n");
-			break;
-		case HAL_SYS_INIT_DONE:
-			dprintk(CVP_DBG, "Received SYS_INIT_DONE\n");
+		process_system_msg(info, device, raw_packet);
 
-			sys_init_done.capabilities =
-				device->sys_init_capabilities;
-			cvp_hfi_process_sys_init_done_prop_read(
-				(struct hfi_msg_sys_init_done_packet *)
-					raw_packet, &sys_init_done);
-			info->response.cmd.data.sys_init_done = sys_init_done;
-			break;
-		default:
-			break;
-		}
-
-		/* For session-related packets, validate session */
-		switch (info->response_type) {
-		case HAL_SESSION_INIT_DONE:
-		case HAL_SESSION_END_DONE:
-		case HAL_SESSION_ABORT_DONE:
-		case HAL_SESSION_STOP_DONE:
-		case HAL_SESSION_FLUSH_DONE:
-		case HAL_SESSION_SET_BUFFER_DONE:
-		case HAL_SESSION_SUSPEND_DONE:
-		case HAL_SESSION_RESUME_DONE:
-		case HAL_SESSION_SET_PROP_DONE:
-		case HAL_SESSION_GET_PROP_DONE:
-		case HAL_SESSION_RELEASE_BUFFER_DONE:
-		case HAL_SESSION_REGISTER_BUFFER_DONE:
-		case HAL_SESSION_UNREGISTER_BUFFER_DONE:
-		case HAL_SESSION_DFS_CONFIG_CMD_DONE:
-		case HAL_SESSION_DFS_FRAME_CMD_DONE:
-		case HAL_SESSION_PROPERTY_INFO:
-			session_id = &info->response.cmd.session_id;
-			break;
-		case HAL_SESSION_ERROR:
-			session_id = &info->response.data.session_id;
-			break;
-		case HAL_SESSION_EVENT_CHANGE:
-			session_id = &info->response.event.session_id;
-			break;
-		case HAL_RESPONSE_UNUSED:
-		default:
-			session_id = NULL;
-			break;
-		}
-
+		session_id = get_session_id(info);
 		/*
 		 * hfi_process_msg_packet provides a session_id that's a hashed
 		 * value of struct hal_session, we need to coerce the hashed
@@ -3333,8 +3378,7 @@
 			session = __get_session(device,
 					(u32)(uintptr_t)*session_id);
 			if (!session) {
-				dprintk(CVP_ERR,
-						"Received a packet (%#x) for an unrecognized session (%pK), discarding\n",
+				dprintk(CVP_ERR, _INVALID_MSG_,
 						info->response_type,
 						*session_id);
 				--packet_count;
@@ -3347,7 +3391,7 @@
 		if (packet_count >= cvp_max_packets &&
 				__get_q_size(device, CVP_IFACEQ_MSGQ_IDX)) {
 			dprintk(CVP_WARN,
-					"Too many packets in message queue to handle at once, deferring read\n");
+				"Too many packets in message queue!\n");
 			break;
 		}
 
@@ -3420,8 +3464,7 @@
 
 		if (!__core_in_valid_state(device)) {
 			dprintk(CVP_ERR,
-				"Ignore responses from %d to %d as device is in invalid state",
-				(i + 1), num_responses);
+				_INVALID_STATE_, (i + 1), num_responses);
 			break;
 		}
 		dprintk(CVP_DBG, "Processing response %d of %d, type %d\n",
@@ -3732,9 +3775,8 @@
 				&bus->devfreq_prof, bus->governor, NULL);
 		if (IS_ERR_OR_NULL(bus->devfreq)) {
 			rc = PTR_ERR(bus->devfreq) ?: -EBADHANDLE;
-			dprintk(CVP_ERR,
-					"Failed to add devfreq device for bus %s and governor %s: %d\n",
-					bus->name, bus->governor, rc);
+			dprintk(CVP_ERR, _DEVFREQ_FAIL_,
+				bus->name, bus->governor, rc);
 			bus->devfreq = NULL;
 			goto err_add_dev;
 		}
@@ -3969,16 +4011,30 @@
 	dprintk(CVP_DBG, "Enabling regulators\n");
 
 	venus_hfi_for_each_regulator(device, rinfo) {
-		rc = regulator_enable(rinfo->regulator);
-		if (rc) {
-			dprintk(CVP_ERR,
-					"Failed to enable %s: %d\n",
-					rinfo->name, rc);
-			goto err_reg_enable_failed;
-		}
+		if (rinfo->has_hw_power_collapse) {
+			rc = regulator_set_mode(rinfo->regulator,
+				REGULATOR_MODE_FAST);
+			if (rc) {
+				dprintk(CVP_ERR,
+					"Failed to set hw ctrl mode for %s: %d\n",
+						rinfo->name, rc);
+				goto err_reg_enable_failed;
+			}
+			dprintk(CVP_DBG, "Enabled regulator %s hw ctrl\n",
+					rinfo->name);
 
-		dprintk(CVP_DBG, "Enabled regulator %s\n",
-				rinfo->name);
+		} else {
+			rc = regulator_enable(rinfo->regulator);
+			if (rc) {
+				dprintk(CVP_ERR,
+						"Failed to enable %s: %d\n",
+						rinfo->name, rc);
+				goto err_reg_enable_failed;
+			}
+
+			dprintk(CVP_DBG, "Enabled regulator %s\n",
+					rinfo->name);
+		}
 		c++;
 	}
 
@@ -3997,8 +4053,12 @@
 
 	dprintk(CVP_DBG, "Disabling regulators\n");
 
-	venus_hfi_for_each_regulator_reverse(device, rinfo)
+	venus_hfi_for_each_regulator_reverse(device, rinfo) {
 		__disable_regulator(rinfo, device);
+		if (rinfo->has_hw_power_collapse)
+			regulator_set_mode(rinfo->regulator,
+				REGULATOR_MODE_NORMAL);
+	}
 
 	return 0;
 }
@@ -4085,7 +4145,7 @@
 	rc = __scale_clocks(device);
 	if (rc) {
 		dprintk(CVP_WARN,
-				"Failed to scale clocks, performance might be affected\n");
+			"Failed to scale clocks, perf may regress\n");
 		rc = 0;
 	}
 
@@ -4647,15 +4707,15 @@
 	hdev->session_clean = venus_hfi_session_clean;
 	hdev->session_set_buffers = venus_hfi_session_set_buffers;
 	hdev->session_release_buffers = venus_hfi_session_release_buffers;
-	hdev->session_register_buffer = venus_hfi_session_register_buffer;
-	hdev->session_unregister_buffer = venus_hfi_session_unregister_buffer;
 	hdev->session_start = venus_hfi_session_start;
 	hdev->session_continue = venus_hfi_session_continue;
 	hdev->session_stop = venus_hfi_session_stop;
 	hdev->session_get_buf_req = venus_hfi_session_get_buf_req;
 	hdev->session_cvp_dfs_config = venus_hfi_session_cvp_dfs_config;
 	hdev->session_cvp_dfs_frame = venus_hfi_session_cvp_dfs_frame;
-	hdev->session_cvp_send_cmd = venus_hfi_session_cvp_send_cmd;
+	hdev->session_cvp_dme_config = venus_hfi_session_cvp_dme_config;
+	hdev->session_cvp_dme_frame = venus_hfi_session_cvp_dme_frame;
+	hdev->session_cvp_persist = venus_hfi_session_cvp_persist;
 	hdev->session_flush = venus_hfi_session_flush;
 	hdev->session_set_property = venus_hfi_session_set_property;
 	hdev->session_get_property = venus_hfi_session_get_property;
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi.h b/drivers/media/platform/msm/cvp/cvp_hfi.h
index e19532d..8a3de61 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi.h
+++ b/drivers/media/platform/msm/cvp/cvp_hfi.h
@@ -326,10 +326,6 @@
 #define HFI_CMD_SESSION_CVP_START	\
 	(HFI_DOMAIN_BASE_CVP + HFI_ARCH_COMMON_OFFSET +	\
 	HFI_CMD_START_OFFSET + 0x1000)
-#define HFI_CMD_SESSION_REGISTER_BUFFERS	\
-	(HFI_CMD_SESSION_CVP_START + 0x0A0)
-#define HFI_CMD_SESSION_UNREGISTER_BUFFERS	\
-	(HFI_CMD_SESSION_CVP_START + 0x0A1)
 
 /* =============BASIC OPERATIONS=================*/
 #define  HFI_CMD_SESSION_CVP_SET_BUFFERS\
@@ -384,6 +380,13 @@
 #define HFI_CMD_SESSION_CVP_PYS_HCD_CONFIG\
 	(HFI_CMD_SESSION_CVP_START + 0x017)
 
+#define  HFI_CMD_SESSION_CVP_DME_CONFIG\
+	(HFI_CMD_SESSION_CVP_START + 0x039)
+#define  HFI_CMD_SESSION_CVP_DME_FRAME\
+	(HFI_CMD_SESSION_CVP_START + 0x03A)
+
+#define  HFI_CMD_SESSION_CVP_SET_PERSIST_BUFFERS\
+	(HFI_CMD_SESSION_CVP_START + 0x04D)
 
 #define HFI_MSG_SYS_OX_START			\
 (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + HFI_MSG_START_OFFSET + 0x0000)
@@ -409,10 +412,6 @@
 #define HFI_MSG_SESSION_CVP_START	\
 	(HFI_DOMAIN_BASE_CVP + HFI_ARCH_COMMON_OFFSET +	\
 	HFI_MSG_START_OFFSET + 0x1000)
-#define HFI_MSG_SESSION_REGISTER_BUFFERS_DONE	\
-	(HFI_MSG_SESSION_CVP_START + 0x0A0)
-#define HFI_MSG_SESSION_UNREGISTER_BUFFERS_DONE	\
-	(HFI_MSG_SESSION_CVP_START + 0x0A1)
 
 /* =================BASIC OPERATIONS=================*/
 #define HFI_MSG_SESSION_CVP_SET_BUFFERS\
@@ -436,7 +435,12 @@
 #define HFI_MSG_SESSION_CVP_FTEXT\
 	(HFI_MSG_SESSION_CVP_START + 0x00A)
 
-#define HFI_MSG_SESSION_CVP_OPERATION_CONFIG (HFI_MSG_SESSION_CVP_START + 0x010)
+#define HFI_MSG_SESSION_CVP_DME\
+	(HFI_MSG_SESSION_CVP_START + 0x023)
+#define HFI_MSG_SESSION_CVP_OPERATION_CONFIG (HFI_MSG_SESSION_CVP_START + 0x030)
+
+#define  HFI_MSG_SESSION_CVP_SET_PERSIST_BUFFERS\
+	(HFI_MSG_SESSION_CVP_START + 0x034)
 
 #define CVP_IFACEQ_MAX_PKT_SIZE       1024
 #define CVP_IFACEQ_MED_PKT_SIZE       768
@@ -577,36 +581,40 @@
 	u32 buf_size[HFI_MAX_PLANES];
 };
 
-struct hfi_cmd_session_cvp_dfs_config {
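+/*
+ * Common header shared by all CVP session command packets; used to
+ * validate size/type/session_id on packets copied from userspace.
+ */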
+struct hfi_cmd_session_hdr {
 	u32 size;
 	u32 packet_type;
 	u32 session_id;
-	u32 srcbuffer_format;
-	struct HFI_CVP_COLOR_PLANE_INFO left_plane_info;
-	struct HFI_CVP_COLOR_PLANE_INFO right_plane_info;
-	u32 width;
-	u32 height;
-	u32 occlusionmask_enable;
-	u32 occlusioncost;
-	u32 occlusionbound;
-	u32 occlusionshift;
-	u32 maxdisparity;
-	u32 disparityoffset;
-	u32 medianfilter_enable;
-	u32 occlusionfilling_enable;
-	u32 occlusionmaskdump;
-	struct hfi_cvp_client_data  clientdata;
 };
 
-struct hfi_cmd_session_cvp_dfs_frame {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 left_buffer_index;
-	u32 right_buffer_index;
-	u32 disparitymap_buffer_idx;
-	u32 occlusionmask_buffer_idx;
-	struct hfi_cvp_client_data  clientdata;
+struct hfi_cmd_session_cvp_dfs_config_packet {
+	u32 cvp_internal_dfs_config[CVP_DFS_CONFIG_CMD_SIZE];
+};
+
+struct hfi_cmd_session_cvp_dfs_frame_packet {
+	u32 cvp_dfs_frame[CVP_DFS_FRAME_BUFFERS_OFFSET];
+	u32 left_view_buffer_addr;
+	u32 left_view_buffer_size;
+	u32 right_view_buffer_addr;
+	u32 right_view_buffer_size;
+	u32 disparity_map_buffer_addr;
+	u32 disparity_map_buffer_size;
+	u32 occlusion_mask_buffer_addr;
+	u32 occlusion_mask_buffer_size;
+};
+
+struct hfi_cmd_session_cvp_dme_config_packet {
+	u32 cvp_internal_dme_config[CVP_DME_CONFIG_CMD_SIZE];
+};
+
+struct hfi_cmd_session_cvp_dme_frame_packet {
+	u32 cvp_dme_frame[CVP_DME_FRAME_BUFFERS_OFFSET];
+	struct buf_desc bufs[8];
+};
+
+struct hfi_cmd_session_cvp_persist_packet {
+	u32 cvp_persist_frame[CVP_PERSIST_BUFFERS_OFFSET];
+	struct buf_desc bufs[CVP_PSRSIST_BUF_NUM];
 };
 
 struct hfi_cmd_session_release_buffer_packet {
@@ -814,22 +822,6 @@
 	u32 rg_buffer_info[1];
 };
 
-struct hfi_msg_session_register_buffers_done_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 client_data;
-	u32 error_type;
-};
-
-struct hfi_msg_session_unregister_buffers_done_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 client_data;
-	u32 error_type;
-};
-
 struct hfi_extradata_mb_quantization_payload {
 	u8 rg_mb_qp[1];
 };
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi_api.h b/drivers/media/platform/msm/cvp/cvp_hfi_api.h
index 1e3ef88..dae783a 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi_api.h
+++ b/drivers/media/platform/msm/cvp/cvp_hfi_api.h
@@ -904,24 +904,6 @@
 	void *resource_handle;
 };
 
-struct cvp_register_buffer {
-	enum hal_buffer type;
-	u32 index;
-	u32 size;
-	u32 device_addr;
-	u32 response_required;
-	u32 client_data;
-};
-
-struct cvp_unregister_buffer {
-	enum hal_buffer type;
-	u32 index;
-	u32 size;
-	u32 device_addr;
-	u32 response_required;
-	u32 client_data;
-};
-
 struct cvp_buffer_addr_info {
 	enum hal_buffer buffer_type;
 	u32 buffer_size;
@@ -1118,7 +1100,6 @@
 	HAL_SESSION_ABORT_DONE,
 	HAL_SESSION_STOP_DONE,
 	HAL_SESSION_CVP_OPERATION_CONFIG,
-	HAL_SESSION_CVP_DFS,
 	HAL_SESSION_FLUSH_DONE,
 	HAL_SESSION_SUSPEND_DONE,
 	HAL_SESSION_RESUME_DONE,
@@ -1130,6 +1111,9 @@
 	HAL_SESSION_RELEASE_RESOURCE_DONE,
 	HAL_SESSION_DFS_CONFIG_CMD_DONE,
 	HAL_SESSION_DFS_FRAME_CMD_DONE,
+	HAL_SESSION_DME_CONFIG_CMD_DONE,
+	HAL_SESSION_DME_FRAME_CMD_DONE,
+	HAL_SESSION_PERSIST_CMD_DONE,
 	HAL_SESSION_PROPERTY_INFO,
 	HAL_SESSION_ERROR,
 	HAL_RESPONSE_UNUSED = 0x10000000,
@@ -1278,8 +1262,6 @@
 		struct cvp_hal_sys_init_done sys_init_done;
 		struct cvp_hal_session_init_done session_init_done;
 		struct hal_buffer_info buffer_info;
-		struct cvp_register_buffer regbuf;
-		struct cvp_unregister_buffer unregbuf;
 		union hal_get_property property;
 		enum hal_flush flush_type;
 	} data;
@@ -1428,6 +1410,76 @@
 #define call_hfi_op(q, op, args...)			\
 	(((q) && (q)->op) ? ((q)->op(args)) : 0)
 
+/* DFS related structures */
+struct msm_cvp_internal_dfsconfig {
+	struct list_head list;
+	struct msm_smem smem;
+	struct msm_cvp_dfs_config dfs_config;
+};
+
+struct	buf_desc {
+	u32 fd;
+	u32 size;
+};
+
+/**
+ * struct msm_cvp_dfs_frame_kmd - argument passed with VIDIOC_CVP_CMD
+ * @cvp_dfs_frame:                parameters for DFS frame command
+ * @left_view_buffer_fd:          fd for left view buffer
+ * @left_view_buffer_size:        size for left view buffer
+ * @right_view_buffer_fd:         fd for right view buffer
+ * @right_view_buffer_size:       size for right view buffer
+ * @disparity_map_buffer_fd:      fd for disparity map buffer
+ * @disparity_map_buffer_size:    size for disparity map buffer
+ * @occlusion_mask_buffer_fd:     fd for occlusion mask buffer
+ * @occlusion_mask_buffer_size:   size for occlusion mask buffer
+ */
+struct msm_cvp_dfs_frame_kmd {
+	unsigned int cvp_dfs_frame[CVP_DFS_FRAME_BUFFERS_OFFSET];
+	unsigned int left_view_buffer_fd;
+	unsigned int left_view_buffer_size;
+	unsigned int right_view_buffer_fd;
+	unsigned int right_view_buffer_size;
+	unsigned int disparity_map_buffer_fd;
+	unsigned int disparity_map_buffer_size;
+	unsigned int occlusion_mask_buffer_fd;
+	unsigned int occlusion_mask_buffer_size;
+};
+
+struct msm_cvp_internal_dfsframe {
+	struct list_head list;
+	struct msm_cvp_dfs_frame_kmd dfs_frame;
+};
+
+/* DME related structures */
+struct msm_cvp_internal_dmeconfig {
+	struct list_head list;
+	struct msm_smem smem;
+	struct msm_cvp_dme_config dme_config;
+};
+
+struct msm_cvp_dme_frame_kmd {
+	unsigned int cvp_dme_frame[CVP_DME_FRAME_BUFFERS_OFFSET];
+	struct buf_desc bufs[CVP_DME_BUF_NUM];
+};
+
+struct msm_cvp_internal_dmeframe {
+	struct list_head list;
+	struct msm_cvp_dme_frame_kmd dme_frame;
+};
+
+struct msm_cvp_persist_kmd {
+	unsigned int cvp_pcmd[CVP_PERSIST_BUFFERS_OFFSET];
+	struct buf_desc bufs[CVP_PSRSIST_BUF_NUM];
+};
+
+struct msm_cvp_internal_persist_cmd {
+	struct list_head list;
+	struct msm_cvp_persist_kmd persist_cmd;
+};
+
 struct hfi_device {
 	void *hfi_device_data;
 
@@ -1445,10 +1497,6 @@
 				struct cvp_buffer_addr_info *buffer_info);
 	int (*session_release_buffers)(void *sess,
 				struct cvp_buffer_addr_info *buffer_info);
-	int (*session_register_buffer)(void *sess,
-				struct cvp_register_buffer *buffer);
-	int (*session_unregister_buffer)(void *sess,
-				struct cvp_unregister_buffer *buffer);
 	int (*session_load_res)(void *sess);
 	int (*session_release_res)(void *sess);
 	int (*session_start)(void *sess);
@@ -1457,11 +1505,15 @@
 	int (*session_cvp_operation_config)(void *sess,
 		struct cvp_frame_data *input_frame);
 	int (*session_cvp_dfs_config)(void *sess,
-		struct msm_cvp_dfsconfig *dfs_config);
+		struct msm_cvp_internal_dfsconfig *dfs_config);
 	int (*session_cvp_dfs_frame)(void *sess,
-		struct msm_cvp_dfsframe *dfs_frame);
-	int (*session_cvp_send_cmd)(void *sess,
-		struct cvp_frame_data *input_frame);
+		struct msm_cvp_internal_dfsframe *dfs_frame);
+	int (*session_cvp_dme_config)(void *sess,
+		struct msm_cvp_internal_dmeconfig *dme_config);
+	int (*session_cvp_dme_frame)(void *sess,
+		struct msm_cvp_internal_dmeframe *dme_frame);
+	int (*session_cvp_persist)(void *sess,
+		struct msm_cvp_internal_persist_cmd *pbuf_cmd);
 	int (*session_get_buf_req)(void *sess);
 	int (*session_flush)(void *sess, enum hal_flush flush_mode);
 	int (*session_set_property)(void *sess, enum hal_property ptype,
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi_helper.h b/drivers/media/platform/msm/cvp/cvp_hfi_helper.h
index f43be67..681ef00 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi_helper.h
+++ b/drivers/media/platform/msm/cvp/cvp_hfi_helper.h
@@ -1036,26 +1036,6 @@
 	u32 size;
 };
 
-struct hfi_cmd_session_register_buffers_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 client_data;
-	u32 response_req;
-	u32 num_buffers;
-	struct hfi_buffer_mapping_type buffer[1];
-};
-
-struct hfi_cmd_session_unregister_buffers_packet {
-	u32 size;
-	u32 packet_type;
-	u32 session_id;
-	u32 client_data;
-	u32 response_req;
-	u32 num_buffers;
-	struct hfi_buffer_mapping_type buffer[1];
-};
-
 struct hfi_cmd_session_sync_process_packet {
 	u32 size;
 	u32 packet_type;
@@ -1084,24 +1064,76 @@
 	u32 size;
 	u32 packet_type;
 	u32 session_id;
+	struct hfi_cvp_client_data  client_data;
 	u32 op_conf_id;
 	u32 error_type;
-	struct hfi_cvp_client_data  client_data;
 };
 
 struct hfi_msg_session_cvp_dfs_packet_type {
 	u32 size;
 	u32 packet_type;
 	u32 session_id;
+	struct hfi_cvp_client_data  client_data;
 	u32 error_type;
-	u32 left_view_buffer_index;
-	u32 right_view_buffer_index;
 	u32 width;
 	u32 height;
 	u32 occlusionmask_enable;
-	u32 disparitymap_buffer_idx;
-	u32 occlusionmask_buffer_idx;
+	u32 left_view_buf_addr;
+	u32 left_view_buf_size;
+	u32 right_view_buf_addr;
+	u32 right_view_buf_size;
+	u32 disparity_map_buf_addr;
+	u32 disparity_map_buf_size;
+	u32 occ_mask_buf_addr;
+	u32 occ_mask_buf_size;
+};
+
+struct hfi_msg_session_cvp_dme_packet_type {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
 	struct hfi_cvp_client_data  client_data;
+	u32 error_type;
+	u32 skip_mv_calc;
+	u32 src_buf_addr;
+	u32 src_buf_size;
+	u32 nSrcBuffer_size;
+	u32 src_frame_ctx_buf_addr;
+	u32 src_frame_ctx_buf_size;
+	u32 ref_buf_addr;
+	u32 ref_buf_size;
+	u32 ref_frame_ctxt_buf_addr;
+	u32 ref_frame_ctxt_buf_size;
+	u32 video_spatial_temp_status_buf_addr;
+	u32 video_spatial_temp_status_buf_size;
+	u32 full_res_height;
+	u32 full_res_width;
+	u32 proc_frame_width;
+	u32 proc_frame_height;
+	u32 transform_confidence;
+	u32 frame_sum_gradient;
+	u32 frame_sum_square_gradient1;
+	u32 frame_sum_square_gradient2;
+	u16 n_luma_hist[8];
+	s32 mvx_sum;
+	s32 mvy_Sum;
+	u32 num_mvs;
+	s32 final_transform[9];
+	s32 pad;
+	s64 transform[9];
+};
+
+struct hfi_msg_session_cvp_persist_packet_type {
+	u32 size;
+	u32 packet_type;
+	u32 session_id;
+	struct hfi_cvp_client_data  client_data;
+	u32 error_type;
+	u32 cvp_op;
+	u32 persist1_buffer_fd;
+	u32 persist1_size;
+	u32 persist2_fd;
+	u32 persist2_size;
 };
 
 struct hfi_msg_release_buffer_ref_event_packet {
diff --git a/drivers/media/platform/msm/cvp/hfi_packetization.c b/drivers/media/platform/msm/cvp/hfi_packetization.c
index e675f8a..21d9bcc 100644
--- a/drivers/media/platform/msm/cvp/hfi_packetization.c
+++ b/drivers/media/platform/msm/cvp/hfi_packetization.c
@@ -666,79 +666,24 @@
 
 	pkt->packet_type = HFI_CMD_SESSION_CVP_RELEASE_BUFFERS;
 	pkt->session_id = hash32_ptr(session);
-	pkt->buffer_type = 0xdeadbeef;
-	pkt->num_buffers = 0;
-	pkt->size = sizeof(struct hfi_cmd_session_cvp_release_buffers_packet);
+	pkt->num_buffers = buffer_info->num_buffers;
+
+	if (buffer_info->buffer_type == HAL_BUFFER_OUTPUT ||
+		buffer_info->buffer_type == HAL_BUFFER_OUTPUT2) {
+		dprintk(CVP_ERR, "%s: deprecated buffer_type\n", __func__);
+		return -EINVAL;
+	}
+
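+	/* The release packet shares the set_buffers packet layout. */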
+	pkt->size = sizeof(struct hfi_cmd_session_set_buffers_packet) +
+		((buffer_info->num_buffers - 1) * sizeof(u32));
+
+	pkt->buffer_type = get_hfi_buffer(buffer_info->buffer_type);
+	if (!pkt->buffer_type)
+		return -EINVAL;
 
 	return 0;
 }
 
-int cvp_create_pkt_cmd_session_register_buffer(
-		struct hfi_cmd_session_register_buffers_packet *pkt,
-		struct hal_session *session,
-		struct cvp_register_buffer *buffer)
-{
-	int rc = 0, i;
-	struct hfi_buffer_mapping_type *buf;
-
-	if (!pkt || !session) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-
-	pkt->packet_type = HFI_CMD_SESSION_REGISTER_BUFFERS;
-	pkt->session_id = hash32_ptr(session);
-	pkt->client_data = buffer->client_data;
-	pkt->response_req = buffer->response_required;
-	pkt->num_buffers = 1;
-	pkt->size = sizeof(struct hfi_cmd_session_register_buffers_packet) -
-			sizeof(u32) + (pkt->num_buffers *
-			sizeof(struct hfi_buffer_mapping_type));
-
-	buf = (struct hfi_buffer_mapping_type *)pkt->buffer;
-	for (i = 0; i < pkt->num_buffers; i++) {
-		buf->index = buffer->index;
-		buf->device_addr = buffer->device_addr;
-		buf->size = buffer->size;
-		buf++;
-	}
-
-	return rc;
-}
-
-int cvp_create_pkt_cmd_session_unregister_buffer(
-		struct hfi_cmd_session_unregister_buffers_packet *pkt,
-		struct hal_session *session,
-		struct cvp_unregister_buffer *buffer)
-{
-	int rc = 0, i;
-	struct hfi_buffer_mapping_type *buf;
-
-	if (!pkt || !session) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-
-	pkt->packet_type = HFI_CMD_SESSION_UNREGISTER_BUFFERS;
-	pkt->session_id = hash32_ptr(session);
-	pkt->client_data = buffer->client_data;
-	pkt->response_req = buffer->response_required;
-	pkt->num_buffers = 1;
-	pkt->size = sizeof(struct hfi_cmd_session_unregister_buffers_packet) -
-			sizeof(u32) + (pkt->num_buffers *
-			sizeof(struct hfi_buffer_mapping_type));
-
-	buf = (struct hfi_buffer_mapping_type *)pkt->buffer;
-	for (i = 0; i < pkt->num_buffers; i++) {
-		buf->index = buffer->index;
-		buf->device_addr = buffer->device_addr;
-		buf->size = buffer->size;
-		buf++;
-	}
-
-	return rc;
-}
-
 int cvp_create_pkt_cmd_session_etb_decoder(
 	struct hfi_cmd_session_empty_buffer_compressed_packet *pkt,
 	struct hal_session *session, struct cvp_frame_data *input_frame)
@@ -865,131 +810,167 @@
 }
 
 int cvp_create_pkt_cmd_session_cvp_dfs_config(
-		struct hfi_cmd_session_cvp_dfs_config *pkt,
+		struct hfi_cmd_session_cvp_dfs_config_packet *pkt,
 		struct hal_session *session,
-		struct msm_cvp_dfsconfig *dfs_config)
+		struct msm_cvp_internal_dfsconfig *dfs_config)
 {
-	int rc = 0, i = 0;
+	struct hfi_cmd_session_hdr *ptr =
+		(struct hfi_cmd_session_hdr *)pkt;
 
 	if (!pkt || !session)
 		return -EINVAL;
 
-	pkt->size = sizeof(struct hfi_cmd_session_cvp_dfs_config);
-	pkt->packet_type = HFI_CMD_SESSION_CVP_DFS_CONFIG;
-	pkt->session_id = hash32_ptr(session);
-	pkt->srcbuffer_format = dfs_config->srcbuffer_format;
-	for (i = 0; i < HFI_MAX_PLANES; i++) {
-		pkt->left_plane_info.stride[i] =
-			dfs_config->left_plane_info.stride[i];
-		pkt->left_plane_info.buf_size[i] =
-			dfs_config->left_plane_info.buf_size[i];
-		pkt->right_plane_info.stride[i] =
-			dfs_config->right_plane_info.stride[i];
-		pkt->right_plane_info.buf_size[i] =
-			dfs_config->right_plane_info.buf_size[i];
-	}
-	pkt->width = dfs_config->width;
-	pkt->height = dfs_config->height;
-	pkt->occlusionmask_enable = dfs_config->occlusionmask_enable;
-	pkt->occlusioncost = dfs_config->occlusioncost;
-	pkt->occlusionbound = dfs_config->occlusionbound;
-	pkt->occlusionshift = dfs_config->occlusionshift;
-	pkt->maxdisparity = dfs_config->maxdisparity;
-	pkt->disparityoffset = dfs_config->disparityoffset;
-	pkt->medianfilter_enable = dfs_config->medianfilter_enable;
-	pkt->occlusionfilling_enable = dfs_config->occlusionfilling_enable;
-	pkt->occlusionmaskdump = dfs_config->occlusionmaskdump;
-	pkt->clientdata.transactionid =
-		dfs_config->clientdata.transactionid;
-	pkt->clientdata.client_data1  =
-		dfs_config->clientdata.client_data1;
-	pkt->clientdata.client_data2  =
-		dfs_config->clientdata.client_data2;
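+	/*
+	 * The config packet arrives fully formed from the caller; copy it
+	 * verbatim, then sanity-check the embedded header fields below.
+	 */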
+	memcpy(pkt, &dfs_config->dfs_config,
+		CVP_DFS_CONFIG_CMD_SIZE*sizeof(unsigned int));
 
-	dprintk(CVP_DBG,
-		"%s: size=%d packet_type=%d session_id=%d height=%d", __func__,
-		pkt->size, pkt->packet_type, pkt->session_id, pkt->height);
-	dprintk(CVP_DBG,
-		"occlusionmask_enable=%d occlusioncost=%d occlusionbound=%d",
-		pkt->occlusionmask_enable, pkt->occlusioncost,
-		pkt->occlusionbound);
-	dprintk(CVP_DBG,
-		"occlusionshift=%d maxdisparity=%d disparityoffset=%d",
-		pkt->occlusionshift, pkt->maxdisparity,
-		pkt->disparityoffset);
-	dprintk(CVP_DBG,
-		"medianfilter_enable=%d occlusionfilling_enable=%d occlusionmaskdump=%d",
-		pkt->medianfilter_enable, pkt->occlusionfilling_enable,
-		pkt->occlusionmaskdump);
-	dprintk(CVP_DBG,
-		"left_plane_info.ActualStride[HFI_COLOR_PLANE_PICDATA]:%u",
-		pkt->left_plane_info.stride[HFI_COLOR_PLANE_PICDATA]
-		);
-	dprintk(CVP_DBG,
-		"LeftViewColPlaneInfo.ActualStride[HFI_COLOR_PLANE_METADATA]:%u",
-		pkt->left_plane_info.stride[HFI_COLOR_PLANE_METADATA]
-		);
-	dprintk(CVP_DBG,
-		"LeftViewColPlaneInfo.ActualBufSize[HFI_COLOR_PLANE_PICDATA]:%u,",
-		pkt->left_plane_info.buf_size[HFI_COLOR_PLANE_PICDATA]
-		);
-	dprintk(CVP_DBG,
-		"LeftViewColPlaneInfo.ActualBufSize[HFI_COLOR_PLANE_METADATA]%u",
-		pkt->left_plane_info.buf_size[HFI_COLOR_PLANE_METADATA]
-		);
-	dprintk(CVP_DBG,
-		"RightViewColPlaneInfo:stride[HFI_COLOR_PLANE_PICDATA]:%u",
-		pkt->right_plane_info.stride[HFI_COLOR_PLANE_PICDATA]
-		);
-	dprintk(CVP_DBG,
-		"RightViewColPlaneInfo.stride[HFI_COLOR_PLANE_METADATA]:%u",
-		pkt->right_plane_info.stride[HFI_COLOR_PLANE_METADATA]
-		);
-	dprintk(CVP_DBG,
-		"RightViewColPlaneInfo.ActualBufSize[HFI_COLOR_PLANE_PICDATA] :%u",
-		pkt->right_plane_info.buf_size[HFI_COLOR_PLANE_PICDATA]
-		);
-	dprintk(CVP_DBG,
-	"RightViewColPlaneInfo.ActualBufSize[HFI_COLOR_PLANE_METADATA] %u",
-	pkt->right_plane_info.buf_size[HFI_COLOR_PLANE_METADATA]
-	);
+	if (ptr->size != CVP_DFS_CONFIG_CMD_SIZE*sizeof(unsigned int))
+		goto error_dfs_config;
 
-	return rc;
+	if (ptr->packet_type != HFI_CMD_SESSION_CVP_DFS_CONFIG)
+		goto error_dfs_config;
+
+	if (ptr->session_id != hash32_ptr(session))
+		goto error_dfs_config;
+
+	return 0;
+
+error_dfs_config:
+	dprintk(CVP_ERR, "%s: size=%d type=%d sessionid=%d\n",
+		__func__, ptr->size, ptr->packet_type, ptr->session_id);
+
+	return -EINVAL;
 }
 
 
 int cvp_create_pkt_cmd_session_cvp_dfs_frame(
-		struct hfi_cmd_session_cvp_dfs_frame *pkt,
+		struct hfi_cmd_session_cvp_dfs_frame_packet *pkt,
 		struct hal_session *session,
-		struct msm_cvp_dfsframe *dfs_frame)
+		struct msm_cvp_internal_dfsframe *dfs_frame)
 {
-	int rc = 0;
+	struct hfi_cmd_session_hdr *ptr =
+		(struct hfi_cmd_session_hdr *)pkt;
 
 	if (!pkt || !session)
 		return -EINVAL;
 
-	pkt->size = sizeof(struct hfi_cmd_session_cvp_dfs_frame);
-	pkt->packet_type = HFI_CMD_SESSION_CVP_DFS_FRAME;
-	pkt->session_id = hash32_ptr(session);
-	pkt->left_buffer_index = dfs_frame->left_buffer_index;
-	pkt->right_buffer_index = dfs_frame->right_buffer_index;
-	pkt->disparitymap_buffer_idx = dfs_frame->disparitymap_buffer_idx;
-	pkt->occlusionmask_buffer_idx = dfs_frame->occlusionmask_buffer_idx;
-	pkt->clientdata.transactionid = dfs_frame->clientdata.transactionid;
-	pkt->clientdata.client_data1 = dfs_frame->clientdata.client_data1;
+	memcpy(pkt, &dfs_frame->dfs_frame,
+		CVP_DFS_FRAME_CMD_SIZE*sizeof(unsigned int));
 
-	dprintk(CVP_DBG,
-		"%s: size=%d, packet_type=%d session_id=%d left_buffer_index=%d",
-		__func__, pkt->size, pkt->packet_type, pkt->session_id,
-		pkt->left_buffer_index);
-	dprintk(CVP_DBG,
-		"right_buffer_index=%d disparitymap_buffer_idx=%d",
-		pkt->right_buffer_index, pkt->disparitymap_buffer_idx);
-	dprintk(CVP_DBG,
-		"occlusionmask_buffer_idx=%d ",
-			pkt->occlusionmask_buffer_idx);
+	if (ptr->size != CVP_DFS_FRAME_CMD_SIZE*sizeof(unsigned int))
+		goto error_dfs_frame;
 
-	return rc;
+	if (ptr->packet_type != HFI_CMD_SESSION_CVP_DFS_FRAME)
+		goto error_dfs_frame;
+
+	if (ptr->session_id != hash32_ptr(session))
+		goto error_dfs_frame;
+
+	return 0;
+
+error_dfs_frame:
+	dprintk(CVP_ERR, "%s: size=%d type=%d sessionid=%d\n",
+		__func__, ptr->size, ptr->packet_type, ptr->session_id);
+
+	return -EINVAL;
+}
+
+int cvp_create_pkt_cmd_session_cvp_dme_config(
+		struct hfi_cmd_session_cvp_dme_config_packet *pkt,
+		struct hal_session *session,
+		struct msm_cvp_internal_dmeconfig *dme_config)
+{
+	struct hfi_cmd_session_hdr *ptr =
+		(struct hfi_cmd_session_hdr *)pkt;
+
+	if (!pkt || !session)
+		return -EINVAL;
+
+	memcpy(pkt, &dme_config->dme_config,
+		CVP_DME_CONFIG_CMD_SIZE*sizeof(unsigned int));
+
+	if (ptr->size != CVP_DME_CONFIG_CMD_SIZE*sizeof(unsigned int))
+		goto error_dme_config;
+
+	if (ptr->packet_type != HFI_CMD_SESSION_CVP_DME_CONFIG)
+		goto error_dme_config;
+
+	if (ptr->session_id != hash32_ptr(session))
+		goto error_dme_config;
+
+	return 0;
+
+error_dme_config:
+	dprintk(CVP_ERR, "%s: size=%d type=%d sessionid=%d\n",
+		__func__, ptr->size, ptr->packet_type, ptr->session_id);
+
+	return -EINVAL;
+}
+
+int cvp_create_pkt_cmd_session_cvp_dme_frame(
+		struct hfi_cmd_session_cvp_dme_frame_packet *pkt,
+		struct hal_session *session,
+		struct msm_cvp_internal_dmeframe *dme_frame)
+{
+	struct hfi_cmd_session_hdr *ptr =
+		(struct hfi_cmd_session_hdr *)pkt;
+
+	if (!pkt || !session)
+		return -EINVAL;
+
+	memcpy(pkt, &dme_frame->dme_frame,
+		CVP_DME_FRAME_CMD_SIZE*sizeof(unsigned int));
+
+	if (ptr->size != CVP_DME_FRAME_CMD_SIZE*sizeof(unsigned int))
+		goto error_dme_frame;
+
+	if (ptr->packet_type != HFI_CMD_SESSION_CVP_DME_FRAME)
+		goto error_dme_frame;
+
+	if (ptr->session_id != hash32_ptr(session))
+		goto error_dme_frame;
+
+	return 0;
+
+error_dme_frame:
+	dprintk(CVP_ERR, "%s: size=%d type=%d sessionid=%d\n",
+		__func__, ptr->size, ptr->packet_type, ptr->session_id);
+
+	return -EINVAL;
+}
+
+int cvp_create_pkt_cmd_session_cvp_persist(
+		struct hfi_cmd_session_cvp_persist_packet *pkt,
+		struct hal_session *session,
+		struct msm_cvp_internal_persist_cmd *pbuf_cmd)
+{
+	struct hfi_cmd_session_hdr *ptr =
+		(struct hfi_cmd_session_hdr *)pkt;
+
+	if (!pkt || !session)
+		return -EINVAL;
+
+	memcpy(pkt, &pbuf_cmd->persist_cmd,
+		CVP_PERSIST_CMD_SIZE*sizeof(unsigned int));
+
+	if (ptr->size != CVP_PERSIST_CMD_SIZE*sizeof(unsigned int))
+		goto error_persist;
+
+	if (ptr->packet_type != HFI_CMD_SESSION_CVP_SET_PERSIST_BUFFERS)
+		goto error_persist;
+
+	if (ptr->session_id != hash32_ptr(session))
+		goto error_persist;
+
+	return 0;
+
+error_persist:
+	dprintk(CVP_ERR, "%s: size=%d type=%d sessionid=%d\n",
+		__func__, ptr->size, ptr->packet_type, ptr->session_id);
+
+	return -EINVAL;
 }
 
 
@@ -2189,10 +2170,6 @@
 		cvp_create_pkt_cmd_session_set_buffers,
 	.session_release_buffers =
 		cvp_create_pkt_cmd_session_release_buffers,
-	.session_register_buffer =
-		cvp_create_pkt_cmd_session_register_buffer,
-	.session_unregister_buffer =
-		cvp_create_pkt_cmd_session_unregister_buffer,
 	.session_etb_decoder = cvp_create_pkt_cmd_session_etb_decoder,
 	.session_etb_encoder = cvp_create_pkt_cmd_session_etb_encoder,
 	.session_ftb = cvp_create_pkt_cmd_session_ftb,
@@ -2204,6 +2181,12 @@
 		cvp_create_pkt_cmd_session_cvp_dfs_config,
 	.session_cvp_dfs_frame =
 		cvp_create_pkt_cmd_session_cvp_dfs_frame,
+	.session_cvp_dme_config =
+		cvp_create_pkt_cmd_session_cvp_dme_config,
+	.session_cvp_dme_frame =
+		cvp_create_pkt_cmd_session_cvp_dme_frame,
+	.session_cvp_persist =
+		cvp_create_pkt_cmd_session_cvp_persist,
 };
 
 struct hfi_packetization_ops *cvp_hfi_get_pkt_ops_handle(
diff --git a/drivers/media/platform/msm/cvp/hfi_packetization.h b/drivers/media/platform/msm/cvp/hfi_packetization.h
index c876ac1..e613d8e 100644
--- a/drivers/media/platform/msm/cvp/hfi_packetization.h
+++ b/drivers/media/platform/msm/cvp/hfi_packetization.h
@@ -51,14 +51,6 @@
 		struct hfi_cmd_session_cvp_release_buffers_packet *pkt,
 		struct hal_session *session,
 		struct cvp_buffer_addr_info *buffer_info);
-	int (*session_register_buffer)(
-		struct hfi_cmd_session_register_buffers_packet *pkt,
-		struct hal_session *session,
-		struct cvp_register_buffer *buffer);
-	int (*session_unregister_buffer)(
-		struct hfi_cmd_session_unregister_buffers_packet *pkt,
-		struct hal_session *session,
-		struct cvp_unregister_buffer *buffer);
 	int (*session_etb_decoder)(
 		struct hfi_cmd_session_empty_buffer_compressed_packet *pkt,
 		struct hal_session *session,
@@ -86,13 +78,25 @@
 		struct hfi_cmd_session_sync_process_packet *pkt,
 		struct hal_session *session);
 	int (*session_cvp_dfs_config)(
-			struct hfi_cmd_session_cvp_dfs_config *pkt,
+			struct hfi_cmd_session_cvp_dfs_config_packet *pkt,
 			struct hal_session *session,
-			struct msm_cvp_dfsconfig *dfs_config);
+			struct msm_cvp_internal_dfsconfig *dfs_config);
 	int (*session_cvp_dfs_frame)(
-			struct hfi_cmd_session_cvp_dfs_frame *pkt,
+			struct hfi_cmd_session_cvp_dfs_frame_packet *pkt,
 			struct hal_session *session,
-			struct msm_cvp_dfsframe *dfs_frame);
+			struct msm_cvp_internal_dfsframe *dfs_frame);
+	int (*session_cvp_dme_config)(
+			struct hfi_cmd_session_cvp_dme_config_packet *pkt,
+			struct hal_session *session,
+			struct msm_cvp_internal_dmeconfig *dme_config);
+	int (*session_cvp_dme_frame)(
+			struct hfi_cmd_session_cvp_dme_frame_packet *pkt,
+			struct hal_session *session,
+			struct msm_cvp_internal_dmeframe *dme_frame);
+	int (*session_cvp_persist)(
+			struct hfi_cmd_session_cvp_persist_packet *pkt,
+			struct hal_session *session,
+			struct msm_cvp_internal_persist_cmd *pbuf_cmd);
 };
 
 struct hfi_packetization_ops *cvp_hfi_get_pkt_ops_handle(
diff --git a/drivers/media/platform/msm/cvp/hfi_response_handler.c b/drivers/media/platform/msm/cvp/hfi_response_handler.c
index 041d763..d0ea374 100644
--- a/drivers/media/platform/msm/cvp/hfi_response_handler.c
+++ b/drivers/media/platform/msm/cvp/hfi_response_handler.c
@@ -743,61 +743,6 @@
 
 	return 0;
 }
-
-static int hfi_process_session_register_buffer_done(u32 device_id,
-		struct hfi_msg_session_register_buffers_done_packet *pkt,
-		struct msm_cvp_cb_info *info)
-{
-	struct msm_cvp_cb_cmd_done cmd_done = {0};
-
-	if (!pkt || pkt->size <
-		sizeof(struct hfi_msg_session_register_buffers_done_packet)) {
-		dprintk(CVP_ERR, "%s: bad packet/packet size %d\n",
-			__func__, pkt ? pkt->size : 0);
-		return -E2BIG;
-	}
-	dprintk(CVP_DBG, "RECEIVED: SESSION_REGISTER_BUFFERS_DONE[%#x]\n",
-			pkt->session_id);
-
-	cmd_done.device_id = device_id;
-	cmd_done.size = sizeof(struct msm_cvp_cb_cmd_done);
-	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
-	cmd_done.status = hfi_map_err_status(pkt->error_type);
-	cmd_done.data.regbuf.client_data = pkt->client_data;
-
-	info->response_type = HAL_SESSION_REGISTER_BUFFER_DONE;
-	info->response.cmd = cmd_done;
-
-	return 0;
-}
-
-static int hfi_process_session_unregister_buffer_done(u32 device_id,
-		struct hfi_msg_session_unregister_buffers_done_packet *pkt,
-		struct msm_cvp_cb_info *info)
-{
-	struct msm_cvp_cb_cmd_done cmd_done = {0};
-
-	if (!pkt || pkt->size <
-		sizeof(struct hfi_msg_session_unregister_buffers_done_packet)) {
-		dprintk(CVP_ERR, "%s: bad packet/packet size %d\n",
-			__func__, pkt ? pkt->size : 0);
-		return -E2BIG;
-	}
-	dprintk(CVP_DBG, "RECEIVED: SESSION_UNREGISTER_BUFFERS_DONE[%#x]\n",
-			pkt->session_id);
-
-	cmd_done.device_id = device_id;
-	cmd_done.size = sizeof(struct msm_cvp_cb_cmd_done);
-	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
-	cmd_done.status = hfi_map_err_status(pkt->error_type);
-	cmd_done.data.unregbuf.client_data = pkt->client_data;
-
-	info->response_type = HAL_SESSION_UNREGISTER_BUFFER_DONE;
-	info->response.cmd = cmd_done;
-
-	return 0;
-}
-
 static int hfi_process_session_end_done(u32 device_id,
 		struct hfi_msg_sys_session_end_done_packet *pkt,
 		struct msm_cvp_cb_info *info)
@@ -924,9 +869,22 @@
 	cmd_done.status = hfi_map_err_status(pkt->error_type);
 	cmd_done.size = 0;
 
-	dprintk(CVP_DBG, "%s: device_id=%d cmd_done.status=%d\n",
-		__func__, device_id, cmd_done.status);
+	dprintk(CVP_DBG,
+		"%s: device_id=%d status=%d, sessionid=%x config=%x\n",
+		__func__, device_id, cmd_done.status,
+		cmd_done.session_id, pkt->op_conf_id);
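+	/* op_conf_id identifies which feature's CONFIG command completed. */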
+	switch (pkt->op_conf_id) {
+	case HFI_CMD_SESSION_CVP_DFS_CONFIG:
-	info->response_type = HAL_SESSION_DFS_CONFIG_CMD_DONE;
+		info->response_type = HAL_SESSION_DFS_CONFIG_CMD_DONE;
+		break;
+	case HFI_CMD_SESSION_CVP_DME_CONFIG:
+		info->response_type = HAL_SESSION_DME_CONFIG_CMD_DONE;
+		break;
+	default:
+		dprintk(CVP_ERR, "%s: Invalid op config id\n", __func__);
+		return -EINVAL;
+	}
+
 	info->response.cmd = cmd_done;
 	return 0;
 }
@@ -951,14 +909,73 @@
 	cmd_done.status = hfi_map_err_status(pkt->error_type);
 	cmd_done.size = 0;
 
-	dprintk(CVP_DBG, "%s: device_id=%d cmd_done.status=%d\n",
-		__func__, device_id, cmd_done.status);
+	dprintk(CVP_DBG,
+		"%s: device_id=%d cmd_done.status=%d sessionid=%x\n",
+		__func__, device_id, cmd_done.status, cmd_done.session_id);
 	info->response_type = HAL_SESSION_DFS_FRAME_CMD_DONE;
 	info->response.cmd = cmd_done;
 
 	return 0;
 }
 
+static int hfi_process_session_cvp_dme(u32 device_id,
+	struct hfi_msg_session_cvp_dme_packet_type *pkt,
+	struct msm_cvp_cb_info *info)
+{
+	struct msm_cvp_cb_cmd_done cmd_done = {0};
+
+	if (!pkt) {
+		dprintk(CVP_ERR, "%s: invalid param\n", __func__);
+		return -EINVAL;
+	} else if (pkt->size < sizeof(*pkt)) {
+		dprintk(CVP_ERR, "%s: bad_pkt_size\n", __func__);
+		return -E2BIG;
+	}
+
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+	cmd_done.status = hfi_map_err_status(pkt->error_type);
+	cmd_done.size = 0;
+
+	dprintk(CVP_DBG,
+		"%s: device_id=%d cmd_done.status=%d sessionid=%x\n",
+		__func__, device_id, cmd_done.status, cmd_done.session_id);
+	info->response_type = HAL_SESSION_DME_FRAME_CMD_DONE;
+	info->response.cmd = cmd_done;
+
+	return 0;
+}
+
+static int hfi_process_session_cvp_persist(u32 device_id,
+	struct hfi_msg_session_cvp_persist_packet_type *pkt,
+	struct msm_cvp_cb_info *info)
+{
+	struct msm_cvp_cb_cmd_done cmd_done = {0};
+
+	if (!pkt) {
+		dprintk(CVP_ERR, "%s: invalid param\n", __func__);
+		return -EINVAL;
+	} else if (pkt->size < sizeof(*pkt)) {
+		dprintk(CVP_ERR, "%s: bad_pkt_size\n", __func__);
+		return -E2BIG;
+	}
+
+	cmd_done.device_id = device_id;
+	cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+	cmd_done.status = hfi_map_err_status(pkt->error_type);
+	cmd_done.size = 0;
+
+	dprintk(CVP_DBG,
+		"%s: device_id=%d cmd_done.status=%d sessionid=%x\n",
+		__func__, device_id, cmd_done.status, cmd_done.session_id);
+	info->response_type = HAL_SESSION_PERSIST_CMD_DONE;
+	info->response.cmd = cmd_done;
+
+	return 0;
+}
+
 static void hfi_process_sys_get_prop_image_version(
 		struct hfi_msg_sys_property_info_packet *pkt)
 {
@@ -1065,20 +1082,12 @@
 	case HFI_MSG_SYS_SESSION_END_DONE:
 		pkt_func = (pkt_func_def)hfi_process_session_end_done;
 		break;
-	case HFI_MSG_SESSION_REGISTER_BUFFERS_DONE:
-		pkt_func = (pkt_func_def)
-			hfi_process_session_register_buffer_done;
-		break;
 	case HFI_MSG_SESSION_CVP_SET_BUFFERS:
 		pkt_func = (pkt_func_def) hfi_process_session_set_buf_done;
 		break;
 	case HFI_MSG_SESSION_CVP_RELEASE_BUFFERS:
 		pkt_func = (pkt_func_def)hfi_process_session_rel_buf_done;
 		break;
-	case HFI_MSG_SESSION_UNREGISTER_BUFFERS_DONE:
-		pkt_func = (pkt_func_def)
-			hfi_process_session_unregister_buffer_done;
-		break;
 	case HFI_MSG_SYS_SESSION_ABORT_DONE:
 		pkt_func = (pkt_func_def)hfi_process_session_abort_done;
 		break;
@@ -1093,6 +1102,17 @@
 			"Received HFI_MSG_SESSION_CVP_DFS from firmware");
 		pkt_func = (pkt_func_def)hfi_process_session_cvp_dfs;
 		break;
+	case HFI_MSG_SESSION_CVP_DME:
+		dprintk(CVP_DBG,
+			"Received HFI_MSG_SESSION_CVP_DME from firmware");
+		pkt_func = (pkt_func_def)hfi_process_session_cvp_dme;
+		break;
+	case HFI_MSG_SESSION_CVP_SET_PERSIST_BUFFERS:
+		dprintk(CVP_DBG,
+			"Received HFI_MSG_SESSION_CVP_PERSIST from firmware");
+		pkt_func = (pkt_func_def)hfi_process_session_cvp_persist;
+		break;
+
 	default:
 		dprintk(CVP_DBG, "Unable to parse message: %#x\n",
 				msg_hdr->packet);
diff --git a/drivers/media/platform/msm/cvp/msm_cvp.c b/drivers/media/platform/msm/cvp/msm_cvp.c
index eb54cad..6aaef05 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp.c
@@ -57,171 +57,6 @@
 	return buftype;
 }
 
-void cvp_handle_session_register_buffer_done(enum hal_command_response cmd,
-		void *resp)
-{
-	struct msm_cvp_cb_cmd_done *response = resp;
-	struct msm_cvp_inst *inst;
-	struct msm_cvp_internal_buffer *cbuf;
-	struct v4l2_event event = {0};
-	u32 *data;
-	bool found;
-
-	if (!response) {
-		dprintk(CVP_ERR, "%s: invalid response\n", __func__);
-		return;
-	}
-	inst = cvp_get_inst(get_cvp_core(response->device_id),
-			response->session_id);
-	if (!inst) {
-		dprintk(CVP_ERR, "%s: invalid session %pK\n", __func__,
-			response->session_id);
-		return;
-	}
-
-	mutex_lock(&inst->cvpbufs.lock);
-	found = false;
-	list_for_each_entry(cbuf, &inst->cvpbufs.list, list) {
-		if (response->data.regbuf.client_data ==
-				cbuf->smem.device_addr) {
-			found = true;
-			break;
-		}
-	}
-	mutex_unlock(&inst->cvpbufs.lock);
-	if (!found) {
-		dprintk(CVP_ERR, "%s: client_data %x not found\n",
-			__func__, response->data.regbuf.client_data);
-		goto exit;
-	}
-	print_cvp_internal_buffer(CVP_DBG, "register_done", inst, cbuf);
-
-	event.type = V4L2_EVENT_MSM_CVP_REGISTER_BUFFER_DONE;
-	data = (u32 *)event.u.data;
-	data[0] = cbuf->buf.index;
-	data[1] = cbuf->buf.type;
-	data[2] = cbuf->buf.fd;
-	data[3] = cbuf->buf.offset;
-	v4l2_event_queue_fh(&inst->event_handler, &event);
-
-exit:
-	cvp_put_inst(inst);
-}
-
-void cvp_handle_session_unregister_buffer_done(enum hal_command_response cmd,
-		void *resp)
-{
-	int rc;
-	struct msm_cvp_cb_cmd_done *response = resp;
-	struct msm_cvp_inst *inst;
-	struct msm_cvp_internal_buffer *cbuf, *dummy;
-	struct v4l2_event event = {0};
-	u32 *data;
-	bool found;
-
-	if (!response) {
-		dprintk(CVP_ERR, "%s: invalid response\n", __func__);
-		return;
-	}
-	inst = cvp_get_inst(get_cvp_core(response->device_id),
-			response->session_id);
-	if (!inst) {
-		dprintk(CVP_ERR, "%s: invalid session %pK\n", __func__,
-			response->session_id);
-		return;
-	}
-
-	mutex_lock(&inst->cvpbufs.lock);
-	found = false;
-	list_for_each_entry_safe(cbuf, dummy, &inst->cvpbufs.list, list) {
-		if (response->data.unregbuf.client_data ==
-				cbuf->smem.device_addr) {
-			found = true;
-			break;
-		}
-	}
-	mutex_unlock(&inst->cvpbufs.lock);
-	if (!found) {
-		dprintk(CVP_ERR, "%s: client_data %x not found\n",
-			__func__, response->data.unregbuf.client_data);
-		goto exit;
-	}
-	print_cvp_internal_buffer(CVP_DBG, "unregister_done", inst, cbuf);
-
-	rc = msm_cvp_smem_unmap_dma_buf(inst, &cbuf->smem);
-	if (rc) {
-		print_cvp_internal_buffer(CVP_ERR, "unmap fail", inst, cbuf);
-		goto exit;
-	}
-
-	event.type = V4L2_EVENT_MSM_CVP_UNREGISTER_BUFFER_DONE;
-	data = (u32 *)event.u.data;
-	data[0] = cbuf->buf.index;
-	data[1] = cbuf->buf.type;
-	data[2] = cbuf->buf.fd;
-	data[3] = cbuf->buf.offset;
-	v4l2_event_queue_fh(&inst->event_handler, &event);
-
-	mutex_lock(&inst->cvpbufs.lock);
-	list_del(&cbuf->list);
-	mutex_unlock(&inst->cvpbufs.lock);
-	kfree(cbuf);
-	cbuf = NULL;
-exit:
-	cvp_put_inst(inst);
-}
-
-static void print_cvp_cycles(struct msm_cvp_inst *inst)
-{
-	struct msm_cvp_core *core;
-	struct msm_cvp_inst *temp;
-
-	if (!inst || !inst->core)
-		return;
-	core = inst->core;
-
-	mutex_lock(&core->lock);
-	list_for_each_entry(temp, &core->instances, list) {
-		if (temp->session_type == MSM_CVP_CORE) {
-			dprintk(CVP_ERR, "session %#x, vpss %d ise %d\n",
-				hash32_ptr(temp->session),
-				temp->clk_data.vpss_cycles,
-				temp->clk_data.ise_cycles);
-		}
-	}
-	mutex_unlock(&core->lock);
-}
-
-static bool msm_cvp_clock_aggregation(struct msm_cvp_inst *inst,
-		u32 vpss_cycles, u32 ise_cycles)
-{
-	struct msm_cvp_core *core;
-	struct msm_cvp_inst *temp;
-	u32 total_vpss_cycles = 0;
-	u32 total_ise_cycles = 0;
-
-	if (!inst || !inst->core) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return false;
-	}
-	core = inst->core;
-
-	mutex_lock(&core->lock);
-	list_for_each_entry(temp, &core->instances, list) {
-		if (temp->session_type == MSM_CVP_CORE) {
-			total_vpss_cycles += inst->clk_data.vpss_cycles;
-			total_ise_cycles += inst->clk_data.ise_cycles;
-		}
-	}
-	mutex_unlock(&core->lock);
-
-	if ((total_vpss_cycles > MAX_CVP_VPSS_CYCLES) ||
-		(total_ise_cycles > MAX_CVP_ISE_CYCLES))
-		return false;
-
-	return true;
-}
-
 static int msm_cvp_scale_clocks_and_bus(struct msm_cvp_inst *inst)
 {
 	int rc = 0;
@@ -267,13 +102,45 @@
 	return rc;
 }
 
+static int msm_cvp_session_get_iova_addr(
+	struct msm_cvp_inst *inst,
+	struct msm_cvp_internal_buffer *cbuf,
+	unsigned int search_fd, unsigned int search_size,
+	unsigned int *iova,
+	unsigned int *iova_size)
+{
+	bool found = false;
+
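+	/*
+	 * Walk the registered-buffer list for a matching fd; the size passed
+	 * by the caller must match what was registered.
+	 */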
+	mutex_lock(&inst->cvpbufs.lock);
+	list_for_each_entry(cbuf, &inst->cvpbufs.list, list) {
+		if (cbuf->buf.fd == search_fd) {
+			found = true;
+			break;
+		}
+	}
+	mutex_unlock(&inst->cvpbufs.lock);
+	if (!found)
+		return -EINVAL;
+
+	*iova = cbuf->smem.device_addr;
+	if (search_size != cbuf->buf.size) {
+		dprintk(CVP_ERR,
+			"%s:: invalid size for fd = %d\n",
+			__func__, search_fd);
+		return -EINVAL;
+	}
+	*iova_size = cbuf->buf.size;
+	return 0;
+}
+
+/* DFS feature system call handling */
 static int msm_cvp_session_cvp_dfs_config(
 	struct msm_cvp_inst *inst,
-	struct msm_cvp_dfsconfig *dfs_config)
+	struct msm_cvp_dfs_config *dfs_config)
 {
 	int rc = 0;
 	struct hfi_device *hdev;
-	struct msm_cvp_dfsconfig vdfs_config;
+	struct msm_cvp_internal_dfsconfig internal_dfs_config;
 
 	dprintk(CVP_DBG, "%s:: Enter inst = %pK\n", __func__, inst);
 
@@ -282,10 +149,11 @@
 		return -EINVAL;
 	}
 	hdev = inst->core->device;
-	memcpy(&vdfs_config, dfs_config, sizeof(struct msm_cvp_dfsconfig));
+	memcpy(&internal_dfs_config.dfs_config.cvp_dfs_config,
+		dfs_config,	sizeof(struct msm_cvp_dfs_config));
 
 	rc = call_hfi_op(hdev, session_cvp_dfs_config,
-			(void *)inst->session, &vdfs_config);
+			(void *)inst->session, &internal_dfs_config);
 	if (!rc) {
 		rc = wait_for_sess_signal_receipt(inst,
 			HAL_SESSION_DFS_CONFIG_CMD_DONE);
@@ -303,11 +171,14 @@
 
 static int msm_cvp_session_cvp_dfs_frame(
 	struct msm_cvp_inst *inst,
-	struct msm_cvp_dfsframe *dfs_frame)
+	struct msm_cvp_dfs_frame *dfs_frame)
 {
 	int rc = 0;
 	struct hfi_device *hdev;
-	struct msm_cvp_dfsframe vdfs_frame;
+	struct msm_cvp_internal_dfsframe internal_dfs_frame;
+	struct msm_cvp_dfs_frame_kmd *dest_ptr = &internal_dfs_frame.dfs_frame;
+	struct msm_cvp_dfs_frame_kmd src_frame;
+	struct msm_cvp_internal_buffer *cbuf;
 
 	dprintk(CVP_DBG, "%s:: Enter inst = %pK\n", __func__, inst);
 
@@ -315,11 +186,60 @@
 		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
 		return -EINVAL;
 	}
+	src_frame = *(struct msm_cvp_dfs_frame_kmd *)dfs_frame;
 	hdev = inst->core->device;
-	memcpy(&vdfs_frame, dfs_frame, sizeof(vdfs_frame));
+	memset(&internal_dfs_frame, 0,
+		sizeof(struct msm_cvp_internal_dfsframe));
+
+	memcpy(&internal_dfs_frame.dfs_frame, dfs_frame,
+		CVP_DFS_FRAME_CMD_SIZE*sizeof(unsigned int));
+
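+	/*
+	 * Resolve each of the four view/output fds to the iova recorded at
+	 * registration time before handing the frame to the firmware.
+	 */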
+	rc = msm_cvp_session_get_iova_addr(inst, cbuf,
+			src_frame.left_view_buffer_fd,
+			src_frame.left_view_buffer_size,
+			&dest_ptr->left_view_buffer_fd,
+			&dest_ptr->left_view_buffer_size);
+	if (rc) {
+		dprintk(CVP_ERR, "%s:: left buffer not registered. rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	rc = msm_cvp_session_get_iova_addr(inst, cbuf,
+			src_frame.right_view_buffer_fd,
+			src_frame.right_view_buffer_size,
+			&dest_ptr->right_view_buffer_fd,
+			&dest_ptr->right_view_buffer_size);
+	if (rc) {
+		dprintk(CVP_ERR, "%s:: right buffer not registered. rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	rc = msm_cvp_session_get_iova_addr(inst, cbuf,
+			src_frame.disparity_map_buffer_fd,
+			src_frame.disparity_map_buffer_size,
+			&dest_ptr->disparity_map_buffer_fd,
+			&dest_ptr->disparity_map_buffer_size);
+	if (rc) {
+		dprintk(CVP_ERR, "%s:: disparity map not registered. rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	rc = msm_cvp_session_get_iova_addr(inst, cbuf,
+			src_frame.occlusion_mask_buffer_fd,
+			src_frame.occlusion_mask_buffer_size,
+			&dest_ptr->occlusion_mask_buffer_fd,
+			&dest_ptr->occlusion_mask_buffer_size);
+	if (rc) {
+		dprintk(CVP_ERR, "%s:: occlusion mask not registered. rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
 
 	rc = call_hfi_op(hdev, session_cvp_dfs_frame,
-			(void *)inst->session, &vdfs_frame);
+			(void *)inst->session, &internal_dfs_frame);
 
 	if (rc) {
 		dprintk(CVP_ERR,
@@ -332,7 +252,7 @@
 
 static int msm_cvp_session_cvp_dfs_frame_response(
 	struct msm_cvp_inst *inst,
-	struct msm_cvp_dfsframe *dfs_frame)
+	struct msm_cvp_dfs_frame *dfs_frame)
 {
 	int rc = 0;
 
@@ -348,73 +268,207 @@
 		dprintk(CVP_ERR,
 			"%s: wait for signal failed, rc %d\n",
 			__func__, rc);
+	return rc;
+}
+
+/* DME feature system call handling */
+static int msm_cvp_session_cvp_dme_config(
+	struct msm_cvp_inst *inst,
+	struct msm_cvp_dme_config *dme_config)
+{
+	int rc = 0;
+	struct hfi_device *hdev;
+	struct msm_cvp_internal_dmeconfig internal_dme_config;
+
+	dprintk(CVP_DBG, "%s:: Enter inst = %pK\n", __func__, inst);
+
+	if (!inst || !inst->core || !dme_config) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	hdev = inst->core->device;
+	memcpy(&internal_dme_config.dme_config.cvp_dme_config,
+		dme_config, sizeof(struct msm_cvp_dme_config));
+
+	rc = call_hfi_op(hdev, session_cvp_dme_config,
+			(void *)inst->session, &internal_dme_config);
+	if (!rc) {
+		rc = wait_for_sess_signal_receipt(inst,
+			HAL_SESSION_DME_CONFIG_CMD_DONE);
+		if (rc)
+			dprintk(CVP_ERR,
+				"%s: wait for signal failed, rc %d\n",
+				__func__, rc);
+	} else {
+		dprintk(CVP_ERR, "%s Failed in call_hfi_op\n", __func__);
+	}
+	return rc;
+}
+
+static int msm_cvp_session_cvp_dme_frame(
+	struct msm_cvp_inst *inst,
+	struct msm_cvp_dme_frame *dme_frame)
+{
+	int i, rc = 0;
+	struct hfi_device *hdev;
+	struct msm_cvp_internal_dmeframe internal_dme_frame;
+	struct msm_cvp_dme_frame_kmd *dest_ptr = &internal_dme_frame.dme_frame;
+	struct msm_cvp_dme_frame_kmd src_frame;
+	struct msm_cvp_internal_buffer *cbuf;
+
+	dprintk(CVP_DBG, "%s:: Enter inst = %pK\n", __func__, inst);
+
+	if (!inst || !inst->core || !dme_frame) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	src_frame = *(struct msm_cvp_dme_frame_kmd *)dme_frame;
+	hdev = inst->core->device;
+	memset(&internal_dme_frame, 0,
+		sizeof(struct msm_cvp_internal_dmeframe));
+
+	memcpy(&internal_dme_frame.dme_frame, dme_frame,
+		CVP_DME_FRAME_CMD_SIZE*sizeof(unsigned int));
+
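+	/*
+	 * Translate each client fd to its registered iova; a zero fd means
+	 * "no buffer" and is passed through unchanged.
+	 */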
+	for (i = 0; i < CVP_DME_BUF_NUM; i++) {
+		if (!src_frame.bufs[i].fd) {
+			dest_ptr->bufs[i].fd = src_frame.bufs[i].fd;
+			dest_ptr->bufs[i].size = src_frame.bufs[i].size;
+			continue;
+		}
+
+		rc = msm_cvp_session_get_iova_addr(inst, cbuf,
+				src_frame.bufs[i].fd,
+				src_frame.bufs[i].size,
+				&dest_ptr->bufs[i].fd,
+				&dest_ptr->bufs[i].size);
+		if (rc) {
+			dprintk(CVP_ERR,
+				"%s: %d buffer not registered. rc=%d\n",
+				__func__, i, rc);
+			return rc;
+		}
+
+	}
+
+	rc = call_hfi_op(hdev, session_cvp_dme_frame,
+			(void *)inst->session, &internal_dme_frame);
+
+	if (rc) {
+		dprintk(CVP_ERR,
+			"%s:: Failed in call_hfi_op\n",
+			__func__);
+	}
 
 	return rc;
 }
 
+static int msm_cvp_session_cvp_persist(
+	struct msm_cvp_inst *inst,
+	struct msm_cvp_persist_buf *pbuf_cmd)
+{
+	int i, rc = 0;
+	struct hfi_device *hdev;
+	struct msm_cvp_internal_persist_cmd internal_pcmd;
+	struct msm_cvp_persist_kmd *dest_ptr = &internal_pcmd.persist_cmd;
+	struct msm_cvp_persist_kmd src_frame;
+	struct msm_cvp_internal_buffer *cbuf;
+
+	dprintk(CVP_DBG, "%s:: Enter inst = %pK\n", __func__, inst);
+
+	if (!inst || !inst->core || !pbuf_cmd) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	src_frame = *(struct msm_cvp_persist_kmd *)pbuf_cmd;
+	hdev = inst->core->device;
+	memset(&internal_pcmd, 0,
+		sizeof(struct msm_cvp_internal_persist_cmd));
+
+	memcpy(&internal_pcmd.persist_cmd, pbuf_cmd,
+		CVP_PERSIST_CMD_SIZE*sizeof(unsigned int));
+
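+	/* Same fd-to-iova translation as the DME frame path. */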
+	for (i = 0; i < CVP_PSRSIST_BUF_NUM; i++) {
+		if (!src_frame.bufs[i].fd) {
+			dest_ptr->bufs[i].fd = src_frame.bufs[i].fd;
+			dest_ptr->bufs[i].size = src_frame.bufs[i].size;
+			continue;
+		}
+
+		rc = msm_cvp_session_get_iova_addr(inst, cbuf,
+				src_frame.bufs[i].fd,
+				src_frame.bufs[i].size,
+				&dest_ptr->bufs[i].fd,
+				&dest_ptr->bufs[i].size);
+		if (rc) {
+			dprintk(CVP_ERR,
+				"%s:: %d buffer not registered. rc=%d\n",
+				__func__, i, rc);
+			return rc;
+		}
+	}
+
+	rc = call_hfi_op(hdev, session_cvp_persist,
+			(void *)inst->session, &internal_pcmd);
+
+	if (rc)
+		dprintk(CVP_ERR, "%s: Failed in call_hfi_op\n", __func__);
+
+	return rc;
+}
+
+static int msm_cvp_session_cvp_dme_frame_response(
+	struct msm_cvp_inst *inst,
+	struct msm_cvp_dme_frame *dme_frame)
+{
+	int rc = 0;
+
+	dprintk(CVP_DBG, "%s:: Enter inst = %pK\n", __func__, inst);
+
+	if (!inst || !inst->core || !dme_frame) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	rc = wait_for_sess_signal_receipt(inst,
+			HAL_SESSION_DME_FRAME_CMD_DONE);
+	if (rc)
+		dprintk(CVP_ERR,
+			"%s: wait for signal failed, rc %d\n",
+			__func__, rc);
+	return rc;
+}
+
+static int msm_cvp_session_cvp_persist_response(
+	struct msm_cvp_inst *inst,
+	struct msm_cvp_persist_buf *pbuf_cmd)
+{
+	int rc = 0;
+
+	dprintk(CVP_DBG, "%s:: Enter inst = %pK\n", __func__, inst);
+
+	if (!inst || !inst->core || !pbuf_cmd) {
+		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	rc = wait_for_sess_signal_receipt(inst,
+			HAL_SESSION_PERSIST_CMD_DONE);
+	if (rc)
+		dprintk(CVP_ERR,
+			"%s: wait for signal failed, rc %d\n",
+			__func__, rc);
+	return rc;
+}
 
 static int msm_cvp_send_cmd(struct msm_cvp_inst *inst,
 		struct msm_cvp_send_cmd *send_cmd)
 {
-	int rc = 0;
-	bool found;
-	struct hfi_device *hdev;
-	struct msm_cvp_internal_send_cmd  *csend_cmd;
-	//struct cvp_register_buffer vbuf;
-	struct cvp_frame_data input_frame;
+	dprintk(CVP_ERR, "%s: UMD gave a deprecated cmd\n", __func__);
 
-	dprintk(CVP_DBG, "%s:: Enter 1", __func__);
-	if (!inst || !inst->core || !send_cmd) {
-		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-	hdev = inst->core->device;
-	//print_client_buffer(CVP_DBG, "register", inst, send_cmd);
-
-	mutex_lock(&inst->cvpbufs.lock);
-	found = false;
-	list_for_each_entry(csend_cmd, &inst->cvpbufs.list, list) {
-		if (csend_cmd->send_cmd.cmd_address_fd ==
-				send_cmd->cmd_address_fd &&
-			csend_cmd->send_cmd.cmd_size == send_cmd->cmd_size) {
-			found = true;
-			break;
-		}
-	}
-	mutex_unlock(&inst->cvpbufs.lock);
-	if (found)
-		return -EINVAL;
-
-	csend_cmd = kzalloc(
-		sizeof(struct msm_cvp_internal_send_cmd), GFP_KERNEL);
-	if (!csend_cmd) {
-		dprintk(CVP_ERR, "%s: csend_cmd alloc failed\n", __func__);
-		return -ENOMEM;
-	}
-	mutex_lock(&inst->cvpbufs.lock);
-	list_add_tail(&csend_cmd->list, &inst->cvpbufs.list);
-	mutex_unlock(&inst->cvpbufs.lock);
-
-	memset(&input_frame, 0, sizeof(struct cvp_frame_data));
-
-	rc = call_hfi_op(hdev, session_cvp_send_cmd,
-			(void *)inst->session, &input_frame);
-	if (rc)
-		goto exit;
-
-	return rc;
-
-exit:
-	if (csend_cmd->smem.device_addr)
-		msm_cvp_smem_unmap_dma_buf(inst, &csend_cmd->smem);
-	mutex_lock(&inst->cvpbufs.lock);
-	list_del(&csend_cmd->list);
-	mutex_unlock(&inst->cvpbufs.lock);
-	kfree(csend_cmd);
-	csend_cmd = NULL;
-
-	return rc;
+	return 0;
 }
+
 static int msm_cvp_request_power(struct msm_cvp_inst *inst,
 		struct msm_cvp_request_power *power)
 {
@@ -430,51 +484,6 @@
 		__func__, power->clock_cycles_a, power->clock_cycles_b,
 		power->ddr_bw, power->sys_cache_bw);
 
-	rc = msm_cvp_clock_aggregation(inst, power->clock_cycles_a,
-			power->clock_cycles_b);
-	if (!rc) {
-		dprintk(CVP_ERR,
-			"%s: session %#x rejected, cycles: vpss %d, ise %d\n",
-			__func__, hash32_ptr(inst->session),
-			power->clock_cycles_a, power->clock_cycles_b);
-		print_cvp_cycles(inst);
-		msm_cvp_comm_kill_session(inst);
-		return -EOVERFLOW;
-	}
-
-	inst->clk_data.min_freq = max(power->clock_cycles_a,
-		power->clock_cycles_b);
-	/* convert client provided bps into kbps as expected by driver */
-	inst->clk_data.ddr_bw = power->ddr_bw / 1000;
-	inst->clk_data.sys_cache_bw = power->sys_cache_bw / 1000;
-	rc = msm_cvp_scale_clocks_and_bus(inst);
-	if (rc) {
-		dprintk(CVP_ERR,
-			"%s: failed to scale clocks and bus for inst %pK (%#x)\n",
-			__func__, inst, hash32_ptr(inst->session));
-		goto exit;
-	}
-
-	if (!inst->clk_data.min_freq && !inst->clk_data.ddr_bw &&
-		!inst->clk_data.sys_cache_bw) {
-		rc = msm_cvp_session_pause(inst);
-		if (rc) {
-			dprintk(CVP_ERR,
-				"%s: failed to pause inst %pK (%#x)\n",
-				__func__, inst, hash32_ptr(inst->session));
-			goto exit;
-		}
-	} else {
-		rc = msm_cvp_session_resume(inst);
-		if (rc) {
-			dprintk(CVP_ERR,
-				"%s: failed to resume inst %pK (%#x)\n",
-				__func__, inst, hash32_ptr(inst->session));
-			goto exit;
-		}
-	}
-
-exit:
 	return rc;
 }
 
@@ -485,20 +494,25 @@
 	bool found;
 	struct hfi_device *hdev;
 	struct msm_cvp_internal_buffer *cbuf;
-	struct cvp_register_buffer vbuf;
+	struct hal_session *session;
 
 	if (!inst || !inst->core || !buf) {
 		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
 		return -EINVAL;
 	}
+
+	session = (struct hal_session *)inst->session;
+	if (!session) {
+		dprintk(CVP_ERR, "%s: invalid session\n", __func__);
+		return -EINVAL;
+	}
 	hdev = inst->core->device;
 	print_client_buffer(CVP_DBG, "register", inst, buf);
 
 	mutex_lock(&inst->cvpbufs.lock);
 	found = false;
 	list_for_each_entry(cbuf, &inst->cvpbufs.list, list) {
-		if (cbuf->buf.index == buf->index &&
-			cbuf->buf.fd == buf->fd &&
+		if (cbuf->buf.fd == buf->fd &&
 			cbuf->buf.offset == buf->offset) {
 			found = true;
 			break;
@@ -524,26 +538,24 @@
 	cbuf->smem.fd = buf->fd;
 	cbuf->smem.offset = buf->offset;
 	cbuf->smem.size = buf->size;
+	cbuf->smem.flags = buf->flags;
 	rc = msm_cvp_smem_map_dma_buf(inst, &cbuf->smem);
 	if (rc) {
 		print_client_buffer(CVP_ERR, "map failed", inst, buf);
 		goto exit;
 	}
 
-	memset(&vbuf, 0, sizeof(struct cvp_register_buffer));
-	vbuf.index = buf->index;
-	vbuf.type = get_hal_buftype(__func__, buf->type);
-	vbuf.size = buf->size;
-	vbuf.device_addr = cbuf->smem.device_addr;
-	vbuf.client_data = cbuf->smem.device_addr;
-	vbuf.response_required = true;
-	rc = call_hfi_op(hdev, session_register_buffer,
-			(void *)inst->session, &vbuf);
-	if (rc) {
-		print_cvp_internal_buffer(CVP_ERR,
-			"register failed", inst, cbuf);
-		goto exit;
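+	/* Only buffers flagged with a non-zero index are mapped on the DSP. */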
+	if (buf->index) {
+		rc = cvp_dsp_register_buffer((uint32_t)cbuf->smem.device_addr,
+			buf->index, buf->size, hash32_ptr(session));
+		if (rc) {
+			dprintk(CVP_ERR,
+				"%s: failed dsp registration for fd=%d rc=%d",
+				__func__, buf->fd, rc);
+			goto exit;
+		}
 	}
+
 	return rc;
 
 exit:
@@ -565,20 +577,25 @@
 	bool found;
 	struct hfi_device *hdev;
 	struct msm_cvp_internal_buffer *cbuf;
-	struct cvp_unregister_buffer vbuf;
+	struct hal_session *session;
 
 	if (!inst || !inst->core || !buf) {
 		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
 		return -EINVAL;
 	}
+
+	session = (struct hal_session *)inst->session;
+	if (!session) {
+		dprintk(CVP_ERR, "%s: invalid session\n", __func__);
+		return -EINVAL;
+	}
 	hdev = inst->core->device;
 	print_client_buffer(CVP_DBG, "unregister", inst, buf);
 
 	mutex_lock(&inst->cvpbufs.lock);
 	found = false;
 	list_for_each_entry(cbuf, &inst->cvpbufs.list, list) {
-		if (cbuf->buf.index == buf->index &&
-			cbuf->buf.fd == buf->fd &&
+		if (cbuf->buf.fd == buf->fd &&
 			cbuf->buf.offset == buf->offset) {
 			found = true;
 			break;
@@ -590,18 +607,21 @@
 		return -EINVAL;
 	}
 
-	memset(&vbuf, 0, sizeof(struct cvp_unregister_buffer));
-	vbuf.index = cbuf->buf.index;
-	vbuf.type = get_hal_buftype(__func__, cbuf->buf.type);
-	vbuf.size = cbuf->buf.size;
-	vbuf.device_addr = cbuf->smem.device_addr;
-	vbuf.client_data = cbuf->smem.device_addr;
-	vbuf.response_required = true;
-	rc = call_hfi_op(hdev, session_unregister_buffer,
-			(void *)inst->session, &vbuf);
-	if (rc)
-		print_cvp_internal_buffer(CVP_ERR,
-			"unregister failed", inst, cbuf);
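+	/* Mirror the register path: tear down the DSP mapping first. */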
+	if (buf->index) {
+		rc = cvp_dsp_deregister_buffer((uint32_t)cbuf->smem.device_addr,
+			buf->index, buf->size, hash32_ptr(session));
+		if (rc) {
+			dprintk(CVP_ERR,
+				"%s: failed dsp registration for fd = %d rc=%d",
+				__func__, buf->fd, rc);
+		}
+	}
+
+	if (cbuf->smem.device_addr)
+		msm_cvp_smem_unmap_dma_buf(inst, &cbuf->smem);
+
+	list_del(&cbuf->list);
+	kfree(cbuf);
 
 	return rc;
 }
@@ -614,7 +634,7 @@
 		dprintk(CVP_ERR, "%s: invalid args\n", __func__);
 		return -EINVAL;
 	}
-	dprintk(CVP_DBG, "%s:: arg->type = %d", __func__, arg->type);
+	dprintk(CVP_DBG, "%s:: arg->type = %x", __func__, arg->type);
 
 	switch (arg->type) {
 	case MSM_CVP_GET_SESSION_INFO:
@@ -661,26 +681,66 @@
 	}
 	case MSM_CVP_HFI_DFS_CONFIG_CMD:
 	{
-		struct msm_cvp_dfsconfig *dfsconfig =
-			(struct msm_cvp_dfsconfig *)&arg->data.dfsconfig;
+		struct msm_cvp_dfs_config *dfs_config =
+			(struct msm_cvp_dfs_config *)&arg->data.dfs_config;
 
-		rc = msm_cvp_session_cvp_dfs_config(inst, dfsconfig);
+		rc = msm_cvp_session_cvp_dfs_config(inst, dfs_config);
 		break;
 	}
 	case MSM_CVP_HFI_DFS_FRAME_CMD:
 	{
-		struct msm_cvp_dfsframe *dfsframe =
-			(struct msm_cvp_dfsframe *)&arg->data.dfsframe;
+		struct msm_cvp_dfs_frame *dfs_frame =
+			(struct msm_cvp_dfs_frame *)&arg->data.dfs_frame;
 
-		rc = msm_cvp_session_cvp_dfs_frame(inst, dfsframe);
+		rc = msm_cvp_session_cvp_dfs_frame(inst, dfs_frame);
 		break;
 	}
 	case MSM_CVP_HFI_DFS_FRAME_CMD_RESPONSE:
 	{
-		struct msm_cvp_dfsframe *dfsframe =
-			(struct msm_cvp_dfsframe *)&arg->data.dfsframe;
+		struct msm_cvp_dfs_frame *dfs_frame =
+			(struct msm_cvp_dfs_frame *)&arg->data.dfs_frame;
 
-		rc = msm_cvp_session_cvp_dfs_frame_response(inst, dfsframe);
+		rc = msm_cvp_session_cvp_dfs_frame_response(inst, dfs_frame);
+		break;
+	}
+	case MSM_CVP_HFI_DME_CONFIG_CMD:
+	{
+		struct msm_cvp_dme_config *dme_config =
+			(struct msm_cvp_dme_config *)&arg->data.dme_config;
+
+		rc = msm_cvp_session_cvp_dme_config(inst, dme_config);
+		break;
+	}
+	case MSM_CVP_HFI_DME_FRAME_CMD:
+	{
+		struct msm_cvp_dme_frame *dme_frame =
+			(struct msm_cvp_dme_frame *)&arg->data.dme_frame;
+
+		rc = msm_cvp_session_cvp_dme_frame(inst, dme_frame);
+		break;
+	}
+	case MSM_CVP_HFI_DME_FRAME_CMD_RESPONSE:
+	{
+		struct msm_cvp_dme_frame *dmeframe =
+			(struct msm_cvp_dme_frame *)&arg->data.dme_frame;
+
+		rc = msm_cvp_session_cvp_dme_frame_response(inst, dmeframe);
+		break;
+	}
+	case MSM_CVP_HFI_PERSIST_CMD:
+	{
+		struct msm_cvp_persist_buf *pbuf_cmd =
+			(struct msm_cvp_persist_buf *)&arg->data.pbuf_cmd;
+
+		rc = msm_cvp_session_cvp_persist(inst, pbuf_cmd);
+		break;
+	}
+	case MSM_CVP_HFI_PERSIST_CMD_RESPONSE:
+	{
+		struct msm_cvp_persist_buf *pbuf_cmd =
+			(struct msm_cvp_persist_buf *)&arg->data.pbuf_cmd;
+
+		rc = msm_cvp_session_cvp_persist_response(inst, pbuf_cmd);
 		break;
 	}
 	default:
diff --git a/drivers/media/platform/msm/cvp/msm_cvp.h b/drivers/media/platform/msm/cvp/msm_cvp.h
index 56098ee..6bcb799 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp.h
@@ -10,11 +10,7 @@
 #include "msm_cvp_common.h"
 #include "msm_cvp_clocks.h"
 #include "msm_cvp_debug.h"
-
-void cvp_handle_session_register_buffer_done(enum hal_command_response cmd,
-		void *resp);
-void cvp_handle_session_unregister_buffer_done(enum hal_command_response cmd,
-		void *resp);
+#include "msm_cvp_dsp.h"
 int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct msm_cvp_arg *arg);
 int msm_cvp_session_init(struct msm_cvp_inst *inst);
 int msm_cvp_session_deinit(struct msm_cvp_inst *inst);
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_common.c b/drivers/media/platform/msm/cvp/msm_cvp_common.c
index 2e97c3e..4f68dad 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_common.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_common.c
@@ -585,7 +585,7 @@
 	return inst;
 }
 
-static void cvp_handle_session_dfs_cmd_done(enum hal_command_response cmd,
+static void cvp_handle_session_cmd_done(enum hal_command_response cmd,
 	void *data)
 {
 	struct msm_cvp_cb_cmd_done *response = data;
@@ -614,6 +614,7 @@
 	} else
 		dprintk(CVP_ERR,
 			"%s: Invalid inst cmd response: %d\n", __func__, cmd);
+	cvp_put_inst(inst);
 }
 
 static void handle_session_set_buf_done(enum hal_command_response cmd,
@@ -644,6 +645,7 @@
 		complete(&inst->completions[SESSION_MSG_INDEX(cmd)]);
 	else
 		dprintk(CVP_ERR, "set_buf_done: invalid cmd: %d\n", cmd);
+	cvp_put_inst(inst);
 
 }
 
@@ -1137,6 +1139,7 @@
 				HAL_BUFFER_OUTPUT);
 		if (!bufreq) {
 			mutex_unlock(&inst->lock);
+			cvp_put_inst(inst);
 			return;
 		}
 
@@ -1148,6 +1151,7 @@
 				HAL_BUFFER_OUTPUT2);
 		if (!bufreq) {
 			mutex_unlock(&inst->lock);
+			cvp_put_inst(inst);
 			return;
 		}
 
@@ -1162,6 +1166,7 @@
 				HAL_BUFFER_OUTPUT);
 		if (!bufreq) {
 			mutex_unlock(&inst->lock);
+			cvp_put_inst(inst);
 			return;
 		}
 
@@ -1485,7 +1490,7 @@
 	}
 
 	dprintk(CVP_WARN, "SYS_ERROR received for core %pK\n", core);
-	msm_cvp_noc_error_info(core);
+	/* msm_cvp_noc_error_info(core) is disabled as of now */
 	call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
 	list_for_each_entry(inst, &core->instances, list) {
 		dprintk(CVP_WARN,
@@ -1679,11 +1684,6 @@
 		return HAL_BUFFER_OUTPUT;
 }
 
-static void handle_dfs(enum hal_command_response cmd, void *data)
-{
-	dprintk(CVP_ERR, "%s: is called\n", __func__);
-}
-
 void cvp_handle_cmd_response(enum hal_command_response cmd, void *data)
 {
 	dprintk(CVP_DBG, "Command response = %d\n", cmd);
@@ -1700,9 +1700,6 @@
 	case HAL_SESSION_CVP_OPERATION_CONFIG:
 		handle_operation_config(cmd, data);
 		break;
-	case HAL_SESSION_CVP_DFS:
-		handle_dfs(cmd, data);
-		break;
 	case HAL_SESSION_RELEASE_RESOURCE_DONE:
 		handle_release_res_done(cmd, data);
 		break;
@@ -1729,15 +1726,12 @@
 	case HAL_SESSION_RELEASE_BUFFER_DONE:
 		handle_session_release_buf_done(cmd, data);
 		break;
-	case HAL_SESSION_REGISTER_BUFFER_DONE:
-		cvp_handle_session_register_buffer_done(cmd, data);
-		break;
-	case HAL_SESSION_UNREGISTER_BUFFER_DONE:
-		cvp_handle_session_unregister_buffer_done(cmd, data);
-		break;
 	case HAL_SESSION_DFS_CONFIG_CMD_DONE:
 	case HAL_SESSION_DFS_FRAME_CMD_DONE:
-		cvp_handle_session_dfs_cmd_done(cmd, data);
+	case HAL_SESSION_DME_CONFIG_CMD_DONE:
+	case HAL_SESSION_DME_FRAME_CMD_DONE:
+	case HAL_SESSION_PERSIST_CMD_DONE:
+		cvp_handle_session_cmd_done(cmd, data);
 		break;
 	default:
 		dprintk(CVP_DBG, "response unhandled: %d\n", cmd);
@@ -1986,7 +1980,7 @@
 	hdev = core->device;
 	mutex_lock(&core->lock);
 	if (core->state >= CVP_CORE_INIT) {
-		dprintk(CVP_INFO, "Video core: %d is already in state: %d\n",
+		dprintk(CVP_DBG, "CVP core: %d is already in state: %d\n",
 				core->id, core->state);
 		goto core_already_inited;
 	}
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_core.c b/drivers/media/platform/msm/cvp/msm_cvp_core.c
index 1453401..75de60d 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_core.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_core.c
@@ -1262,7 +1262,7 @@
 
 	msm_cvp_debugfs_deinit_inst(inst);
 
-	pr_info(CVP_DBG_TAG "Closed video instance: %pK\n",
+	pr_info(CVP_DBG_TAG "Closed cvp instance: %pK\n",
 			"info", inst);
 	kfree(inst);
 	return 0;
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_dsp.c b/drivers/media/platform/msm/cvp/msm_cvp_dsp.c
index 6eeb481..740a709 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_dsp.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_dsp.c
@@ -21,39 +21,50 @@
 #define STATUS_DEINIT 1
 #define STATUS_OK 2
 #define STATUS_SSR 3
+#define CVP_DSP_MAX_RESERVED 5
 
-struct cvpd_cmd_msg {
+struct cvp_dsp_cmd_msg {
 	uint32_t cmd_msg_type;
 	int32_t ret_val;
 	uint64_t msg_ptr;
 	uint32_t msg_ptr_len;
 	uint32_t iova_buff_addr;
 	uint32_t buff_index;
-	uint32_t buf_size;
+	uint32_t buff_size;
 	uint32_t session_id;
-	uint32_t context;
+	int64_t ddr_type;
+	uint32_t reserved[CVP_DSP_MAX_RESERVED];
 };
 
-struct cvpd_rsp_msg {
-	uint32_t context;
+struct cvp_dsp_rsp_msg {
+	uint32_t cmd_msg_type;
 	int32_t ret_val;
+	uint32_t reserved[CVP_DSP_MAX_RESERVED];
+};
+
+struct cvp_dsp_rsp_context {
+	struct completion work;
 };
 
 struct cvp_dsp_apps {
 	struct rpmsg_device *chan;
 	struct mutex smd_mutex;
+	struct mutex reg_buffer_mutex;
+	struct mutex dereg_buffer_mutex;
 	int rpmsg_register;
 	uint32_t cdsp_state;
 	uint32_t cvp_shutdown;
+	struct completion reg_buffer_work;
+	struct completion dereg_buffer_work;
+	struct completion shutdown_work;
 };
 
-static struct completion work;
 
 static struct cvp_dsp_apps gfa_cv;
 
-static struct cvpd_cmd_msg cmd_msg;
+static struct cvp_dsp_cmd_msg cmd_msg;
 
-static struct cvpd_rsp_msg cmd_msg_rsp;
+static struct cvp_dsp_rsp_msg cmd_msg_rsp;
 
 static int cvp_dsp_send_cmd(void *msg, uint32_t len)
 {
@@ -82,7 +93,8 @@
 	int destVMperm[SRC_VM_NUM] = { PERM_READ | PERM_WRITE | PERM_EXEC };
 
 	if (strcmp(rpdev->dev.parent->of_node->name, "cdsp")) {
-		pr_err("%s: Failed to probe rpmsg device.Node name:%s\n",
+		dprintk(CVP_ERR,
+			"%s: Failed to probe rpmsg device.Node name:%s\n",
 			__func__, rpdev->dev.parent->of_node->name);
 		err = -EINVAL;
 		goto bail;
@@ -100,15 +112,17 @@
 			msg_ptr_len, srcVM, DEST_VM_NUM, destVM,
 			destVMperm, SRC_VM_NUM);
 		if (err) {
-			pr_err("%s: Failed to hyp_assign. err=%d\n",
+			dprintk(CVP_ERR,
+				"%s: Failed to hyp_assign. err=%d\n",
 				__func__, err);
 			return err;
 		}
 		err = cvp_dsp_send_cmd_hfi_queue(
 			(phys_addr_t *)msg_ptr, msg_ptr_len);
 		if (err) {
-			pr_err("%s: Failed to send HFI Queue address. err=%d\n",
-			__func__, err);
+			dprintk(CVP_ERR,
+				"%s: Failed to send HFI Queue address. err=%d\n",
+				__func__, err);
 			goto bail;
 		}
 		mutex_lock(&me->smd_mutex);
@@ -116,7 +130,8 @@
 		mutex_unlock(&me->smd_mutex);
 	}
 
-	pr_info("%s: Successfully probed. cdsp_state=%d cvp_shutdown=%d\n",
+	dprintk(CVP_INFO,
+		"%s: Successfully probed. cdsp_state=%d cvp_shutdown=%d\n",
 		__func__, cdsp_state, cvp_shutdown);
 bail:
 	return err;
@@ -130,17 +145,36 @@
 	me->chan = NULL;
 	me->cdsp_state = STATUS_SSR;
 	mutex_unlock(&me->smd_mutex);
-	pr_info("%s: CDSP SSR triggered\n", __func__);
+	dprintk(CVP_INFO,
+		"%s: CDSP SSR triggered\n", __func__);
 }
 
 static int cvp_dsp_rpmsg_callback(struct rpmsg_device *rpdev,
 	void *data, int len, void *priv, u32 addr)
 {
-	int *rpmsg_resp = (int *)data;
+	struct cvp_dsp_rsp_msg *dsp_response =
+		(struct cvp_dsp_rsp_msg *)data;
+	struct cvp_dsp_apps *me = &gfa_cv;
 
-	cmd_msg_rsp.ret_val = *rpmsg_resp;
-	complete(&work);
-
+	dprintk(CVP_DBG,
+		"%s: cmd_msg_type=0x%x dsp_response->ret_val=0x%x\n",
+		__func__, dsp_response->cmd_msg_type, dsp_response->ret_val);
+	switch (dsp_response->cmd_msg_type) {
+	case CVP_DSP_REGISTER_BUFFER:
+		complete(&me->reg_buffer_work);
+		break;
+	case CVP_DSP_DEREGISTER_BUFFER:
+		complete(&me->dereg_buffer_work);
+		break;
+	case CVP_DSP_SHUTDOWN:
+		complete(&me->shutdown_work);
+		break;
+	default:
+		dprintk(CVP_ERR,
+		"%s: Invalid cmd_msg_type received from dsp: %d\n",
+		__func__, dsp_response->cmd_msg_type);
+		break;
+	}
 	return 0;
 }
 
@@ -148,7 +182,7 @@
 	uint32_t size_in_bytes)
 {
 	int err;
-	struct cvpd_cmd_msg local_cmd_msg;
+	struct cvp_dsp_cmd_msg local_cmd_msg;
 	struct cvp_dsp_apps *me = &gfa_cv;
 	int srcVM[SRC_VM_NUM] = {VMID_HLOS};
 	int destVM[DEST_VM_NUM] = {VMID_HLOS, VMID_CDSP_Q6};
@@ -163,22 +197,25 @@
 	cmd_msg.msg_ptr_len = (size_in_bytes);
 	mutex_unlock(&me->smd_mutex);
 
-	pr_debug("%s :: address of buffer, PA=0x%pK  size_buff=%d\n",
+	dprintk(CVP_DBG,
+		"%s :: address of buffer, PA=0x%pK  size_buff=%d\n",
 		__func__, phys_addr, size_in_bytes);
 
 	err = hyp_assign_phys((uint64_t)local_cmd_msg.msg_ptr,
 		local_cmd_msg.msg_ptr_len, srcVM, SRC_VM_NUM, destVM,
 		destVMperm, DEST_VM_NUM);
 	if (err) {
-		pr_err("%s: Failed in hyp_assign. err=%d\n",
+		dprintk(CVP_ERR,
+			"%s: Failed in hyp_assign. err=%d\n",
 			__func__, err);
 		return err;
 	}
 
 	err = cvp_dsp_send_cmd
-			 (&local_cmd_msg, sizeof(struct cvpd_cmd_msg));
+			 (&local_cmd_msg, sizeof(struct cvp_dsp_cmd_msg));
 	if (err != 0)
-		pr_err("%s: cvp_dsp_send_cmd failed with err=%d\n",
+		dprintk(CVP_ERR,
+			"%s: cvp_dsp_send_cmd failed with err=%d\n",
 			__func__, err);
 	else {
 		mutex_lock(&me->smd_mutex);
@@ -193,7 +230,7 @@
 int cvp_dsp_suspend(uint32_t session_flag)
 {
 	int err = 0;
-	struct cvpd_cmd_msg local_cmd_msg;
+	struct cvp_dsp_cmd_msg local_cmd_msg;
 	struct cvp_dsp_apps *me = &gfa_cv;
 	uint32_t cdsp_state;
 
@@ -206,9 +243,10 @@
 
 	local_cmd_msg.cmd_msg_type = CVP_DSP_SUSPEND;
 	err = cvp_dsp_send_cmd
-			 (&local_cmd_msg, sizeof(struct cvpd_cmd_msg));
+			 (&local_cmd_msg, sizeof(struct cvp_dsp_cmd_msg));
 	if (err != 0)
-		pr_err("%s: cvp_dsp_send_cmd failed with err=%d\n",
+		dprintk(CVP_ERR,
+			"%s: cvp_dsp_send_cmd failed with err=%d\n",
 			__func__, err);
 
 	return err;
@@ -217,7 +255,7 @@
 int cvp_dsp_resume(uint32_t session_flag)
 {
 	int err;
-	struct cvpd_cmd_msg local_cmd_msg;
+	struct cvp_dsp_cmd_msg local_cmd_msg;
 	struct cvp_dsp_apps *me = &gfa_cv;
 	uint32_t cdsp_state;
 
@@ -230,9 +268,10 @@
 
 	local_cmd_msg.cmd_msg_type = CVP_DSP_RESUME;
 	err = cvp_dsp_send_cmd
-			 (&local_cmd_msg, sizeof(struct cvpd_cmd_msg));
+			 (&local_cmd_msg, sizeof(struct cvp_dsp_cmd_msg));
 	if (err != 0)
-		pr_err("%s: cvp_dsp_send_cmd failed with err=%d\n",
+		dprintk(CVP_ERR,
+			"%s: cvp_dsp_send_cmd failed with err=%d\n",
 			__func__, err);
 
 	return err;
@@ -242,19 +281,20 @@
 {
 	struct cvp_dsp_apps *me = &gfa_cv;
 	int err, local_cmd_msg_rsp;
-	struct cvpd_cmd_msg local_cmd_msg;
+	struct cvp_dsp_cmd_msg local_cmd_msg;
 	int srcVM[DEST_VM_NUM] = {VMID_HLOS, VMID_CDSP_Q6};
 	int destVM[SRC_VM_NUM] = {VMID_HLOS};
 	int destVMperm[SRC_VM_NUM] = { PERM_READ | PERM_WRITE | PERM_EXEC };
 
 	local_cmd_msg.cmd_msg_type = CVP_DSP_SHUTDOWN;
 	err = cvp_dsp_send_cmd
-			 (&local_cmd_msg, sizeof(struct cvpd_cmd_msg));
+			 (&local_cmd_msg, sizeof(struct cvp_dsp_cmd_msg));
 	if (err != 0)
-		pr_err("%s: cvp_dsp_send_cmd failed with err=%d\n",
+		dprintk(CVP_ERR,
+			"%s: cvp_dsp_send_cmd failed with err=%d\n",
 			__func__, err);
 
-	wait_for_completion(&work);
+	wait_for_completion(&me->shutdown_work);
 
 	mutex_lock(&me->smd_mutex);
 	me->cvp_shutdown = STATUS_SSR;
@@ -267,18 +307,106 @@
 			local_cmd_msg.msg_ptr_len, srcVM, DEST_VM_NUM,
 			destVM,	destVMperm, SRC_VM_NUM);
 		if (err) {
-			pr_err("%s: Failed to hyp_assign. err=%d\n",
+			dprintk(CVP_ERR,
+				"%s: Failed to hyp_assign. err=%d\n",
 				__func__, err);
 			return err;
 		}
 	} else {
-		pr_err("%s: Skipping hyp_assign as CDSP sent invalid response=%d\n",
+		dprintk(CVP_ERR,
+			"%s: Skipping hyp_assign as CDSP sent invalid response=%d\n",
 			__func__, local_cmd_msg_rsp);
 	}
 
 	return err;
 }
 
+int cvp_dsp_register_buffer(uint32_t iova_buff_addr,
+	uint32_t buff_index, uint32_t buff_size,
+	uint32_t session_id)
+{
+	struct cvp_dsp_cmd_msg local_cmd_msg;
+	int err;
+	struct cvp_dsp_apps *me = &gfa_cv;
+
+	local_cmd_msg.cmd_msg_type = CVP_DSP_REGISTER_BUFFER;
+	local_cmd_msg.iova_buff_addr = iova_buff_addr;
+	local_cmd_msg.buff_index = buff_index;
+	local_cmd_msg.buff_size = buff_size;
+	local_cmd_msg.session_id = session_id;
+	dprintk(CVP_DBG,
+		"%s: cmd_msg_type=0x%x, iova_buff_addr=0x%x buff_index=0x%x\n",
+		__func__, local_cmd_msg.cmd_msg_type, iova_buff_addr,
+		local_cmd_msg.buff_index);
+	dprintk(CVP_DBG,
+		"%s: buff_size=0x%x session_id=0x%x\n",
+		__func__, local_cmd_msg.buff_size, local_cmd_msg.session_id);
+
+	mutex_lock(&me->reg_buffer_mutex);
+	err = cvp_dsp_send_cmd
+			 (&local_cmd_msg, sizeof(struct cvp_dsp_cmd_msg));
+	if (err != 0) {
+		dprintk(CVP_ERR,
+			"%s: cvp_dsp_send_cmd failed with err=%d\n",
+			__func__, err);
+		mutex_unlock(&me->reg_buffer_mutex);
+		return err;
+	}
+
+	dprintk(CVP_DBG,
+		"%s: calling wait_for_completion work=%pK\n",
+		__func__, &me->reg_buffer_work);
+	wait_for_completion(&me->reg_buffer_work);
+	mutex_unlock(&me->reg_buffer_mutex);
+	dprintk(CVP_DBG,
+			"%s: done calling wait_for_completion\n", __func__);
+
+	return err;
+}
+
+int cvp_dsp_deregister_buffer(uint32_t iova_buff_addr,
+	uint32_t buff_index, uint32_t buff_size,
+	uint32_t session_id)
+{
+	struct cvp_dsp_cmd_msg local_cmd_msg;
+	int err;
+	struct cvp_dsp_apps *me = &gfa_cv;
+
+	local_cmd_msg.cmd_msg_type = CVP_DSP_DEREGISTER_BUFFER;
+	local_cmd_msg.iova_buff_addr = iova_buff_addr;
+	local_cmd_msg.buff_index = buff_index;
+	local_cmd_msg.buff_size = buff_size;
+	local_cmd_msg.session_id = session_id;
+	dprintk(CVP_DBG,
+		"%s: cmd_msg_type=0x%x, iova_buff_addr=0x%x buff_index=0x%x\n",
+		__func__, local_cmd_msg.cmd_msg_type, iova_buff_addr,
+		local_cmd_msg.buff_index);
+	dprintk(CVP_DBG,
+			"%s: buff_size=0x%x session_id=0x%x\n",
+		__func__, local_cmd_msg.buff_size, local_cmd_msg.session_id);
+
+	mutex_lock(&me->dereg_buffer_mutex);
+	err = cvp_dsp_send_cmd
+			 (&local_cmd_msg, sizeof(struct cvp_dsp_cmd_msg));
+	if (err != 0) {
+		dprintk(CVP_ERR,
+			"%s: cvp_dsp_send_cmd failed with err=%d\n",
+			__func__, err);
+		mutex_unlock(&me->dereg_buffer_mutex);
+		return err;
+	}
+
+	dprintk(CVP_DBG,
+			"%s: calling wait_for_completion work=%pK\n",
+			__func__, &me->dereg_buffer_work);
+	wait_for_completion(&me->dereg_buffer_work);
+	dprintk(CVP_DBG,
+			"%s: done calling wait_for_completion\n", __func__);
+	mutex_unlock(&me->dereg_buffer_mutex);
+
+	return err;
+}
+
 static const struct rpmsg_device_id cvp_dsp_rpmsg_match[] = {
 	{ CVP_APPS_DSP_GLINK_GUID },
 	{ },
@@ -299,13 +427,18 @@
 	struct cvp_dsp_apps *me = &gfa_cv;
 	int err;
 
-	init_completion(&work);
 	mutex_init(&me->smd_mutex);
+	mutex_init(&me->reg_buffer_mutex);
+	mutex_init(&me->dereg_buffer_mutex);
+	init_completion(&me->shutdown_work);
+	init_completion(&me->reg_buffer_work);
+	init_completion(&me->dereg_buffer_work);
 	me->cvp_shutdown = STATUS_INIT;
 	me->cdsp_state = STATUS_INIT;
 	err = register_rpmsg_driver(&cvp_dsp_rpmsg_client);
 	if (err) {
-		pr_err("%s : register_rpmsg_driver failed with err %d\n",
+		dprintk(CVP_ERR,
+			"%s : register_rpmsg_driver failed with err %d\n",
 			__func__, err);
 		goto register_bail;
 	}
@@ -325,6 +458,8 @@
 	me->cvp_shutdown = STATUS_DEINIT;
 	me->cdsp_state = STATUS_DEINIT;
 	mutex_destroy(&me->smd_mutex);
+	mutex_destroy(&me->reg_buffer_mutex);
+	mutex_destroy(&me->dereg_buffer_mutex);
 	if (me->rpmsg_register == 1)
 		unregister_rpmsg_driver(&cvp_dsp_rpmsg_client);
 }
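
The rpmsg callback above now demultiplexes CDSP responses onto per-command completion objects, each serialized by its own mutex, in place of the old single shared completion. A minimal sketch of that request/response pattern using the standard Linux completion API; all demo_* names and the stubbed transport are illustrative, not the driver's:

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/completion.h>

#define DEMO_CMD_REGISTER 1

struct demo_ipc {
	struct mutex reg_lock;		/* one register cmd in flight */
	struct completion reg_done;	/* signalled by the rx callback */
};

static void demo_ipc_init(struct demo_ipc *ipc)
{
	mutex_init(&ipc->reg_lock);
	init_completion(&ipc->reg_done);
}

static int demo_send(void *msg, size_t len)
{
	return 0;			/* stand-in for the real transport send */
}

/* rx path: wake exactly the waiter that issued this command type */
static void demo_rx_cb(struct demo_ipc *ipc, u32 cmd_msg_type)
{
	if (cmd_msg_type == DEMO_CMD_REGISTER)
		complete(&ipc->reg_done);
}

static int demo_register(struct demo_ipc *ipc, void *msg, size_t len)
{
	int err;

	mutex_lock(&ipc->reg_lock);
	err = demo_send(msg, len);
	if (err) {
		mutex_unlock(&ipc->reg_lock);
		return err;
	}
	/* blocks until demo_rx_cb() sees the matching response */
	wait_for_completion(&ipc->reg_done);
	mutex_unlock(&ipc->reg_lock);
	return 0;
}

Holding the mutex across the wait keeps at most one register command outstanding, so a response can always be matched to its request; a hardened version would likely use wait_for_completion_timeout() so a dropped response cannot block the caller forever.
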
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_dsp.h b/drivers/media/platform/msm/cvp/msm_cvp_dsp.h
index 6d7a3fc..d200942 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_dsp.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_dsp.h
@@ -7,6 +7,7 @@
 #define MSM_CVP_DSP_H
 
 #include <linux/types.h>
+#include "msm_cvp_debug.h"
 
 #define CVP_APPS_DSP_GLINK_GUID "cvp-glink-apps-dsp"
 #define CVP_APPS_DSP_SMD_GUID "cvp-smd-apps-dsp"
@@ -51,5 +52,29 @@
  */
 int cvp_dsp_shutdown(uint32_t session_flag);
 
+/*
+ * API to register iova buffer address with CDSP
+ *
+ * @iova_buff_addr: IOVA buffer address
+ * @buff_index:     buffer index
+ * @buff_size:      size in bytes of cvp buffer
+ * @session_id:     cvp session id
+ */
+int cvp_dsp_register_buffer(uint32_t iova_buff_addr,
+	uint32_t buff_index, uint32_t buff_size,
+	uint32_t session_id);
+
+/*
+ * API to de-register iova buffer address from CDSP
+ *
+ * @iova_buff_addr: IOVA buffer address
+ * @buff_index:     buffer index
+ * @buff_size:      size in bytes of cvp buffer
+ * @session_id:     cvp session id
+ */
+int cvp_dsp_deregister_buffer(uint32_t iova_buff_addr,
+	uint32_t buff_index, uint32_t buff_size,
+	uint32_t session_id);
+
 #endif // MSM_CVP_DSP_H
 
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_internal.h b/drivers/media/platform/msm/cvp/msm_cvp_internal.h
index c3deeba..3b580fa 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_internal.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_internal.h
@@ -526,12 +526,6 @@
 	struct msm_cvp_send_cmd send_cmd;
 };
 
-struct msm_cvp_internal_dfsconfig {
-	struct list_head list;
-	struct msm_smem smem;
-	struct msm_cvp_dfsconfig dfsconfig;
-};
-
 void msm_cvp_comm_handle_thermal_event(void);
 int msm_cvp_smem_alloc(size_t size, u32 align, u32 flags,
 	enum hal_buffer buffer_type, int map_kernel,
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_platform.c b/drivers/media/platform/msm/cvp/msm_cvp_platform.c
index dfc2855..d808082 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_platform.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_platform.c
@@ -94,7 +94,7 @@
 	},
 	{
 		.key = "qcom,sw-power-collapse",
-		.value = 1,
+		.value = 0,
 	},
 	{
 		.key = "qcom,domain-attr-non-fatal-faults",
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_res_parse.c b/drivers/media/platform/msm/cvp/msm_cvp_res_parse.c
index ca47d00..422b6b6 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_res_parse.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_res_parse.c
@@ -907,8 +907,17 @@
 		goto remove_cb;
 	}
 
-	if (cb->is_secure)
+	if (cb->is_secure) {
 		secure_vmid = get_secure_vmid(cb);
+		rc = iommu_domain_set_attr(cb->mapping->domain,
+			DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
+		if (rc) {
+			dprintk(CVP_ERR,
+				"%s - Couldn't arm_iommu_set_attr vmid\n",
+				__func__);
+			goto release_mapping;
+		}
+	}
 
 	if (res->cache_pagetables) {
 		int cache_pagetables = 1;
diff --git a/drivers/media/platform/msm/cvp/msm_v4l2_cvp.c b/drivers/media/platform/msm/cvp/msm_v4l2_cvp.c
index 79a84aa..047dc49 100644
--- a/drivers/media/platform/msm/cvp/msm_v4l2_cvp.c
+++ b/drivers/media/platform/msm/cvp/msm_v4l2_cvp.c
@@ -72,7 +72,7 @@
 	rc = msm_cvp_close(cvp_inst);
 	filp->private_data = NULL;
 	trace_msm_v4l2_cvp_close_end("msm v4l2_close end");
-	return rc;
+	return 0;
 }
 
 static int msm_cvp_v4l2_querycap(struct file *filp, void *fh,
diff --git a/drivers/media/platform/msm/cvp/msm_v4l2_private.c b/drivers/media/platform/msm/cvp/msm_v4l2_private.c
index 530f18e..35196a3 100644
--- a/drivers/media/platform/msm/cvp/msm_v4l2_private.c
+++ b/drivers/media/platform/msm/cvp/msm_v4l2_private.c
@@ -105,87 +105,77 @@
 	}
 	case MSM_CVP_HFI_DFS_CONFIG_CMD:
 	{
-		struct msm_cvp_dfsconfig *k, *u;
+		struct msm_cvp_dfs_config *k, *u;
 
-		dprintk(CVP_DBG, "%s: MSM_CVP_HFI_DFS_CONFIG_CMD\n",
-				__func__);
-		k = &kp->data.dfsconfig;
-		u = &up->data.dfsconfig;
-		if (get_user(k->cmd_address, &u->cmd_address) ||
-			get_user(k->cmd_size, &u->cmd_size) ||
-			get_user(k->packet_type, &u->packet_type) ||
-			get_user(k->session_id, &u->session_id) ||
-			get_user(k->srcbuffer_format, &u->srcbuffer_format) ||
-			get_user(
-			k->left_plane_info.stride[HFI_MAX_PLANES - 1],
-			&u->left_plane_info.stride[HFI_MAX_PLANES - 1]) ||
-			get_user(
-			k->left_plane_info.buf_size[HFI_MAX_PLANES - 1],
-			&u->left_plane_info.buf_size[HFI_MAX_PLANES - 1]) ||
-			get_user(
-			k->right_plane_info.stride[HFI_MAX_PLANES - 1],
-			&u->right_plane_info.stride[HFI_MAX_PLANES - 1]) ||
-			get_user(
-			k->right_plane_info.buf_size[HFI_MAX_PLANES - 1],
-			&u->right_plane_info.buf_size[HFI_MAX_PLANES - 1]) ||
-			get_user(k->width, &u->width) ||
-			get_user(k->height, &u->height) ||
-			get_user(k->occlusionmask_enable,
-				&u->occlusionmask_enable) ||
-			get_user(k->occlusioncost, &u->occlusioncost) ||
-			get_user(k->occlusionshift, &u->occlusionshift) ||
-			get_user(k->maxdisparity, &u->maxdisparity) ||
-			get_user(k->disparityoffset, &u->disparityoffset) ||
-			get_user(k->medianfilter_enable,
-				&u->medianfilter_enable) ||
-			get_user(k->occlusionbound, &u->occlusionbound) ||
-			get_user(k->occlusionfilling_enable,
-				&u->occlusionfilling_enable) ||
-			get_user(k->occlusionmaskdump,
-				&u->occlusionmaskdump) ||
-			get_user(k->clientdata.transactionid,
-				&u->clientdata.transactionid) ||
-			get_user(k->clientdata.client_data1,
-				&u->clientdata.client_data1) ||
-			get_user(k->clientdata.client_data2,
-				&u->clientdata.client_data2))
-			return -EFAULT;
-		for (i = 0; i < MAX_DFS_HFI_PARAMS; i++)
-			if (get_user(k->reserved[i], &u->reserved[i]))
+		dprintk(CVP_DBG, "%s: MSM_CVP_HFI_DFS_CONFIG_CMD\n", __func__);
+		k = &kp->data.dfs_config;
+		u = &up->data.dfs_config;
+		for (i = 0; i < CVP_DFS_CONFIG_CMD_SIZE; i++)
+			if (get_user(k->cvp_dfs_config[i],
+				&u->cvp_dfs_config[i]))
 				return -EFAULT;
 		break;
 	}
 	case MSM_CVP_HFI_DFS_FRAME_CMD:
 	case MSM_CVP_HFI_DFS_FRAME_CMD_RESPONSE:
 	{
-		struct msm_cvp_dfsframe *k, *u;
+		struct msm_cvp_dfs_frame *k, *u;
 
-		dprintk(CVP_DBG, "%s: Type =%d\n",
-							__func__, kp->type);
-		k = &kp->data.dfsframe;
-		u = &up->data.dfsframe;
-		if (get_user(k->cmd_address, &u->cmd_address) ||
-			get_user(k->cmd_size, &u->cmd_size) ||
-			get_user(k->packet_type, &u->packet_type) ||
-			get_user(k->session_id, &u->session_id) ||
-			get_user(k->left_buffer_index,
-				&u->left_buffer_index) ||
-			get_user(k->right_buffer_index,
-				&u->right_buffer_index) ||
-			get_user(k->disparitymap_buffer_idx,
-				&u->disparitymap_buffer_idx) ||
-			get_user(k->occlusionmask_buffer_idx,
-				&u->occlusionmask_buffer_idx) ||
-			get_user(k->clientdata.transactionid,
-				&u->clientdata.transactionid) ||
-			get_user(k->clientdata.client_data1,
-				&u->clientdata.client_data1) ||
-			get_user(k->clientdata.client_data2,
-				&u->clientdata.client_data2))
-			return -EFAULT;
+		dprintk(CVP_DBG, "%s: Type =%d\n", __func__, kp->type);
+		k = &kp->data.dfs_frame;
+		u = &up->data.dfs_frame;
+		for (i = 0; i < CVP_DFS_FRAME_CMD_SIZE; i++)
+			if (get_user(k->frame_data[i], &u->frame_data[i]))
+				return -EFAULT;
 
 		break;
 	}
+	case MSM_CVP_HFI_DME_CONFIG_CMD:
+	{
+		struct msm_cvp_dme_config *k, *u;
+
+		dprintk(CVP_DBG, "%s: MSM_CVP_HFI_DFS_CONFIG_CMD\n", __func__);
+		k = &kp->data.dme_config;
+		u = &up->data.dme_config;
+		for (i = 0; i < CVP_DME_CONFIG_CMD_SIZE; i++)
+			if (get_user(k->cvp_dme_config[i],
+				&u->cvp_dme_config[i]))
+				return -EFAULT;
+		break;
+	}
+	case MSM_CVP_HFI_DME_FRAME_CMD:
+	case MSM_CVP_HFI_DME_FRAME_CMD_RESPONSE:
+	{
+		struct msm_cvp_dme_frame *k, *u;
+
+		dprintk(CVP_DBG, "%s: type = %d\n",
+					__func__, kp->type);
+		k = &kp->data.dme_frame;
+		u = &up->data.dme_frame;
+
+		for (i = 0; i < CVP_DME_FRAME_CMD_SIZE; i++)
+			if (get_user(k->frame_data[i], &u->frame_data[i]))
+				return -EFAULT;
+
+		break;
+	}
+	case MSM_CVP_HFI_PERSIST_CMD:
+	case MSM_CVP_HFI_PERSIST_CMD_RESPONSE:
+	{
+		struct msm_cvp_persist_buf *k, *u;
+
+		dprintk(CVP_DBG, "%s: type = %d\n",
+					__func__, kp->type);
+		k = &kp->data.pbuf_cmd;
+		u = &up->data.pbuf_cmd;
+
+		for (i = 0; i < CVP_PERSIST_CMD_SIZE; i++)
+			if (get_user(k->persist_data[i], &u->persist_data[i]))
+				return -EFAULT;
+
+		break;
+	}
+
 	default:
 		dprintk(CVP_ERR, "%s: unknown cmd type 0x%x\n",
 			__func__, kp->type);
@@ -297,87 +287,78 @@
 	}
 	case MSM_CVP_HFI_DFS_CONFIG_CMD:
 	{
-		struct msm_cvp_dfsconfig *k, *u;
+		struct msm_cvp_dfs_config *k, *u;
 
 		dprintk(CVP_DBG, "%s: MSM_CVP_HFI_DFS_CONFIG_CMD\n",
 					__func__);
 
-		k = &kp->data.dfsconfig;
-		u = &up->data.dfsconfig;
-		if (put_user(k->cmd_address, &u->cmd_address) ||
-			put_user(k->cmd_size, &u->cmd_size) ||
-			put_user(k->packet_type, &u->packet_type) ||
-			put_user(k->session_id, &u->session_id) ||
-			put_user(k->srcbuffer_format, &u->srcbuffer_format) ||
-			put_user(
-			k->left_plane_info.stride[HFI_MAX_PLANES - 1],
-			&u->left_plane_info.stride[HFI_MAX_PLANES - 1]) ||
-			put_user(
-			k->left_plane_info.buf_size[HFI_MAX_PLANES - 1],
-			&u->left_plane_info.buf_size[HFI_MAX_PLANES - 1]) ||
-			put_user(
-			k->right_plane_info.stride[HFI_MAX_PLANES - 1],
-			&u->right_plane_info.stride[HFI_MAX_PLANES - 1]) ||
-			put_user(
-			k->right_plane_info.buf_size[HFI_MAX_PLANES - 1],
-			&u->right_plane_info.buf_size[HFI_MAX_PLANES - 1])
-			|| put_user(k->width, &u->width) ||
-			put_user(k->height, &u->height) ||
-			put_user(k->occlusionmask_enable,
-				&u->occlusionmask_enable) ||
-			put_user(k->occlusioncost, &u->occlusioncost) ||
-			put_user(k->occlusionshift, &u->occlusionshift) ||
-			put_user(k->maxdisparity, &u->maxdisparity) ||
-			put_user(
-				k->disparityoffset, &u->disparityoffset) ||
-			put_user(k->medianfilter_enable,
-				&u->medianfilter_enable) ||
-			put_user(k->occlusionbound, &u->occlusionbound) ||
-			put_user(k->occlusionfilling_enable,
-				&u->occlusionfilling_enable) ||
-			put_user(k->occlusionmaskdump,
-				&u->occlusionmaskdump) ||
-			put_user(k->clientdata.transactionid,
-				&u->clientdata.transactionid) ||
-			put_user(k->clientdata.client_data1,
-				&u->clientdata.client_data1) ||
-			put_user(k->clientdata.client_data2,
-				&u->clientdata.client_data2))
-			return -EFAULT;
-		for (i = 0; i < MAX_DFS_HFI_PARAMS; i++)
-			if (put_user(k->reserved[i], &u->reserved[i]))
+		k = &kp->data.dfs_config;
+		u = &up->data.dfs_config;
+		for (i = 0; i < CVP_DFS_CONFIG_CMD_SIZE; i++)
+			if (put_user(k->cvp_dfs_config[i],
+				&u->cvp_dfs_config[i]))
 				return -EFAULT;
 		break;
 	}
 	case MSM_CVP_HFI_DFS_FRAME_CMD:
 	case MSM_CVP_HFI_DFS_FRAME_CMD_RESPONSE:
 	{
-		struct msm_cvp_dfsframe *k, *u;
+		struct msm_cvp_dfs_frame *k, *u;
 
 		dprintk(CVP_DBG, "%s: type = %d\n",
 					__func__, kp->type);
-		k = &kp->data.dfsframe;
-		u = &up->data.dfsframe;
+		k = &kp->data.dfs_frame;
+		u = &up->data.dfs_frame;
 
-		if (put_user(k->cmd_address, &u->cmd_address) ||
-			put_user(k->cmd_size, &u->cmd_size) ||
-			put_user(k->packet_type, &u->packet_type) ||
-			put_user(k->session_id, &u->session_id) ||
-			put_user(k->left_buffer_index,
-				&u->left_buffer_index) ||
-			put_user(k->right_buffer_index,
-				&u->right_buffer_index) ||
-			put_user(k->disparitymap_buffer_idx,
-				&u->disparitymap_buffer_idx) ||
-			put_user(k->occlusionmask_buffer_idx,
-				&u->occlusionmask_buffer_idx) ||
-			put_user(k->clientdata.transactionid,
-				&u->clientdata.transactionid) ||
-			put_user(k->clientdata.client_data1,
-				&u->clientdata.client_data1) ||
-			put_user(k->clientdata.client_data2,
-				&u->clientdata.client_data2))
-			return -EFAULT;
+		for (i = 0; i < CVP_DFS_FRAME_CMD_SIZE; i++)
+			if (put_user(k->frame_data[i], &u->frame_data[i]))
+				return -EFAULT;
+
+		break;
+	}
+	case MSM_CVP_HFI_DME_CONFIG_CMD:
+	{
+		struct msm_cvp_dme_config *k, *u;
+
+		dprintk(CVP_DBG, "%s: MSM_CVP_HFI_DME_CONFIG_CMD\n", __func__);
+		k = &kp->data.dme_config;
+		u = &up->data.dme_config;
+		for (i = 0; i < CVP_DME_CONFIG_CMD_SIZE; i++)
+			if (put_user(k->cvp_dme_config[i],
+				&u->cvp_dme_config[i]))
+				return -EFAULT;
+		break;
+	}
+	case MSM_CVP_HFI_DME_FRAME_CMD:
+	case MSM_CVP_HFI_DME_FRAME_CMD_RESPONSE:
+	{
+		struct msm_cvp_dme_frame *k, *u;
+
+		dprintk(CVP_DBG, "%s: type = %d\n",
+					__func__, kp->type);
+		k = &kp->data.dme_frame;
+		u = &up->data.dme_frame;
+
+		for (i = 0; i < CVP_DME_FRAME_CMD_SIZE; i++)
+			if (put_user(k->frame_data[i], &u->frame_data[i]))
+				return -EFAULT;
+
+		break;
+	}
+	case MSM_CVP_HFI_PERSIST_CMD:
+	case MSM_CVP_HFI_PERSIST_CMD_RESPONSE:
+	{
+		struct msm_cvp_persist_buf *k, *u;
+
+		dprintk(CVP_DBG, "%s: type = %d\n",
+					__func__, kp->type);
+		k = &kp->data.pbuf_cmd;
+		u = &up->data.pbuf_cmd;
+
+		for (i = 0; i < CVP_PERSIST_CMD_SIZE; i++)
+			if (put_user(k->persist_data[i], &u->persist_data[i]))
+				return -EFAULT;
+
 		break;
 	}
 	default:
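
The private-ioctl copy helpers above now treat each HFI payload as a flat u32 array moved with a bounded get_user()/put_user() loop rather than a field-by-field copy. A sketch of the pattern under illustrative names (DEMO_CMD_SIZE stands in for the CVP_*_CMD_SIZE bounds):

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

#define DEMO_CMD_SIZE 8			/* illustrative bound */

struct demo_cmd {
	u32 data[DEMO_CMD_SIZE];
};

/*
 * Copy a fixed-size command payload from user space element by
 * element; a fault on any element maps to -EFAULT.
 */
static int demo_get_cmd(struct demo_cmd *k, struct demo_cmd __user *u)
{
	unsigned int i;

	for (i = 0; i < DEMO_CMD_SIZE; i++)
		if (get_user(k->data[i], &u->data[i]))
			return -EFAULT;
	return 0;
}

For a contiguous array a single copy_from_user(k->data, u->data, sizeof(k->data)) would be equivalent; the per-element loop simply mirrors the style of the surrounding handlers.
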
diff --git a/drivers/media/platform/msm/npu/npu_dev.c b/drivers/media/platform/msm/npu/npu_dev.c
index 79cd797..37b403df 100644
--- a/drivers/media/platform/msm/npu/npu_dev.c
+++ b/drivers/media/platform/msm/npu/npu_dev.c
@@ -586,7 +586,7 @@
 
 static void npu_disable_clocks(struct npu_device *npu_dev, bool post_pil)
 {
-	int i = 0;
+	int i, rc = 0;
 	struct npu_clk *core_clks = npu_dev->core_clks;
 
 	for (i = npu_dev->core_clk_num - 1; i >= 0 ; i--) {
@@ -598,6 +598,18 @@
 				continue;
 		}
 
+		/* set clock rate to 0 before disabling it */
+		if (!npu_is_exclude_rate_clock(core_clks[i].clk_name)) {
+			pr_debug("setting rate of clock %s to 0\n",
+				core_clks[i].clk_name);
+
+			rc = clk_set_rate(core_clks[i].clk, 0);
+			if (rc) {
+				pr_err("clk_set_rate %s to 0 failed\n",
+					core_clks[i].clk_name);
+			}
+		}
+
 		pr_debug("disabling clock %s\n", core_clks[i].clk_name);
 		clk_disable_unprepare(core_clks[i].clk);
 	}
@@ -744,13 +756,29 @@
 		}
 
 		/* set npu side regs - program SCID */
-		reg_val = NPU_CACHE_ATTR_IDn___POR | SYS_CACHE_SCID;
+		reg_val = REGR(npu_dev, NPU_CACHEMAP0_ATTR_IDn(0));
+		reg_val = (reg_val & ~NPU_CACHEMAP_SCID_MASK) | SYS_CACHE_SCID;
 
-		REGW(npu_dev, NPU_CACHE_ATTR_IDn(0), reg_val);
-		REGW(npu_dev, NPU_CACHE_ATTR_IDn(1), reg_val);
-		REGW(npu_dev, NPU_CACHE_ATTR_IDn(2), reg_val);
-		REGW(npu_dev, NPU_CACHE_ATTR_IDn(3), reg_val);
-		REGW(npu_dev, NPU_CACHE_ATTR_IDn(4), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP0_ATTR_IDn(0), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP0_ATTR_IDn(1), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP0_ATTR_IDn(2), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP0_ATTR_IDn(3), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP0_ATTR_IDn(4), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP0_ATTR_METADATA_IDn(0), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP0_ATTR_METADATA_IDn(1), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP0_ATTR_METADATA_IDn(2), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP0_ATTR_METADATA_IDn(3), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP0_ATTR_METADATA_IDn(4), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP1_ATTR_IDn(0), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP1_ATTR_IDn(1), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP1_ATTR_IDn(2), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP1_ATTR_IDn(3), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP1_ATTR_IDn(4), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP1_ATTR_METADATA_IDn(0), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP1_ATTR_METADATA_IDn(1), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP1_ATTR_METADATA_IDn(2), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP1_ATTR_METADATA_IDn(3), reg_val);
+		REGW(npu_dev, NPU_CACHEMAP1_ATTR_METADATA_IDn(4), reg_val);
 
 		pr_debug("prior to activate sys cache\n");
 		rc = llcc_slice_activate(npu_dev->sys_cache);
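
The SCID programming above replaces a write of the power-on-reset constant with a read-modify-write that preserves the other attribute bits. Sketched with demo names below; masking the incoming SCID is an added safety step in this sketch, while the hunk above relies on SYS_CACHE_SCID already fitting the field:

#include <linux/types.h>

#define DEMO_SCID_MASK 0x0000001Fu	/* low 5 bits hold the SCID */

static inline u32 demo_set_scid(u32 reg_val, u32 scid)
{
	/* clear only the SCID field; every other bit stays intact */
	return (reg_val & ~DEMO_SCID_MASK) | (scid & DEMO_SCID_MASK);
}
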
diff --git a/drivers/media/platform/msm/npu/npu_hw.h b/drivers/media/platform/msm/npu/npu_hw.h
index c328a8eb..9894a28 100644
--- a/drivers/media/platform/msm/npu/npu_hw.h
+++ b/drivers/media/platform/msm/npu/npu_hw.h
@@ -12,8 +12,11 @@
  */
 #define NPU_HW_VERSION (0x00000000)
 #define NPU_MASTERn_IPC_IRQ_OUT(n) (0x00001004+0x1000*(n))
-#define NPU_CACHE_ATTR_IDn___POR 0x00011100
-#define NPU_CACHE_ATTR_IDn(n) (0x00000800+0x4*(n))
+#define NPU_CACHEMAP0_ATTR_IDn(n) (0x00000800+0x4*(n))
+#define NPU_CACHEMAP0_ATTR_METADATA_IDn(n) (0x00000814+0x4*(n))
+#define NPU_CACHEMAP1_ATTR_IDn(n) (0x00000830+0x4*(n))
+#define NPU_CACHEMAP1_ATTR_METADATA_IDn(n) (0x00000844+0x4*(n))
+#define NPU_CACHEMAP_SCID_MASK 0x0000001F
 #define NPU_MASTERn_IPC_IRQ_IN_CTRL(n) (0x00001008+0x1000*(n))
 #define NPU_MASTER0_IPC_IRQ_IN_CTRL__IRQ_SOURCE_SELECT___S 4
 #define NPU_MASTERn_IPC_IRQ_OUT_CTRL(n) (0x00001004+0x1000*(n))
diff --git a/drivers/media/platform/msm/npu/npu_mgr.c b/drivers/media/platform/msm/npu/npu_mgr.c
index d1b766e..4cbea60 100644
--- a/drivers/media/platform/msm/npu/npu_mgr.c
+++ b/drivers/media/platform/msm/npu/npu_mgr.c
@@ -88,14 +88,6 @@
 		goto enable_sys_cache_fail;
 	}
 
-	/* Boot the NPU subsystem */
-	host_ctx->subsystem_handle = subsystem_get_local("npu");
-	if (IS_ERR(host_ctx->subsystem_handle)) {
-		pr_err("pil load npu fw failed\n");
-		ret = -ENODEV;
-		goto subsystem_get_fail;
-	}
-
 	/* Clear control/status registers */
 	REGW(npu_dev, REG_NPU_FW_CTRL_STATUS, 0x0);
 	REGW(npu_dev, REG_NPU_HOST_CTRL_VALUE, 0x0);
@@ -109,31 +101,31 @@
 	if (host_ctx->fw_dbg_mode & FW_DBG_DISABLE_WDOG)
 		reg_val |= HOST_CTRL_STATUS_DISABLE_WDOG_VAL;
 
+	/* Enable clock gating only if the HW access platform allows it */
+	if (npu_hw_clk_gating_enabled())
+		reg_val |= HOST_CTRL_STATUS_BOOT_ENABLE_CLK_GATE_VAL;
+
 	REGW(npu_dev, REG_NPU_HOST_CTRL_STATUS, reg_val);
 	/* Read back to flush all registers for fw to read */
 	REGR(npu_dev, REG_NPU_HOST_CTRL_STATUS);
 
+	/* Initialize the host side IPC before fw boots up */
+	npu_host_ipc_pre_init(npu_dev);
+
+	/* Boot the NPU subsystem */
+	host_ctx->subsystem_handle = subsystem_get_local("npu");
+	if (IS_ERR(host_ctx->subsystem_handle)) {
+		pr_err("pil load npu fw failed\n");
+		ret = -ENODEV;
+		goto subsystem_get_fail;
+	}
+
 	/* Post PIL clocks */
 	if (npu_enable_post_pil_clocks(npu_dev)) {
 		ret = -EPERM;
 		goto enable_post_clk_fail;
 	}
 
-	/*
-	 * Set logging state and clock gating state
-	 * during FW bootup initialization
-	 */
-	reg_val = REGR(npu_dev, REG_NPU_HOST_CTRL_STATUS);
-
-	/* Enable clock gating only if the HW access platform allows it */
-	if (npu_hw_clk_gating_enabled())
-		reg_val |= HOST_CTRL_STATUS_BOOT_ENABLE_CLK_GATE_VAL;
-
-	REGW(npu_dev, REG_NPU_HOST_CTRL_STATUS, reg_val);
-
-	/* Initialize the host side IPC */
-	npu_host_ipc_pre_init(npu_dev);
-
 	/* Keep reading ctrl status until NPU is ready */
 	pr_debug("waiting for status ready from fw\n");
 
@@ -170,11 +162,12 @@
 wait_fw_ready_fail:
 	npu_disable_post_pil_clocks(npu_dev);
 enable_post_clk_fail:
-	subsystem_put_local(host_ctx->subsystem_handle);
 subsystem_get_fail:
-	npu_disable_sys_cache(npu_dev);
 enable_sys_cache_fail:
+	npu_disable_sys_cache(npu_dev);
 	npu_disable_core_power(npu_dev);
+	if (!IS_ERR(host_ctx->subsystem_handle))
+		subsystem_put_local(host_ctx->subsystem_handle);
 enable_pw_fail:
 	host_ctx->fw_state = FW_DISABLED;
 	mutex_unlock(&host_ctx->lock);
@@ -236,8 +229,6 @@
 
 	npu_disable_post_pil_clocks(npu_dev);
 	npu_disable_sys_cache(npu_dev);
-	subsystem_put_local(host_ctx->subsystem_handle);
-	host_ctx->fw_state = FW_DISABLED;
 
 	/*
 	 * if fw is still alive, notify dsp before power off
@@ -251,6 +242,9 @@
 
 	npu_disable_core_power(npu_dev);
 
+	subsystem_put_local(host_ctx->subsystem_handle);
+	host_ctx->fw_state = FW_DISABLED;
+
 	if (ssr) {
 		/* mark all existing network to error state */
 		for (i = 0; i < MAX_LOADED_NETWORK; i++) {
@@ -277,7 +271,6 @@
 	mutex_init(&host_ctx->lock);
 	atomic_set(&host_ctx->ipc_trans_id, 1);
 
-	host_ctx->sys_cache_disable = true;
 	host_ctx->wq = npu_create_wq(host_ctx, "irq_hdl", host_irq_wq,
 		&host_ctx->irq_work);
 	if (!host_ctx->wq)
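
The npu_mgr.c hunks above move the host-side IPC pre-init ahead of subsystem_get_local() and rework the unwind so a failed PIL load can no longer hand an error pointer to subsystem_put_local(). A compact sketch of that label discipline, all names illustrative (helper prototypes only):

#include <linux/err.h>

struct demo_ctx {
	void *handle;
};

int demo_power_on(struct demo_ctx *ctx);
void demo_power_off(struct demo_ctx *ctx);
int demo_cache_enable(struct demo_ctx *ctx);
void demo_cache_disable(struct demo_ctx *ctx);
void *demo_subsystem_get(struct demo_ctx *ctx);

static int demo_fw_load(struct demo_ctx *ctx)
{
	int ret;

	ret = demo_power_on(ctx);
	if (ret)
		return ret;
	ret = demo_cache_enable(ctx);
	if (ret)
		goto cache_fail;
	ctx->handle = demo_subsystem_get(ctx);
	if (IS_ERR(ctx->handle)) {
		ret = PTR_ERR(ctx->handle);
		goto get_fail;
	}
	return 0;

get_fail:
	demo_cache_disable(ctx);
cache_fail:
	demo_power_off(ctx);
	return ret;
}

Each label undoes only the steps that completed before the failure, which is the invariant the reordered fail labels in the hunk above restore.
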
diff --git a/drivers/media/platform/msm/synx/synx.c b/drivers/media/platform/msm/synx/synx.c
index 80e1a2d..90fef13 100644
--- a/drivers/media/platform/msm/synx/synx.c
+++ b/drivers/media/platform/msm/synx/synx.c
@@ -533,7 +533,7 @@
 		return -EINVAL;
 	}
 
-	if (is_valid_type(external_sync.type)) {
+	if (!is_valid_type(external_sync.type)) {
 		pr_err("invalid external sync object\n");
 		return -EINVAL;
 	}
@@ -656,7 +656,7 @@
 
 	pr_debug("Enter %s\n", __func__);
 
-	if (!synx_obj)
+	if (!new_synx_obj)
 		return -EINVAL;
 
 	row = synx_from_key(synx_obj, secure_key);
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index f193637..aded2b3 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -782,7 +782,7 @@
 	pkt->num_properties = 1;
 	pkt->size += size;
 	pkt->rg_property_data[0] = ptype;
-	if (size)
+	if (size && pdata)
 		memcpy(&pkt->rg_property_data[1], pdata, size);
 
 	dprintk(VIDC_DBG, "Setting HAL Property = 0x%x\n", ptype);
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index 90932c3..88f943e 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -733,6 +733,13 @@
 	codecs = sys_init_done->dec_codec_supported;
 	for (i = 0; i < 8 * sizeof(codecs); i++) {
 		if ((1 << i) & codecs) {
+			if (codec_count >= VIDC_MAX_SESSIONS) {
+				dprintk(VIDC_ERR,
+					"%s: codec count exceeding max sessions %d\n",
+					__func__, VIDC_MAX_SESSIONS);
+				break;
+			}
+
 			capability =
 				&sys_init_done->capabilities[codec_count++];
 			capability->codec =
@@ -741,9 +748,17 @@
 				vidc_get_hal_domain(HFI_VIDEO_DOMAIN_DECODER);
 		}
 	}
+
 	codecs = sys_init_done->enc_codec_supported;
 	for (i = 0; i < 8 * sizeof(codecs); i++) {
 		if ((1 << i) & codecs) {
+			if (codec_count >= VIDC_MAX_SESSIONS) {
+				dprintk(VIDC_ERR,
+					"%s: codec count exceeding max sessions %d\n",
+					__func__, VIDC_MAX_SESSIONS);
+				break;
+			}
+
 			capability =
 				&sys_init_done->capabilities[codec_count++];
 			capability->codec =
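
Both decoder and encoder loops in hfi_response_handler.c above now bound codec_count before indexing the fixed capabilities table. The pattern, sketched with illustrative names and a stand-in bound:

#include <linux/types.h>

#define DEMO_MAX_CAPS 8			/* stand-in for VIDC_MAX_SESSIONS */

static unsigned int demo_fill_caps(u32 codecs, u32 *caps)
{
	unsigned int i, count = 0;

	for (i = 0; i < 8 * sizeof(codecs); i++) {
		if (!((1u << i) & codecs))
			continue;
		if (count >= DEMO_MAX_CAPS)	/* check before the ++ */
			break;
		caps[count++] = i;		/* record the set bit */
	}
	return count;
}

The bound check has to precede the post-increment indexing; placed after it, one element past the table would already have been written.
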
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index 30bd2a4..1663412 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -156,7 +156,6 @@
 		),
 		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDEO_H264_LEVEL,
@@ -190,7 +189,6 @@
 		),
 		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
@@ -206,7 +204,6 @@
 		),
 		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_LEVEL,
@@ -233,7 +230,6 @@
 		),
 		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_TIER,
@@ -248,7 +244,6 @@
 		),
 		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDEO_VP8_PROFILE,
@@ -260,7 +255,6 @@
 		.menu_skip_mask = ~(1 << V4L2_MPEG_VIDEO_VP8_PROFILE_0),
 		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL,
@@ -278,7 +272,6 @@
 		),
 		.qmenu = vp8_profile_level,
 		.flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDEO_VP9_PROFILE,
@@ -295,7 +288,6 @@
 		),
 		.qmenu = NULL,
 		.flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_VP9_LEVEL,
@@ -321,7 +313,6 @@
 		),
 		.qmenu = vp9_level,
 		.flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_PROFILE,
@@ -336,7 +327,6 @@
 		),
 		.qmenu = mpeg2_profile,
 		.flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_LEVEL,
@@ -352,7 +342,6 @@
 		),
 		.qmenu = mpeg2_level,
 		.flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR_8BIT,
@@ -1207,6 +1196,19 @@
 	hdev = inst->core->device;
 
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_SECURE);
+
+	if (ctrl->val) {
+		if (!(inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_HEVC ||
+			inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_H264 ||
+			inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_VP9 ||
+			inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_MPEG2)) {
+			dprintk(VIDC_ERR,
+				"%s: Secure allowed for HEVC/H264/VP9/MPEG2\n",
+				__func__);
+			return -EINVAL;
+		}
+	}
+
 	dprintk(VIDC_DBG, "%s: %#x\n", __func__, ctrl->val);
 	rc = call_hfi_op(hdev, session_set_property, inst->session,
 		HFI_PROPERTY_PARAM_SECURE_SESSION, &ctrl->val, sizeof(u32));
@@ -1417,6 +1419,8 @@
 		HFI_PROPERTY_PARAM_VDEC_INTERLACE_VIDEO_EXTRADATA, 0x1);
 	msm_comm_set_extradata(inst, display_info, 0x1);
 	msm_comm_set_extradata(inst,
+		HFI_PROPERTY_PARAM_HDR10_HIST_EXTRADATA, 0x1);
+	msm_comm_set_extradata(inst,
 		HFI_PROPERTY_PARAM_VDEC_NUM_CONCEALED_MB, 0x1);
 	if (inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_HEVC) {
 		msm_comm_set_extradata(inst,
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 8960bdc..6924762 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -16,7 +16,11 @@
 #define MIN_BIT_RATE 32000
 #define MAX_BIT_RATE 1200000000
 #define DEFAULT_BIT_RATE 64000
+#define MIN_BIT_RATE_RATIO 0
+#define MAX_BIT_RATE_RATIO 100
+#define MAX_HIER_CODING_LAYER 6
 #define BIT_RATE_STEP 1
+#define MAX_BASE_LAYER_PRIORITY_ID 63
 #define MAX_SLICE_BYTE_SIZE ((MAX_BIT_RATE)>>3)
 #define MIN_SLICE_BYTE_SIZE 512
 #define MAX_SLICE_MB_SIZE (((4096 + 15) >> 4) * ((2304 + 15) >> 4))
@@ -32,6 +36,8 @@
 #define MAX_INTRA_REFRESH_MBS ((7680 * 4320) >> 8)
 #define MAX_LTR_FRAME_COUNT 10
 #define MAX_NUM_B_FRAMES 1
+#define MIN_CBRPLUS_W 1280
+#define MIN_CBRPLUS_H 720
 
 #define L_MODE V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY
 #define MIN_NUM_ENC_OUTPUT_BUFFERS 4
@@ -197,7 +203,6 @@
 		.minimum = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR,
 		.maximum = V4L2_MPEG_VIDEO_BITRATE_MODE_CQ,
 		.default_value = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR,
-		.step = 0,
 		.menu_skip_mask = ~(
 		(1 << V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) |
 		(1 << V4L2_MPEG_VIDEO_BITRATE_MODE_CBR) |
@@ -280,7 +285,6 @@
 		),
 		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDEO_H264_LEVEL,
@@ -314,7 +318,6 @@
 		),
 		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDEO_VP8_PROFILE,
@@ -326,7 +329,6 @@
 		.menu_skip_mask = ~(1 << V4L2_MPEG_VIDEO_VP8_PROFILE_0),
 		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL,
@@ -344,7 +346,6 @@
 		),
 		.qmenu = vp8_profile_level,
 		.flags = V4L2_CTRL_FLAG_VOLATILE,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
@@ -360,7 +361,6 @@
 		),
 		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_LEVEL,
@@ -388,7 +388,6 @@
 		),
 		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_TIER,
@@ -403,7 +402,6 @@
 		),
 		.flags = V4L2_CTRL_FLAG_VOLATILE,
 		.qmenu = NULL,
-		.step = 1,
 	},
 	{
 		.id = V4L2_CID_ROTATE,
@@ -531,7 +529,8 @@
 		.type = V4L2_CTRL_TYPE_BITMASK,
 		.minimum = EXTRADATA_NONE,
 		.maximum = EXTRADATA_ADVANCED | EXTRADATA_ENC_INPUT_ROI |
-			EXTRADATA_ENC_INPUT_HDR10PLUS,
+			EXTRADATA_ENC_INPUT_HDR10PLUS |
+			EXTRADATA_ENC_INPUT_CVP,
 		.default_value = EXTRADATA_NONE,
 		.menu_skip_mask = 0,
 		.qmenu = NULL,
@@ -589,7 +588,7 @@
 		.name = "Set Hier layers",
 		.type = V4L2_CTRL_TYPE_INTEGER,
 		.minimum = 0,
-		.maximum = 6,
+		.maximum = MAX_HIER_CODING_LAYER,
 		.default_value = 0,
 		.step = 1,
 		.qmenu = NULL,
@@ -598,19 +597,12 @@
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER,
 		.name = "Set Hier max layers",
 		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = 0,
-		.maximum = 6,
-		.default_value = 0,
+		.minimum = V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_0,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_6,
+		.default_value =
+			V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_0,
 		.step = 1,
-		.menu_skip_mask = ~(
-		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_0) |
-		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_1) |
-		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_2) |
-		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_3) |
-		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_4) |
-		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_5) |
-		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_6)
-		),
+		.menu_skip_mask = 0,
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE,
@@ -619,7 +611,6 @@
 		.minimum = V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_P,
 		.maximum = V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_P,
 		.default_value = V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_P,
-		.step = 1,
 		.menu_skip_mask = ~(
 		(1 << V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_P)
 		),
@@ -689,9 +680,9 @@
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR,
 		.name = "Set layer0 BR",
 		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = MIN_BIT_RATE,
-		.maximum = MAX_BIT_RATE,
-		.default_value = DEFAULT_BIT_RATE,
+		.minimum = MIN_BIT_RATE_RATIO,
+		.maximum = MAX_BIT_RATE_RATIO,
+		.default_value = MIN_BIT_RATE_RATIO,
 		.step = 1,
 		.qmenu = NULL,
 	},
@@ -699,9 +690,9 @@
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR,
 		.name = "Set layer1 BR",
 		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = MIN_BIT_RATE,
-		.maximum = MAX_BIT_RATE,
-		.default_value = DEFAULT_BIT_RATE,
+		.minimum = MIN_BIT_RATE_RATIO,
+		.maximum = MAX_BIT_RATE_RATIO,
+		.default_value = MIN_BIT_RATE_RATIO,
 		.step = 1,
 		.qmenu = NULL,
 	},
@@ -709,9 +700,9 @@
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR,
 		.name = "Set layer2 BR",
 		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = MIN_BIT_RATE,
-		.maximum = MAX_BIT_RATE,
-		.default_value = DEFAULT_BIT_RATE,
+		.minimum = MIN_BIT_RATE_RATIO,
+		.maximum = MAX_BIT_RATE_RATIO,
+		.default_value = MIN_BIT_RATE_RATIO,
 		.step = 1,
 		.qmenu = NULL,
 	},
@@ -719,9 +710,9 @@
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR,
 		.name = "Set layer3 BR",
 		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = MIN_BIT_RATE,
-		.maximum = MAX_BIT_RATE,
-		.default_value = DEFAULT_BIT_RATE,
+		.minimum = MIN_BIT_RATE_RATIO,
+		.maximum = MAX_BIT_RATE_RATIO,
+		.default_value = MIN_BIT_RATE_RATIO,
 		.step = 1,
 		.qmenu = NULL,
 	},
@@ -729,9 +720,9 @@
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR,
 		.name = "Set layer4 BR",
 		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = MIN_BIT_RATE,
-		.maximum = MAX_BIT_RATE,
-		.default_value = DEFAULT_BIT_RATE,
+		.minimum = MIN_BIT_RATE_RATIO,
+		.maximum = MAX_BIT_RATE_RATIO,
+		.default_value = MIN_BIT_RATE_RATIO,
 		.step = 1,
 		.qmenu = NULL,
 	},
@@ -739,9 +730,9 @@
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR,
 		.name = "Set layer5 BR",
 		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = MIN_BIT_RATE,
-		.maximum = MAX_BIT_RATE,
-		.default_value = DEFAULT_BIT_RATE,
+		.minimum = MIN_BIT_RATE_RATIO,
+		.maximum = MAX_BIT_RATE_RATIO,
+		.default_value = MIN_BIT_RATE_RATIO,
 		.step = 1,
 		.qmenu = NULL,
 	},
@@ -757,10 +748,10 @@
 	},
 	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_BASELAYER_ID,
-		.name = "Set Base Layer ID for Hier-P",
+		.name = "Set Base Layer Priority ID for Hier-P",
 		.type = V4L2_CTRL_TYPE_INTEGER,
 		.minimum = 0,
-		.maximum = 6,
+		.maximum = MAX_BASE_LAYER_PRIORITY_ID,
 		.default_value = 0,
 		.step = 1,
 		.qmenu = NULL,
@@ -938,6 +929,24 @@
 		),
 		.qmenu = mpeg_video_stream_format,
 	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VENC_CVP_DISABLE,
+		.name = "CVP Disable",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = V4L2_MPEG_MSM_VIDC_DISABLE,
+		.maximum = V4L2_MPEG_MSM_VIDC_ENABLE,
+		.default_value = V4L2_MPEG_MSM_VIDC_DISABLE,
+		.step = 1,
+	},
+	{
+		.id = V4L2_CID_MPEG_VIDC_VENC_NATIVE_RECORDER,
+		.name = "Enable/Disable Native Recorder",
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.minimum = V4L2_MPEG_MSM_VIDC_DISABLE,
+		.maximum = V4L2_MPEG_MSM_VIDC_ENABLE,
+		.default_value = V4L2_MPEG_MSM_VIDC_DISABLE,
+		.step = 1,
+	},
 };
 
 #define NUM_CTRLS ARRAY_SIZE(msm_venc_ctrls)
@@ -1058,6 +1067,7 @@
 	inst->prop.width[CAPTURE_PORT] = DEFAULT_WIDTH;
 	inst->prop.height[OUTPUT_PORT] = DEFAULT_HEIGHT;
 	inst->prop.width[OUTPUT_PORT] = DEFAULT_WIDTH;
+	inst->prop.bframe_changed = false;
 	inst->capability.height.min = MIN_SUPPORTED_HEIGHT;
 	inst->capability.height.max = DEFAULT_HEIGHT;
 	inst->capability.width.min = MIN_SUPPORTED_WIDTH;
@@ -1106,7 +1116,7 @@
 	}
 
 	buff_req_buffer->buffer_size =
-		msm_vidc_calculate_enc_input_extra_size(inst);
+		msm_vidc_calculate_enc_input_extra_size(inst, 0);
 	inst->bufq[OUTPUT_PORT].plane_sizes[1] =
 		buff_req_buffer->buffer_size;
 
@@ -1222,6 +1232,7 @@
 	int rc = 0;
 	int i = 0;
 	struct msm_vidc_format *fmt = NULL;
+	struct v4l2_ctrl *extradata_ctrl;
 
 	if (!inst || !f) {
 		dprintk(VIDC_ERR,
@@ -1304,8 +1315,11 @@
 		 */
 		inst->bufq[fmt->type].plane_sizes[0] =
 			msm_vidc_calculate_enc_input_frame_size(inst);
+		extradata_ctrl = get_ctrl(inst,
+			V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA);
 		inst->bufq[fmt->type].plane_sizes[1] =
-			msm_vidc_calculate_enc_input_extra_size(inst);
+			msm_vidc_calculate_enc_input_extra_size(inst,
+				extradata_ctrl->val);
 		f->fmt.pix_mp.num_planes = inst->bufq[fmt->type].num_planes;
 		for (i = 0; i < inst->bufq[fmt->type].num_planes; i++) {
 			f->fmt.pix_mp.plane_fmt[i].sizeimage =
@@ -1367,31 +1381,6 @@
 	return 0;
 }
 
-void msm_venc_adjust_gop_size(struct msm_vidc_inst *inst)
-{
-	struct v4l2_ctrl *hier_ctrl;
-
-	/*
-	 * Layer encoding needs GOP size to be multiple of subgop size
-	 * And subgop size is 2 ^ number of enhancement layers
-	 */
-	hier_ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER);
-	if (hier_ctrl->val > 1) {
-		struct v4l2_ctrl *gop_size_ctrl;
-		u32 min_gop_size;
-		u32 num_subgops;
-
-		gop_size_ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_GOP_SIZE);
-		min_gop_size = (1 << (hier_ctrl->val - 1));
-		num_subgops = (gop_size_ctrl->val + (min_gop_size >> 1)) /
-				min_gop_size;
-		if (num_subgops)
-			gop_size_ctrl->val = num_subgops * min_gop_size;
-		else
-			gop_size_ctrl->val = min_gop_size;
-	}
-}
-
 int msm_venc_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
 {
 	int rc = 0;
@@ -1601,7 +1590,8 @@
 			}
 
 			buff_req_buffer->buffer_size =
-				msm_vidc_calculate_enc_input_extra_size(inst);
+				msm_vidc_calculate_enc_input_extra_size(inst,
+					ctrl->val);
 			inst->bufq[OUTPUT_PORT].plane_sizes[1] =
 					buff_req_buffer->buffer_size;
 		}
@@ -1689,9 +1679,39 @@
 					__func__);
 		}
 		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_BASELAYER_ID:
+		if (inst->state == MSM_VIDC_START_DONE) {
+			rc = msm_venc_set_base_layer_priority_id(inst);
+			if (rc)
+				dprintk(VIDC_ERR,
+					"%s: set baselayer id failed.\n",
+					__func__);
+		}
+		break;
+	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR:
+	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR:
+	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR:
+	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR:
+	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR:
+	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR:
+		if (inst->state == MSM_VIDC_START_DONE) {
+			rc = msm_venc_set_layer_bitrate(inst);
+			if (rc)
+				dprintk(VIDC_ERR,
+				"%s: set layer bitrate failed\n",
+				__func__);
+		}
+		break;
+	case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+		if (inst->state == MSM_VIDC_START_DONE) {
+			dprintk(VIDC_ERR,
+			"%s: Dynamic setting of Bframe is not supported\n",
+			__func__);
+			return -EINVAL;
+		}
+		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER:
 	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE:
-	case V4L2_CID_MPEG_VIDEO_B_FRAMES:
 	case V4L2_CID_ROTATE:
 	case V4L2_CID_MPEG_VIDC_VIDEO_LTRCOUNT:
 	case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
@@ -1703,19 +1723,12 @@
 	case V4L2_CID_MPEG_VIDC_VIDEO_AU_DELIMITER:
 	case V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR:
 	case V4L2_CID_MPEG_VIDC_VIDEO_VPX_ERROR_RESILIENCE:
-	case V4L2_CID_MPEG_VIDC_VIDEO_BASELAYER_ID:
 	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_QP:
 	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_QP:
 	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_QP:
 	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_QP:
 	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_QP:
 	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_QP:
-	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR:
-	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR:
-	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR:
-	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR:
-	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR:
-	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR:
 	case V4L2_CID_MPEG_VIDC_VIDEO_COLOR_SPACE:
 	case V4L2_CID_MPEG_VIDC_VIDEO_FULL_RANGE:
 	case V4L2_CID_MPEG_VIDC_VIDEO_TRANSFER_CHARS:
@@ -1731,6 +1744,8 @@
 	case V4L2_CID_MPEG_VIDC_VIDEO_PRIORITY:
 	case V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_RANDOM:
 	case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB:
+	case V4L2_CID_MPEG_VIDC_VENC_CVP_DISABLE:
+	case V4L2_CID_MPEG_VIDC_VENC_NATIVE_RECORDER:
 		dprintk(VIDC_DBG, "Control set: ID : %x Val : %d\n",
 			ctrl->id, ctrl->val);
 		break;
@@ -1905,6 +1920,16 @@
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_SECURE);
 	enable.enable = !!ctrl->val;
 
+	if (enable.enable) {
+		if (!(inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264 ||
+			inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_HEVC)) {
+			dprintk(VIDC_ERR,
+				"%s: Secure mode only allowed for HEVC/H264\n",
+				__func__);
+			return -EINVAL;
+		}
+	}
+
 	dprintk(VIDC_DBG, "%s: %d\n", __func__, enable.enable);
 	rc = call_hfi_op(hdev, session_set_property, inst->session,
 		HFI_PROPERTY_PARAM_SECURE_SESSION, &enable, sizeof(enable));
@@ -2022,59 +2047,83 @@
 	return rc;
 }
 
-int msm_venc_set_intra_period(struct msm_vidc_inst *inst)
+void msm_venc_decide_bframe(struct msm_vidc_inst *inst)
 {
-	int rc = 0;
-	struct hfi_device *hdev;
+	u32 width = inst->prop.width[OUTPUT_PORT];
+	u32 height = inst->prop.height[OUTPUT_PORT];
+	u32 num_mbs_per_frame, num_mbs_per_sec;
 	struct v4l2_ctrl *ctrl;
-	struct hfi_intra_period intra_period;
+	struct v4l2_ctrl *bframe_ctrl;
+	struct msm_vidc_platform_resources *res;
 
-	if (!inst || !inst->core) {
-		dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
+	res = &inst->core->resources;
+	bframe_ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_B_FRAMES);
+	num_mbs_per_frame = NUM_MBS_PER_FRAME(width, height);
+	if (num_mbs_per_frame > res->max_bframe_mbs_per_frame)
+		goto disable_bframe;
+
+	num_mbs_per_sec = num_mbs_per_frame *
+		(inst->clk_data.frame_rate >> 16);
+	if (num_mbs_per_sec > res->max_bframe_mbs_per_sec)
+		goto disable_bframe;
+
+	ctrl = get_ctrl(inst,
+		V4L2_CID_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER);
+	if (ctrl->val > 1)
+		goto disable_bframe;
+
+	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_LTRCOUNT);
+	if (ctrl->val)
+		goto disable_bframe;
+
+	if (inst->rc_type != V4L2_MPEG_VIDEO_BITRATE_MODE_VBR)
+		goto disable_bframe;
+
+	if (inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264) {
+		ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_H264_PROFILE);
+		if ((ctrl->val != V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) &&
+			(ctrl->val != V4L2_MPEG_VIDEO_H264_PROFILE_HIGH))
+			goto disable_bframe;
+	} else if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_HEVC)
+		goto disable_bframe;
+
+	if (inst->clk_data.low_latency_mode)
+		goto disable_bframe;
+
+	if (!bframe_ctrl->val) {
+		ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VENC_NATIVE_RECORDER);
+		if (ctrl->val) {
+			/*
+			 * Native recorder is enabled and bframe is not enabled
+			 * Hence, forcefully enable bframe
+			 */
+			inst->prop.bframe_changed = true;
+			bframe_ctrl->val = MAX_NUM_B_FRAMES;
+			dprintk(VIDC_DBG, "Bframe is forcefully enabled\n");
+		} else {
+			/*
+			 * Native recorder is not enabled
+			 * B-Frame is not enabled by client
+			 */
+			goto disable_bframe;
+		}
 	}
-	hdev = inst->core->device;
+	dprintk(VIDC_DBG, "Bframe can be enabled!\n");
 
-	msm_venc_adjust_gop_size(inst);
-	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_GOP_SIZE);
-	intra_period.pframes = ctrl->val;
-
-	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_B_FRAMES);
-	intra_period.bframes = ctrl->val;
-
-	dprintk(VIDC_DBG, "%s: %d %d\n", __func__, intra_period.pframes,
-		intra_period.bframes);
-	rc = call_hfi_op(hdev, session_set_property, inst->session,
-		HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD, &intra_period,
-		sizeof(intra_period));
-	if (rc) {
-		dprintk(VIDC_ERR, "%s: set property failed\n", __func__);
-		return rc;
+	return;
+disable_bframe:
+	if (bframe_ctrl->val) {
+		/*
+		 * Client wanted to enable bframe but,
+		 * conditions to enable are not met
+		 * Hence, forcefully disable bframe
+		 */
+		inst->prop.bframe_changed = true;
+		bframe_ctrl->val = 0;
+		dprintk(VIDC_DBG, "Bframe is forcefully disabled!\n");
+	} else {
+		dprintk(VIDC_DBG, "Bframe is disabled\n");
 	}
-
-	return rc;
-}
-
-int msm_venc_set_request_keyframe(struct msm_vidc_inst *inst)
-{
-	int rc = 0;
-	struct hfi_device *hdev;
-
-	if (!inst || !inst->core) {
-		dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
-		return -EINVAL;
-	}
-	hdev = inst->core->device;
-
-	dprintk(VIDC_DBG, "%s\n", __func__);
-	rc = call_hfi_op(hdev, session_set_property, inst->session,
-		HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME, NULL, 0);
-	if (rc) {
-		dprintk(VIDC_ERR, "%s: set property failed\n", __func__);
-		return rc;
-	}
-
-	return rc;
 }
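
For reference, the macroblock-budget gate at the top of msm_venc_decide_bframe() reduces to the standalone sketch below. NUM_MBS_PER_FRAME is assumed to be the usual round-up-to-16x16 macro, and the limit constants mirror the 4K platform-data values added later in this patch; in the driver they are read at runtime from res->max_bframe_mbs_per_frame/_sec.

#include <stdbool.h>
#include <stdint.h>

#define NUM_MBS_PER_FRAME(w, h) ((((w) + 15) >> 4) * (((h) + 15) >> 4))

/* Limits as configured for the 4K-capable target in this patch:
 * 3840x2160/256 MBs per frame, and the same at 60 fps. */
static const uint32_t max_bframe_mbs_per_frame = 32400;
static const uint32_t max_bframe_mbs_per_sec = 1944000;

/* fps_q16 is Q16.16, as in inst->clk_data.frame_rate. */
static bool bframe_budget_ok(uint32_t width, uint32_t height,
			     uint32_t fps_q16)
{
	uint32_t mbs_per_frame = NUM_MBS_PER_FRAME(width, height);
	uint32_t mbs_per_sec = mbs_per_frame * (fps_q16 >> 16);

	return mbs_per_frame <= max_bframe_mbs_per_frame &&
	       mbs_per_sec <= max_bframe_mbs_per_sec;
}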
 
 int msm_venc_set_adaptive_bframes(struct msm_vidc_inst *inst)
@@ -2100,11 +2149,54 @@
 	return rc;
 }
 
-int msm_venc_set_rate_control(struct msm_vidc_inst *inst)
+void msm_venc_adjust_gop_size(struct msm_vidc_inst *inst)
+{
+	struct v4l2_ctrl *hier_ctrl;
+	struct v4l2_ctrl *bframe_ctrl;
+	struct v4l2_ctrl *gop_size_ctrl;
+
+	gop_size_ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_GOP_SIZE);
+	if (inst->prop.bframe_changed) {
+		/*
+	 * The bframe setting was explicitly changed;
+	 * hence, adjust the GOP size accordingly.
+		 */
+		bframe_ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_B_FRAMES);
+		if (!bframe_ctrl->val)
+			/* Forcefully disabled */
+			gop_size_ctrl->val = gop_size_ctrl->val *
+					(1 + MAX_NUM_B_FRAMES);
+		else
+			/* Forcefully enabled */
+			gop_size_ctrl->val = gop_size_ctrl->val /
+					(1 + MAX_NUM_B_FRAMES);
+	}
+
+	/*
+	 * Layer encoding needs GOP size to be multiple of subgop size
+	 * And subgop size is 2 ^ number of enhancement layers
+	 */
+	hier_ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER);
+	if (hier_ctrl->val > 1) {
+		u32 min_gop_size;
+		u32 num_subgops;
+
+		min_gop_size = (1 << (hier_ctrl->val - 1));
+		num_subgops = (gop_size_ctrl->val + (min_gop_size >> 1)) /
+				min_gop_size;
+		if (num_subgops)
+			gop_size_ctrl->val = num_subgops * min_gop_size;
+		else
+			gop_size_ctrl->val = min_gop_size;
+	}
+}
+
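The subgop rounding above, restated as a minimal standalone sketch with a worked example:

#include <stdint.h>

/* Subgop size is 2^(layers - 1) frames, and the GOP is rounded to the
 * nearest non-zero multiple of it. E.g. layers = 3 -> subgop = 4, so a
 * client GOP of 30 becomes (30 + 2) / 4 = 8 subgops -> GOP 32. */
static uint32_t round_gop_to_subgop(uint32_t gop_size, uint32_t layers)
{
	uint32_t min_gop = 1u << (layers - 1);
	uint32_t num_subgops = (gop_size + (min_gop >> 1)) / min_gop;

	return num_subgops ? num_subgops * min_gop : min_gop;
}
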
+int msm_venc_set_intra_period(struct msm_vidc_inst *inst)
 {
 	int rc = 0;
 	struct hfi_device *hdev;
-	u32 hfi_rc;
+	struct v4l2_ctrl *ctrl;
+	struct hfi_intra_period intra_period;
 
 	if (!inst || !inst->core) {
 		dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
@@ -2112,6 +2204,116 @@
 	}
 	hdev = inst->core->device;
 
+	msm_venc_adjust_gop_size(inst);
+
+	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_GOP_SIZE);
+	intra_period.pframes = ctrl->val;
+
+	/*
+	 * At this point we have already made decision on bframe
+	 * Control value gives updated bframe value.
+	 */
+	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_B_FRAMES);
+	intra_period.bframes = ctrl->val;
+
+	dprintk(VIDC_DBG, "%s: %d %d\n", __func__, intra_period.pframes,
+		intra_period.bframes);
+	rc = call_hfi_op(hdev, session_set_property, inst->session,
+		HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD, &intra_period,
+		sizeof(intra_period));
+	if (rc) {
+		dprintk(VIDC_ERR, "%s: set property failed\n", __func__);
+		return rc;
+	}
+
+	if (intra_period.bframes) {
+		/* Enable adaptive bframes as num bframes != 0 */
+		rc = msm_venc_set_adaptive_bframes(inst);
+		if (rc) {
+			dprintk(VIDC_ERR, "%s: set property failed\n",
+				__func__);
+			return rc;
+		}
+	}
+	return rc;
+}
+
+int msm_venc_set_request_keyframe(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	struct hfi_device *hdev;
+
+	if (!inst || !inst->core) {
+		dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	hdev = inst->core->device;
+
+	dprintk(VIDC_DBG, "%s\n", __func__);
+	rc = call_hfi_op(hdev, session_set_property, inst->session,
+		HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME, NULL, 0);
+	if (rc) {
+		dprintk(VIDC_ERR, "%s: set property failed\n", __func__);
+		return rc;
+	}
+
+	return rc;
+}
+
+int msm_venc_set_rate_control(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	struct hfi_device *hdev;
+	u32 hfi_rc, codec;
+	u32 height, width, mbpf;
+	struct hfi_vbv_hrd_buf_size hrd_buf_size;
+
+	if (!inst || !inst->core) {
+		dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+
+	hdev = inst->core->device;
+	inst->clk_data.is_cbr_plus = false;
+	codec = inst->fmts[CAPTURE_PORT].fourcc;
+	height = inst->prop.height[OUTPUT_PORT];
+	width = inst->prop.width[OUTPUT_PORT];
+	mbpf = NUM_MBS_PER_FRAME(height, width);
+
+	if (inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_MBR_VFR)
+		inst->rc_type = V4L2_MPEG_VIDEO_BITRATE_MODE_MBR;
+	else if (inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR &&
+			   inst->clk_data.low_latency_mode)
+		inst->rc_type = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR;
+
+	if ((inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR ||
+		inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR) &&
+		(codec != V4L2_PIX_FMT_VP8)) {
+		hrd_buf_size.vbv_hrd_buf_size = 500;
+		inst->clk_data.low_latency_mode = true;
+
+		if ((width > MIN_CBRPLUS_W && height > MIN_CBRPLUS_H) ||
+			(width > MIN_CBRPLUS_H && height > MIN_CBRPLUS_W) ||
+			mbpf > NUM_MBS_PER_FRAME(720, 1280)) {
+			hrd_buf_size.vbv_hrd_buf_size = 1000;
+			inst->clk_data.is_cbr_plus = true;
+		}
+
+		dprintk(VIDC_DBG, "Set hrd_buf_size %d",
+				hrd_buf_size.vbv_hrd_buf_size);
+
+		rc = call_hfi_op(hdev, session_set_property,
+			(void *)inst->session,
+			HFI_PROPERTY_CONFIG_VENC_VBV_HRD_BUF_SIZE,
+			(void *)&hrd_buf_size, sizeof(hrd_buf_size));
+		if (rc) {
+			dprintk(VIDC_ERR, "%s: set HRD_BUF_SIZE %u failed\n",
+					__func__,
+					hrd_buf_size.vbv_hrd_buf_size);
+			inst->clk_data.is_cbr_plus = false;
+		}
+	}
+
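A sketch of the CBR-plus sizing decision above. MIN_CBRPLUS_W/MIN_CBRPLUS_H are defined outside this hunk; the 1280x720 values below are an assumption suggested by the NUM_MBS_PER_FRAME(720, 1280) fallback, and the 500/1000 constants are the same unit the firmware property expects, whatever that is.

#include <stdbool.h>
#include <stdint.h>

#define NUM_MBS_PER_FRAME(w, h) ((((w) + 15) >> 4) * (((h) + 15) >> 4))

/* Assumed values; the real MIN_CBRPLUS_W/H macros live elsewhere. */
#define MIN_CBRPLUS_W 1280
#define MIN_CBRPLUS_H 720

/* Streams larger than 720p in either orientation, or by MB count,
 * are treated as CBR-plus and get the bigger HRD buffer. */
static uint32_t pick_hrd_buf_size(uint32_t w, uint32_t h, bool *cbr_plus)
{
	uint32_t mbpf = NUM_MBS_PER_FRAME(h, w);

	*cbr_plus = (w > MIN_CBRPLUS_W && h > MIN_CBRPLUS_H) ||
		    (w > MIN_CBRPLUS_H && h > MIN_CBRPLUS_W) ||
		    mbpf > NUM_MBS_PER_FRAME(720, 1280);
	return *cbr_plus ? 1000 : 500;
}
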
 	switch (inst->rc_type) {
 	case RATE_CONTROL_OFF:
 		hfi_rc = HFI_RATE_CONTROL_OFF;
@@ -2128,9 +2330,6 @@
 	case V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR:
 		hfi_rc = HFI_RATE_CONTROL_CBR_VFR;
 		break;
-	case V4L2_MPEG_VIDEO_BITRATE_MODE_MBR_VFR:
-		hfi_rc = HFI_RATE_CONTROL_MBR_VFR;
-		break;
 	case V4L2_MPEG_VIDEO_BITRATE_MODE_CQ:
 		hfi_rc = HFI_RATE_CONTROL_CQ;
 		break;
@@ -2188,6 +2387,7 @@
 	struct hfi_device *hdev;
 	struct v4l2_ctrl *ctrl;
 	struct hfi_bitrate bitrate;
+	struct hfi_enable enable;
 
 	if (!inst || !inst->core) {
 		dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
@@ -2195,6 +2395,22 @@
 	}
 	hdev = inst->core->device;
 
+	if (inst->layer_bitrate) {
+		dprintk(VIDC_DBG, "%s: Layer bitrate is enabled\n", __func__);
+		return 0;
+	}
+
+	enable.enable = 0;
+	dprintk(VIDC_DBG, "%s: bitrate type: %d\n",
+		__func__, enable.enable);
+	rc = call_hfi_op(hdev, session_set_property, inst->session,
+		HFI_PROPERTY_PARAM_VENC_BITRATE_TYPE, &enable,
+		sizeof(enable));
+	if (rc) {
+		dprintk(VIDC_ERR, "%s: set property failed\n", __func__);
+		return rc;
+	}
+
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_BITRATE);
 	bitrate.bit_rate = ctrl->val;
 	bitrate.layer_id = MSM_VIDC_ALL_LAYER_ID;
@@ -2208,6 +2424,112 @@
 	return rc;
 }
 
+int msm_venc_set_layer_bitrate(struct msm_vidc_inst *inst)
+{
+	int rc = 0, i = 0;
+	struct hfi_device *hdev;
+	struct v4l2_ctrl *bitrate = NULL;
+	struct v4l2_ctrl *layer = NULL;
+	struct v4l2_ctrl *max_layer = NULL;
+	struct v4l2_ctrl *layer_br_ratios[MAX_HIER_CODING_LAYER] = {NULL};
+	struct hfi_bitrate layer_br;
+	struct hfi_enable enable;
+
+	if (!inst || !inst->core) {
+		dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
+		return -EINVAL;
+	}
+	hdev = inst->core->device;
+
+	max_layer = get_ctrl(inst,
+		V4L2_CID_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER);
+	layer = get_ctrl(inst,
+		V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER);
+
+	if (!max_layer->val || !layer->val) {
+		dprintk(VIDC_DBG,
+			"%s: Hierp layer not set. Ignore layer bitrate\n",
+			__func__);
+		goto error;
+	}
+
+	if (max_layer->val < layer->val) {
+		dprintk(VIDC_DBG,
+			"%s: Hierp layer greater than max isn't allowed\n",
+			__func__);
+		goto error;
+	}
+
+	layer_br_ratios[0] = get_ctrl(inst,
+		V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR);
+	layer_br_ratios[1] = get_ctrl(inst,
+		V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR);
+	layer_br_ratios[2] = get_ctrl(inst,
+		V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR);
+	layer_br_ratios[3] = get_ctrl(inst,
+		V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR);
+	layer_br_ratios[4] = get_ctrl(inst,
+		V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR);
+	layer_br_ratios[5] = get_ctrl(inst,
+		V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR);
+
+	/* Set layer bitrates only when highest layer br ratio is 100. */
+	if (layer_br_ratios[layer->val-1]->val != MAX_BIT_RATE_RATIO ||
+		layer_br_ratios[0]->val == 0) {
+		dprintk(VIDC_DBG,
+			"%s: Improper layer bitrate ratio\n",
+			__func__);
+		goto error;
+	}
+
+	for (i = layer->val - 1; i > 0; --i) {
+		if (layer_br_ratios[i]->val == 0) {
+			dprintk(VIDC_DBG,
+				"%s: Layer ratio must be non-zero\n",
+				__func__);
+			goto error;
+		}
+		layer_br_ratios[i]->val -= layer_br_ratios[i-1]->val;
+	}
+
+	enable.enable = 1;
+	dprintk(VIDC_DBG, "%s: %d\n", __func__, enable.enable);
+	rc = call_hfi_op(hdev, session_set_property, inst->session,
+		HFI_PROPERTY_PARAM_VENC_BITRATE_TYPE, &enable,
+		sizeof(enable));
+	if (rc) {
+		dprintk(VIDC_ERR, "%s: set property failed\n", __func__);
+		goto error;
+	}
+
+	bitrate = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_BITRATE);
+	for (i = 0; i < layer->val; ++i) {
+		layer_br.bit_rate =
+			bitrate->val * layer_br_ratios[i]->val / 100;
+		layer_br.layer_id = i;
+		dprintk(VIDC_DBG,
+			"%s: Bitrate for Layer[%u]: [%u]\n",
+			__func__, layer_br.layer_id, layer_br.bit_rate);
+
+		rc = call_hfi_op(hdev, session_set_property, inst->session,
+			HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE, &layer_br,
+			sizeof(layer_br));
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"%s: set property failed for layer: %u\n",
+				__func__, layer_br.layer_id);
+			goto error;
+		}
+	}
+
+	inst->layer_bitrate = true;
+	return rc;
+
+error:
+	inst->layer_bitrate = false;
+	return rc;
+}
+
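The differencing loop above converts cumulative ratios into per-layer shares before scaling by the session bitrate. A worked, standalone example (plain C, not driver code): cumulative ratios {60, 80, 100} on a 6 Mbps target split into 3.6, 1.2 and 1.2 Mbps.

#include <stdint.h>
#include <stdio.h>

/* The L0..Ln controls carry *cumulative* percentages, e.g. {60, 80, 100};
 * differencing from the top turns them into per-layer shares {60, 20, 20}. */
static void split_layer_bitrates(uint32_t total_bps,
				 uint32_t *ratios, uint32_t n)
{
	uint32_t i;

	for (i = n - 1; i > 0; --i)
		ratios[i] -= ratios[i - 1];
	for (i = 0; i < n; ++i)
		printf("layer %u: %u bps\n", i,
		       (uint32_t)((uint64_t)total_bps * ratios[i] / 100));
}
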
 int msm_venc_set_frame_qp(struct msm_vidc_inst *inst)
 {
 	int rc = 0;
@@ -2423,35 +2745,88 @@
 	struct v4l2_ctrl *ctrl_t;
 	struct hfi_multi_slice_control multi_slice_control;
 	int temp = 0;
+	u32 mb_per_frame, fps, mbps, bitrate;
+	u32 slice_val, slice_mode, max_avg_slicesize;
+	u32 rc_mode, output_width, output_height;
+	struct v4l2_ctrl *rc_enable;
 
 	if (!inst || !inst->core) {
 		dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
 		return -EINVAL;
 	}
-	hdev = inst->core->device;
 
 	if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_HEVC &&
 		inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_H264)
 		return 0;
 
+	slice_mode = HFI_MULTI_SLICE_OFF;
+	slice_val = 0;
+
+	bitrate = inst->clk_data.bitrate;
+	fps = inst->clk_data.frame_rate;
+	rc_mode = inst->rc_type;
+	rc_enable = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE);
+	if (fps > 60 ||
+		(rc_enable->val &&
+		 rc_mode != V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR &&
+		 rc_mode != V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)) {
+		goto set_and_exit;
+	}
+
+	output_width = inst->prop.width[OUTPUT_PORT];
+	output_height = inst->prop.height[OUTPUT_PORT];
+
+	if (output_height < 128 ||
+		(inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_HEVC &&
+		 output_width < 384) ||
+		(inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_H264 &&
+		 output_width < 192)) {
+		goto set_and_exit;
+	}
+
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE);
-	multi_slice_control.multi_slice = HFI_MULTI_SLICE_OFF;
-	temp = 0;
 	if (ctrl->val == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB) {
 		temp = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB;
-		multi_slice_control.multi_slice = HFI_MULTI_SLICE_BY_MB_COUNT;
+		slice_mode = HFI_MULTI_SLICE_BY_MB_COUNT;
 	} else if (ctrl->val == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES) {
 		temp = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES;
-		multi_slice_control.multi_slice =
-			HFI_MULTI_SLICE_BY_BYTE_COUNT;
+		slice_mode = HFI_MULTI_SLICE_BY_BYTE_COUNT;
+	} else {
+		goto set_and_exit;
 	}
 
-	multi_slice_control.slice_size = 0;
-	if (temp) {
-		ctrl_t = get_ctrl(inst, temp);
-		multi_slice_control.slice_size = ctrl_t->val;
+	ctrl_t = get_ctrl(inst, temp);
+	slice_val = ctrl_t->val;
+
+	/* Update Slice Config */
+	mb_per_frame = NUM_MBS_PER_FRAME(output_height, output_width);
+	mbps = NUM_MBS_PER_SEC(output_height, output_width, fps);
+
+	if (slice_mode == HFI_MULTI_SLICE_BY_MB_COUNT) {
+		if (output_width <= 4096 || output_height <= 4096 ||
+			mb_per_frame <= NUM_MBS_PER_FRAME(4096, 2160) ||
+			mbps <= NUM_MBS_PER_SEC(4096, 2160, 60)) {
+			slice_val = max(slice_val, mb_per_frame / 10);
+		}
+	} else {
+		if (output_width <= 1920 || output_height <= 1920 ||
+			mb_per_frame <= NUM_MBS_PER_FRAME(1088, 1920) ||
+			mbps <= NUM_MBS_PER_SEC(1088, 1920, 60)) {
+			max_avg_slicesize = ((bitrate / fps) / 8) / 10;
+			slice_val = max(slice_val, max_avg_slicesize);
+		}
 	}
 
+	if (slice_mode == HFI_MULTI_SLICE_OFF) {
+		ctrl->val = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE;
+		ctrl_t->val = 0;
+	}
+
+set_and_exit:
+	multi_slice_control.multi_slice = slice_mode;
+	multi_slice_control.slice_size = slice_val;
+
+	hdev = inst->core->device;
 	dprintk(VIDC_DBG, "%s: %d %d\n", __func__,
 			multi_slice_control.multi_slice,
 			multi_slice_control.slice_size);
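
The byte-count branch above enforces a floor of one tenth of the average compressed frame size per slice. A standalone restatement with a worked example, taking fps as an integer frame rate:

#include <stdint.h>

/* E.g. 20 Mbps at 30 fps: 20000000 / 30 / 8 / 10 ~= 8333 bytes, so a
 * client request for 4096-byte slices is raised to ~8333 bytes. */
static uint32_t effective_slice_bytes(uint32_t bitrate_bps,
				      uint32_t fps, uint32_t requested)
{
	uint32_t max_avg_slicesize = ((bitrate_bps / fps) / 8) / 10;

	return requested > max_avg_slicesize ? requested : max_avg_slicesize;
}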
@@ -2468,7 +2843,8 @@
 {
 	int rc = 0;
 	struct hfi_device *hdev;
-	struct v4l2_ctrl *ctrl;
+	struct v4l2_ctrl *ctrl = NULL;
+	struct v4l2_ctrl *rc_mode = NULL;
 	struct hfi_intra_refresh intra_refresh;
 
 	if (!inst || !inst->core) {
@@ -2477,6 +2853,11 @@
 	}
 	hdev = inst->core->device;
 
+	rc_mode = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_BITRATE_MODE);
+	if (!(rc_mode->val == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR ||
+		rc_mode->val == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR))
+		return 0;
+
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_RANDOM);
 	intra_refresh.mbs = 0;
 	if (ctrl->val) {
@@ -2520,6 +2901,9 @@
 	}
 	hdev = inst->core->device;
 
+	if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_H264)
+		return 0;
+
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE);
 	ctrl_a = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA);
 	ctrl_b = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA);
@@ -2554,6 +2938,10 @@
 	}
 	hdev = inst->core->device;
 
+	if (!(inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264 ||
+		inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_HEVC))
+		return 0;
+
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR);
 	if (ctrl->val)
 		enable.enable = true;
@@ -2583,6 +2971,10 @@
 	}
 	hdev = inst->core->device;
 
+	if (!(inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264 ||
+		inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_HEVC))
+		return 0;
+
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_AU_DELIMITER);
 	enable.enable = !!ctrl->val;
 
@@ -2612,10 +3004,6 @@
 	if (inst->rc_type != V4L2_MPEG_VIDEO_BITRATE_MODE_VBR)
 		return 0;
 
-	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_B_FRAMES);
-	if (ctrl->val)
-		return 0;
-
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_LTRCOUNT);
 	if (ctrl->val)
 		return 0;
@@ -2629,6 +3017,8 @@
 	/*
 	 * Hybrid HP is enabled only for H264 when
 	 * LTR and B-frame are both disabled,
+	 * (layer encoding takes priority over B-frames,
+	 * so B-frames need no explicit check here),
 	 * Rate control type is VBR and
 	 * Max layer equals layer count.
 	 */
@@ -2638,11 +3028,12 @@
 	return 0;
 }
 
-int msm_venc_set_base_layer_id(struct msm_vidc_inst *inst)
+int msm_venc_set_base_layer_priority_id(struct msm_vidc_inst *inst)
 {
 	int rc = 0;
 	struct hfi_device *hdev;
-	struct v4l2_ctrl *ctrl;
+	struct v4l2_ctrl *ctrl = NULL;
+	struct v4l2_ctrl *max_layer = NULL;
 	u32 baselayerid;
 
 	if (!inst || !inst->core) {
@@ -2651,9 +3042,13 @@
 	}
 	hdev = inst->core->device;
 
-	if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_H264 &&
-		inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_HEVC)
+	max_layer = get_ctrl(inst,
+		V4L2_CID_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER);
+	if (max_layer->val <= 0) {
+		dprintk(VIDC_DBG, "%s: Layer id can only be set with Hierp\n",
+			__func__);
 		return 0;
+	}
 
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_BASELAYER_ID);
 	baselayerid = ctrl->val;
@@ -2703,10 +3098,14 @@
 		hp_layer = ctrl->val - 1;
 
 	if (inst->hybrid_hp) {
+		dprintk(VIDC_DBG, "%s: Hybrid hierp layer: %d\n",
+			__func__, hp_layer);
 		rc = call_hfi_op(hdev, session_set_property, inst->session,
 			HFI_PROPERTY_PARAM_VENC_HIER_P_HYBRID_MODE,
 			&hp_layer, sizeof(hp_layer));
 	} else {
+		dprintk(VIDC_DBG, "%s: Hierp max layer: %d\n",
+			__func__, hp_layer);
 		rc = call_hfi_op(hdev, session_set_property, inst->session,
 			HFI_PROPERTY_PARAM_VENC_HIER_P_MAX_NUM_ENH_LAYER,
 			&hp_layer, sizeof(hp_layer));
@@ -2722,6 +3121,7 @@
 	int rc = 0;
 	struct hfi_device *hdev;
 	struct v4l2_ctrl *ctrl = NULL;
+	struct v4l2_ctrl *max_layer = NULL;
 	u32 hp_layer = 0;
 
 	if (!inst || !inst->core) {
@@ -2741,8 +3141,18 @@
 		return 0;
 	}
 
+	max_layer = get_ctrl(inst,
+		V4L2_CID_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER);
 	ctrl = get_ctrl(inst,
 		V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER);
+
+	if (max_layer->val < ctrl->val) {
+		dprintk(VIDC_WARN,
+			"%s: HP layer count greater than max isn't allowed\n",
+			__func__);
+		return 0;
+	}
+
 	/*
 	 * We send enhancement layer count to FW,
 	 * hence, input 0/1 indicates absence of layer encoding.
@@ -2750,7 +3160,8 @@
 	if (ctrl->val)
 		hp_layer = ctrl->val - 1;
 
-	dprintk(VIDC_DBG, "%s: HP layer: %d\n", __func__, hp_layer);
+	dprintk(VIDC_DBG, "%s: Hierp enhancement layer: %d\n",
+		__func__, hp_layer);
 	rc = call_hfi_op(hdev, session_set_property, inst->session,
 		HFI_PROPERTY_CONFIG_VENC_HIER_P_ENH_LAYER,
 		&hp_layer, sizeof(hp_layer));
@@ -2806,7 +3217,8 @@
 	}
 	hdev = inst->core->device;
 
-	if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_H264)
+	if (!(inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264 ||
+		inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_HEVC))
 		return 0;
 
 	ctrl_cs = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_COLOR_SPACE);
@@ -2876,8 +3288,8 @@
 {
 	int rc = 0;
 	struct hfi_device *hdev;
-	struct v4l2_ctrl *ctrl;
-	struct v4l2_ctrl *profile;
+	struct v4l2_ctrl *ctrl = NULL;
+	struct v4l2_ctrl *profile = NULL;
 	struct hfi_enable enable;
 
 	if (!inst || !inst->core) {
@@ -2886,17 +3298,13 @@
 	}
 	hdev = inst->core->device;
 
-	if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_H264 &&
-		inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_HEVC)
+	if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_H264)
 		return 0;
 
-	if (inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264) {
-		profile = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_H264_PROFILE);
-		if (profile->val == V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE ||
-			profile->val ==
-			V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE)
-			return 0;
-	}
+	profile = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_H264_PROFILE);
+	if (!(profile->val == V4L2_MPEG_VIDEO_H264_PROFILE_HIGH ||
+		profile->val == V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH))
+		return 0;
 
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM);
 	enable.enable = !!ctrl->val;
@@ -3020,6 +3428,10 @@
 	}
 	hdev = inst->core->device;
 
+	if (!(inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_HEVC ||
+		inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264))
+		return 0;
+
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_LTRCOUNT);
 	if (!ctrl->val)
 		return 0;
@@ -3054,6 +3466,10 @@
 	}
 	hdev = inst->core->device;
 
+	if (!(inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_HEVC ||
+		inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264))
+		return 0;
+
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_USELTRFRAME);
 	use_ltr.ref_ltr = ctrl->val;
 	use_ltr.use_constrnt = false;
@@ -3081,6 +3497,10 @@
 	}
 	hdev = inst->core->device;
 
+	if (!(inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_HEVC ||
+		inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264))
+		return 0;
+
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_MARKLTRFRAME);
 	mark_ltr.mark_frame = ctrl->val;
 
@@ -3145,6 +3565,10 @@
 	}
 	hdev = inst->core->device;
 
+	if (!(inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264 ||
+		inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_HEVC))
+		return 0;
+
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH);
 	if (!ctrl->val)
 		return 0;
@@ -3195,6 +3619,7 @@
 int msm_venc_set_hdr_info(struct msm_vidc_inst *inst)
 {
 	int rc = 0;
+	struct v4l2_ctrl *profile = NULL;
 	struct hfi_device *hdev;
 
 	if (!inst || !inst->core) {
@@ -3203,6 +3628,13 @@
 	}
 	hdev = inst->core->device;
 
+	if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_HEVC)
+		return 0;
+
+	profile = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_PROFILE);
+	if (profile->val != V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10)
+		return 0;
+
 	/* No conversion to HFI needed as both structures are same */
 	dprintk(VIDC_DBG, "%s: setting hdr info\n", __func__);
 	rc = call_hfi_op(hdev, session_set_property, inst->session,
@@ -3218,6 +3650,8 @@
 {
 	int rc = 0;
 	struct v4l2_ctrl *ctrl;
+	struct v4l2_ctrl *cvp_ctrl;
+	u32 value = 0x0;
 
 	ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA);
 	if (ctrl->val == EXTRADATA_NONE) {
@@ -3235,10 +3669,6 @@
 		}
 	}
 
-	/* Always enable default extradata */
-	rc = msm_comm_set_extradata(inst,
-			HFI_PROPERTY_PARAM_VENC_CVP_METADATA_EXTRADATA, 0x1);
-
 	if (ctrl->val & EXTRADATA_ADVANCED)
 		// Enable Advanced Extradata - LTR Info
 		msm_comm_set_extradata(inst,
@@ -3258,6 +3688,26 @@
 		}
 	}
 
+	cvp_ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VENC_CVP_DISABLE);
+	if (cvp_ctrl->val == V4L2_MPEG_MSM_VIDC_ENABLE) {
+		if (ctrl->val & EXTRADATA_ENC_INPUT_CVP) {
+			dprintk(VIDC_ERR,
+				"%s: invalid params\n", __func__);
+			return -EINVAL;
+		}
+	} else {
+		/*
+		 * For now, enable CVP metadata only if client provides it.
+		 * Once the kernel-mode CVP metadata implementation
+		 * is completed, this condition should be removed.
+		 */
+		if (ctrl->val & EXTRADATA_ENC_INPUT_CVP)
+			value = 0x1;
+	}
+	rc = msm_comm_set_extradata(inst,
+		HFI_PROPERTY_PARAM_VENC_CVP_METADATA_EXTRADATA, value);
+
 	return rc;
 }
 
@@ -3271,37 +3721,28 @@
 	rc = msm_venc_set_frame_rate(inst);
 	if (rc)
 		goto exit;
-	rc = msm_venc_set_color_format(inst);
-	if (rc)
-		goto exit;
-	rc = msm_venc_set_buffer_counts(inst);
-	if (rc)
-		goto exit;
-	rc = msm_venc_set_operating_rate(inst);
-	if (rc)
-		goto exit;
 	rc = msm_venc_set_secure_mode(inst);
 	if (rc)
 		goto exit;
 	rc = msm_venc_set_priority(inst);
 	if (rc)
 		goto exit;
+	rc = msm_venc_set_color_format(inst);
+	if (rc)
+		goto exit;
+	rc = msm_venc_set_sequence_header_mode(inst);
+	if (rc)
+		goto exit;
 	rc = msm_venc_set_profile_level(inst);
 	if (rc)
 		goto exit;
-	/*
-	 * set adaptive bframes before intra period as
-	 * intra period setting may enable adaptive bframes
-	 * if bframes are present (even though client might not
-	 * have enabled adaptive bframes setting)
-	 */
-	rc = msm_venc_set_adaptive_bframes(inst);
+	rc = msm_venc_set_8x8_transform(inst);
 	if (rc)
 		goto exit;
-	rc = msm_venc_set_intra_period(inst);
+	rc = msm_venc_set_bitrate(inst);
 	if (rc)
 		goto exit;
-	rc = msm_venc_set_idr_period(inst);
+	rc = msm_venc_set_entropy_mode(inst);
 	if (rc)
 		goto exit;
 	rc = msm_venc_set_rate_control(inst);
@@ -3310,9 +3751,6 @@
 	rc = msm_venc_set_input_timestamp_rc(inst);
 	if (rc)
 		goto exit;
-	rc = msm_venc_set_bitrate(inst);
-	if (rc)
-		goto exit;
 	rc = msm_venc_set_frame_qp(inst);
 	if (rc)
 		goto exit;
@@ -3325,45 +3763,30 @@
 	rc = msm_venc_set_grid(inst);
 	if (rc)
 		goto exit;
-	rc = msm_venc_set_entropy_mode(inst);
-	if (rc)
-		goto exit;
-	rc = msm_venc_set_slice_control_mode(inst);
-	if (rc)
-		goto exit;
-	rc = msm_venc_set_intra_refresh_mode(inst);
-	if (rc)
-		goto exit;
-	rc = msm_venc_set_loop_filter_mode(inst);
-	if (rc)
-		goto exit;
-	rc = msm_venc_set_sequence_header_mode(inst);
-	if (rc)
-		goto exit;
 	rc = msm_venc_set_au_delimiter_mode(inst);
 	if (rc)
 		goto exit;
-	rc = msm_venc_set_base_layer_id(inst);
-	if (rc)
-		goto exit;
-	rc = msm_venc_set_vpx_error_resilience(inst);
-	if (rc)
-		goto exit;
-	rc = msm_venc_set_video_signal_info(inst);
-	if (rc)
-		goto exit;
-	rc = msm_venc_set_video_csc(inst);
-	if (rc)
-		goto exit;
-	rc = msm_venc_set_8x8_transform(inst);
-	if (rc)
-		goto exit;
 	rc = msm_venc_set_vui_timing_info(inst);
 	if (rc)
 		goto exit;
+	rc = msm_venc_set_hdr_info(inst);
+	if (rc)
+		goto exit;
+	rc = msm_venc_set_vpx_error_resilience(inst);
+	if (rc)
+		goto exit;
 	rc = msm_venc_set_nal_stream_format(inst);
 	if (rc)
 		goto exit;
+	rc = msm_venc_set_slice_control_mode(inst);
+	if (rc)
+		goto exit;
+	rc = msm_venc_set_loop_filter_mode(inst);
+	if (rc)
+		goto exit;
+	rc = msm_venc_set_intra_refresh_mode(inst);
+	if (rc)
+		goto exit;
 	rc = msm_venc_set_ltr_mode(inst);
 	if (rc)
 		goto exit;
@@ -3373,19 +3796,47 @@
 	rc = msm_venc_set_hp_layer(inst);
 	if (rc)
 		goto exit;
+	rc = msm_venc_set_base_layer_priority_id(inst);
+	if (rc)
+		goto exit;
+	msm_venc_decide_bframe(inst);
+	rc = msm_venc_set_idr_period(inst);
+	if (rc)
+		goto exit;
+	rc = msm_venc_set_intra_period(inst);
+	if (rc)
+		goto exit;
 	rc = msm_venc_set_aspect_ratio(inst);
 	if (rc)
 		goto exit;
+	rc = msm_venc_set_video_signal_info(inst);
+	if (rc)
+		goto exit;
+	/*
+	 * Layer bitrate is preferred over cumulative bitrate.
+	 * Cumulative bitrate is set only when we fall back.
+	 */
+	rc = msm_venc_set_layer_bitrate(inst);
+	if (rc)
+		goto exit;
+	rc = msm_venc_set_bitrate(inst);
+	if (rc)
+		goto exit;
+	rc = msm_venc_set_video_csc(inst);
+	if (rc)
+		goto exit;
 	rc = msm_venc_set_blur_resolution(inst);
 	if (rc)
 		goto exit;
-	rc = msm_venc_set_hdr_info(inst);
-	if (rc)
-		goto exit;
 	rc = msm_venc_set_extradata(inst);
 	if (rc)
 		goto exit;
-
+	rc = msm_venc_set_operating_rate(inst);
+	if (rc)
+		goto exit;
+	rc = msm_venc_set_buffer_counts(inst);
+	if (rc)
+		goto exit;
 exit:
 	if (rc)
 		dprintk(VIDC_ERR, "%s: failed with %d\n", __func__, rc);
diff --git a/drivers/media/platform/msm/vidc/msm_venc.h b/drivers/media/platform/msm/vidc/msm_venc.h
index 227cd5b..9b45320 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.h
+++ b/drivers/media/platform/msm/vidc/msm_venc.h
@@ -22,6 +22,7 @@
 int msm_venc_set_extradata(struct msm_vidc_inst *inst);
 int msm_venc_set_frame_rate(struct msm_vidc_inst *inst);
 int msm_venc_set_bitrate(struct msm_vidc_inst *inst);
+int msm_venc_set_layer_bitrate(struct msm_vidc_inst *inst);
 int msm_venc_set_operating_rate(struct msm_vidc_inst *inst);
 int msm_venc_set_idr_period(struct msm_vidc_inst *inst);
 int msm_venc_set_intra_period(struct msm_vidc_inst *inst);
@@ -32,4 +33,5 @@
 int msm_venc_set_intra_refresh_mode(struct msm_vidc_inst *inst);
 int msm_venc_set_hp_max_layer(struct msm_vidc_inst *inst);
 int msm_venc_set_hp_layer(struct msm_vidc_inst *inst);
+int msm_venc_set_base_layer_priority_id(struct msm_vidc_inst *inst);
 #endif
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 39abb87..cf2d7f1 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -204,6 +204,9 @@
 		}
 		break;
 	}
+	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER:
+		msm_vidc_ctrl_get_range(ctrl, &inst->capability.hier_p);
+		break;
 	default:
 		rc = -EINVAL;
 	}
@@ -794,64 +797,6 @@
 	return rc;
 }
 
-int msm_vidc_set_internal_config(struct msm_vidc_inst *inst)
-{
-	int rc = 0;
-	u32 rc_mode = RATE_CONTROL_OFF;
-	struct hfi_vbv_hdr_buf_size hrd_buf_size;
-	struct hfi_enable latency;
-	struct hfi_device *hdev;
-	u32 codec;
-	u32 mbps, fps;
-	u32 output_width, output_height;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(VIDC_WARN, "%s: Invalid parameter\n", __func__);
-		return -EINVAL;
-	}
-
-	if (inst->session_type != MSM_VIDC_ENCODER)
-		return rc;
-
-	hdev = inst->core->device;
-
-	codec = inst->fmts[CAPTURE_PORT].fourcc;
-	latency.enable =  msm_comm_g_ctrl_for_id(inst,
-			V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_MODE);
-
-	if (inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_MBR_VFR)
-		rc_mode = V4L2_MPEG_VIDEO_BITRATE_MODE_MBR;
-	else if (inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR &&
-			   latency.enable == V4L2_MPEG_MSM_VIDC_ENABLE &&
-			   codec != V4L2_PIX_FMT_VP8)
-		rc_mode = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR;
-
-	output_height = inst->prop.height[CAPTURE_PORT];
-	output_width = inst->prop.width[CAPTURE_PORT];
-	fps = inst->clk_data.frame_rate >> 16;
-	mbps = NUM_MBS_PER_SEC(output_height, output_width, fps);
-	if ((rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR ||
-		 rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR) &&
-		(codec != V4L2_PIX_FMT_VP8)) {
-		if ((rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR &&
-		    mbps <= CBR_MB_LIMIT) ||
-		   (rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR &&
-		    mbps <= CBR_VFR_MB_LIMIT))
-			hrd_buf_size.vbv_hdr_buf_size = 500;
-		else
-			hrd_buf_size.vbv_hdr_buf_size = 1000;
-		dprintk(VIDC_DBG, "Enable hdr_buf_size %d :\n",
-				hrd_buf_size.vbv_hdr_buf_size);
-		rc = call_hfi_op(hdev, session_set_property,
-			(void *)inst->session,
-			HFI_PROPERTY_CONFIG_VENC_VBV_HRD_BUF_SIZE,
-			(void *)&hrd_buf_size, sizeof(hrd_buf_size));
-		inst->clk_data.low_latency_mode = true;
-	}
-
-	return rc;
-}
-
 static int msm_vidc_set_rotation(struct msm_vidc_inst *inst)
 {
 	int rc = 0;
@@ -994,14 +939,6 @@
 		goto fail_start;
 	}
 
-	rc = msm_vidc_set_internal_config(inst);
-	if (rc) {
-		dprintk(VIDC_ERR,
-			"Set internal config failed %pK\n", inst);
-		goto fail_start;
-	}
-
-
 	/* Decide work mode for current session */
 	rc = call_core_op(inst->core, decide_work_mode, inst);
 	if (rc) {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.c b/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.c
index 949a8a9..547ec1e 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.c
@@ -238,6 +238,7 @@
 #define CCE_TILE_OFFSET_SIZE ALIGN(32 * 4 * 4, BUFFER_ALIGNMENT_SIZE(32))
 
 #define QMATRIX_SIZE (sizeof(u32) * 128 + 256)
+#define MP2D_QPDUMP_SIZE 115200
 
 #define HFI_IRIS2_ENC_PERSIST_SIZE 102400
 
@@ -252,6 +253,7 @@
 #define SYSTEM_LAL_TILE10 192
 #define NUM_MBS_720P (((1280 + 15) >> 4) * ((720 + 15) >> 4))
 #define NUM_MBS_4k (((4096 + 15) >> 4) * ((2304 + 15) >> 4))
+#define MB_SIZE_IN_PIXEL (16 * 16)
 
 static inline u32 calculate_h264d_scratch_size(struct msm_vidc_inst *inst,
 	u32 width, u32 height, bool is_interlaced);
@@ -584,8 +586,9 @@
 	inst->buffer_size_calculators = NULL;
 	core = inst->core;
 
-	/* Change this to IRIS2 once firmware is ready with changes */
-	if (core->platform_data->vpu_ver == VPU_VERSION_AR50)
+	/* Only decoder is enabled for now */
+	if ((core->platform_data->vpu_ver == VPU_VERSION_IRIS2) &&
+		(inst->session_type == MSM_VIDC_DECODER))
 		inst->buffer_size_calculators =
 			msm_vidc_calculate_internal_buffer_sizes;
 }
@@ -765,7 +768,7 @@
 			div_factor = 2;
 	}
 
-	frame_size = base_res_mbs * 3 / 2 / div_factor;
+	frame_size = base_res_mbs * MB_SIZE_IN_PIXEL * 3 / 2 / div_factor;
 	 /* multiply by 10/8 (1.25) to get size for 10 bit case */
 	if ((inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_VP9) ||
 		(inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_HEVC))
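
A magnitude check for the MB_SIZE_IN_PIXEL fix above: base_res_mbs counts 16x16 macroblocks, so the pixel count must be scaled by 256 before applying the NV12 3/2 bytes-per-pixel factor (div_factor then scales the result down for the compressed buffer).

#include <stdint.h>

#define MB_SIZE_IN_PIXEL (16 * 16)

/* For 1080p, 8160 MBs: 8160 * 256 * 3 / 2 = 3133440 bytes (~3 MB), the
 * expected raw frame size; without the factor it was 256x too small. */
static uint32_t nv12_frame_bytes(uint32_t num_mbs)
{
	return num_mbs * MB_SIZE_IN_PIXEL * 3 / 2;
}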
@@ -854,18 +857,14 @@
 	return (((lcu_width + 7) >> 3) << 3) * lcu_height * 2;
 }
 
-u32 msm_vidc_calculate_enc_input_extra_size(struct msm_vidc_inst *inst)
+u32 msm_vidc_calculate_enc_input_extra_size(struct msm_vidc_inst *inst,
+	u32 extra_types)
 {
 	u32 size = 0;
 	u32 width = inst->prop.width[OUTPUT_PORT];
 	u32 height = inst->prop.height[OUTPUT_PORT];
 	u32 extradata_count = 0;
-	u32 extra_types;
-	struct v4l2_ctrl *extradata_ctrl;
 
-	extradata_ctrl = get_ctrl(inst,
-			V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA);
-	extra_types = extradata_ctrl->val;
 	/* Add size for default extradata */
 	size += sizeof(struct msm_vidc_enc_cvp_metadata_payload);
 	extradata_count++;
@@ -1717,5 +1716,5 @@
 
 static inline u32 calculate_mpeg2d_persist1_size(void)
 {
-	return QMATRIX_SIZE;
+	return QMATRIX_SIZE + MP2D_QPDUMP_SIZE;
 }
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.h b/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.h
index cddae12..74712f7 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.h
@@ -34,7 +34,8 @@
 u32 msm_vidc_calculate_dec_output_extra_size(struct msm_vidc_inst *inst);
 u32 msm_vidc_calculate_enc_input_frame_size(struct msm_vidc_inst *inst);
 u32 msm_vidc_calculate_enc_output_frame_size(struct msm_vidc_inst *inst);
-u32 msm_vidc_calculate_enc_input_extra_size(struct msm_vidc_inst *inst);
+u32 msm_vidc_calculate_enc_input_extra_size(struct msm_vidc_inst *inst,
+	u32 extra_types);
 u32 msm_vidc_calculate_enc_output_extra_size(struct msm_vidc_inst *inst);
 u32 msm_vidc_set_buffer_count_for_thumbnail(struct msm_vidc_inst *inst);
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index 20670d7..b0006ac 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -58,7 +58,7 @@
 	struct ubwc_cr_stats_info_type ubwc_stats_info)
 {
 	unsigned long sum = 0, weighted_sum = 0;
-	unsigned long compression_ratio = 1 << 16;
+	unsigned long compression_ratio = 0;
 
 	weighted_sum =
 		32  * ubwc_stats_info.cr_stats_info0 +
@@ -1272,6 +1272,7 @@
 	int rc = 0;
 	struct hfi_device *hdev;
 	struct hfi_video_work_route pdata;
+	bool cbr_plus;
 
 	if (!inst || !inst->core || !inst->core->device) {
 		dprintk(VIDC_ERR,
@@ -1281,45 +1282,39 @@
 	}
 
 	hdev = inst->core->device;
-
+	cbr_plus = inst->clk_data.is_cbr_plus;
 	pdata.video_work_route = 4;
+
 	if (inst->session_type == MSM_VIDC_DECODER) {
 		if (inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_MPEG2 ||
 			inst->pic_struct != MSM_VIDC_PIC_STRUCT_PROGRESSIVE)
 			pdata.video_work_route = 1;
 	} else if (inst->session_type == MSM_VIDC_ENCODER) {
-		u32 slice_mode, rc_mode;
-		u32 output_width, output_height, fps, mbps;
-		bool cbr_plus;
+		u32 slice_mode, output_width, output_height, num_mbs;
+		bool is_1080p_above;
 
-		if (inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_VP8) {
-			pdata.video_work_route = 1;
-			goto decision_done;
-		}
-
-		rc_mode = msm_comm_g_ctrl_for_id(inst,
-			V4L2_CID_MPEG_VIDEO_BITRATE_MODE);
 		slice_mode =  msm_comm_g_ctrl_for_id(inst,
 				V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE);
-		output_height = inst->prop.height[CAPTURE_PORT];
-		output_width = inst->prop.width[CAPTURE_PORT];
-		fps = inst->clk_data.frame_rate >> 16;
-		mbps = NUM_MBS_PER_SEC(output_height, output_width, fps);
-		cbr_plus = ((rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR &&
-			mbps > CBR_MB_LIMIT) ||
-			(rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR &&
-			mbps > CBR_VFR_MB_LIMIT));
+		output_height = inst->prop.height[OUTPUT_PORT];
+		output_width = inst->prop.width[OUTPUT_PORT];
+		num_mbs = NUM_MBS_PER_FRAME(output_height, output_width);
+
+		is_1080p_above =
+			((output_height > 1088 && output_width > 1920) ||
+			 (output_height > 1920 && output_width > 1088) ||
+			 num_mbs > NUM_MBS_PER_FRAME(1088, 1920));
+
 		if (slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES ||
-			((mbps <= NUM_MBS_PER_SEC(1920, 1088, 60)) && !cbr_plus)
-			) {
+			inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_VP8 ||
+			(!is_1080p_above && !cbr_plus)) {
 			pdata.video_work_route = 1;
-			dprintk(VIDC_DBG, "Configured work route = 1");
 		}
 	} else {
 		return -EINVAL;
 	}
 
-decision_done:
+	dprintk(VIDC_DBG, "Configuring work route = %u\n",
+			pdata.video_work_route);
 
 	inst->clk_data.work_route = pdata.video_work_route;
 	rc = call_hfi_op(hdev, session_set_property,
@@ -1486,6 +1481,7 @@
 	struct hfi_video_work_mode pdata;
 	struct hfi_enable latency;
 	u32 num_mbs = 0;
+	u32 width, height;
 
 	if (!inst || !inst->core || !inst->core->device) {
 		dprintk(VIDC_ERR,
@@ -1496,28 +1492,52 @@
 
 	hdev = inst->core->device;
 	pdata.video_work_mode = HFI_WORKMODE_2;
+	latency.enable = inst->clk_data.low_latency_mode;
 
-	if (inst->clk_data.low_latency_mode) {
-		pdata.video_work_mode = HFI_WORKMODE_1;
-		dprintk(VIDC_DBG, "Configured work mode = 1");
-	} else if (inst->session_type == MSM_VIDC_DECODER) {
-		num_mbs = NUM_MBS_PER_FRAME(
-					inst->prop.height[OUTPUT_PORT],
-					inst->prop.width[OUTPUT_PORT]);
+	if (inst->session_type == MSM_VIDC_DECODER) {
+		height = inst->prop.height[CAPTURE_PORT];
+		width = inst->prop.width[CAPTURE_PORT];
+		num_mbs = NUM_MBS_PER_FRAME(height, width);
 		if (inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_MPEG2 ||
-			(inst->pic_struct != MSM_VIDC_PIC_STRUCT_PROGRESSIVE) ||
-			(num_mbs < NUM_MBS_PER_FRAME(720, 1280)))
+			inst->pic_struct != MSM_VIDC_PIC_STRUCT_PROGRESSIVE ||
+			inst->clk_data.low_latency_mode ||
+			(width < 1280 && height < 720) ||
+			(width < 720 && height < 1280) ||
+			num_mbs < NUM_MBS_PER_FRAME(720, 1280)) {
 			pdata.video_work_mode = HFI_WORKMODE_1;
+		}
 	} else if (inst->session_type == MSM_VIDC_ENCODER) {
-		if (inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_VP8) {
+		height = inst->prop.height[OUTPUT_PORT];
+		width = inst->prop.width[OUTPUT_PORT];
+		num_mbs = NUM_MBS_PER_FRAME(height, width);
+		if ((num_mbs >= NUM_MBS_PER_FRAME(2160, 4096) ||
+			(width < 4096 && height < 2160) ||
+			(width < 2160 && height < 4096)) &&
+			(inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_VP8 ||
+			 inst->clk_data.low_latency_mode)) {
 			pdata.video_work_mode = HFI_WORKMODE_1;
 			/* For WORK_MODE_1, set Low Latency mode by default */
-			inst->clk_data.low_latency_mode = true;
+			latency.enable = true;
 		}
 	} else {
 		return -EINVAL;
 	}
 
+	dprintk(VIDC_DBG, "Configuring work mode = %u low latency = %u\n",
+			pdata.video_work_mode,
+			latency.enable);
+
+	rc = call_hfi_op(hdev, session_set_property,
+		(void *)inst->session,
+		HFI_PROPERTY_PARAM_VENC_LOW_LATENCY_MODE,
+		(void *)&latency, sizeof(latency));
+	if (rc)
+		dprintk(VIDC_WARN,
+			" Failed to configure low latency %pK\n", inst);
+	else
+		inst->clk_data.low_latency_mode = latency.enable;
+
+
 	inst->clk_data.work_mode = pdata.video_work_mode;
 	rc = call_hfi_op(hdev, session_set_property,
 			(void *)inst->session, HFI_PROPERTY_PARAM_WORK_MODE,
@@ -1526,15 +1546,6 @@
 		dprintk(VIDC_WARN,
 			" Failed to configure Work Mode %pK\n", inst);
 
-	if (inst->clk_data.low_latency_mode &&
-		inst->session_type == MSM_VIDC_ENCODER){
-		latency.enable = true;
-		rc = call_hfi_op(hdev, session_set_property,
-			(void *)inst->session,
-			HFI_PROPERTY_PARAM_VENC_LOW_LATENCY_MODE,
-			(void *)&latency, sizeof(latency));
-	}
-
 	return rc;
 }
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index f96c57d..35586d7 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -302,6 +302,7 @@
 	struct session_crop crop_info;
 	u32 fps;
 	u32 bitrate;
+	bool bframe_changed;
 };
 
 struct buf_queue {
@@ -361,6 +362,7 @@
 	u32 opb_fourcc;
 	u32 work_mode;
 	bool low_latency_mode;
+	bool is_cbr_plus;
 	bool turbo_mode;
 	u32 work_route;
 	u32 dcvs_flags;
@@ -486,6 +488,7 @@
 	u32 frame_quality;
 	u32 rc_type;
 	u32 hybrid_hp;
+	u32 layer_bitrate;
 	u32 client_set_ctrls;
 	struct internal_buf *dpb_extra_binfo;
 	struct msm_vidc_codec_data *codec_data;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_platform.c b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
index cb6f5e3..a1df90b 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_platform.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
@@ -161,7 +161,7 @@
 	},
 	{
 		.key = "qcom,sw-power-collapse",
-		.value = 1,
+		.value = 0,
 	},
 	{
 		.key = "qcom,domain-attr-non-fatal-faults",
@@ -196,16 +196,16 @@
 		.value = 60,
 	},
 	{
-		.key = "qcom,max-b-frame-size",
-		.value = 8160,
+		.key = "qcom,max-b-frame-mbs-per-frame",
+		.value = 32400, /* 3840x2160/256 */
 	},
 	{
-		.key = "qcom,max-b-frames-per-sec",
-		.value = 60,
+		.key = "qcom,max-b-frame-mbs-per-sec",
+		.value = 1944000, /* 3840x2160/256 MBs@60fps */
 	},
 	{
 		.key = "qcom,power-collapse-delay",
-		.value = 1500,
+		.value = 15000,
 	},
 	{
 		.key = "qcom,hw-resp-timeout",
@@ -229,11 +229,11 @@
 	},
 	{
 		.key = "qcom,fw-cycles",
-		.value = 760000,
+		.value = 326389,
 	},
 	{
 		.key = "qcom,fw-vpp-cycles",
-		.value = 166667,
+		.value = 44156,
 	},
 };
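
The derivation behind the new 4K-capable B-frame limits, as a tiny standalone check:

#include <stdio.h>

/* 4K UHD is (3840/16) * (2160/16) = 240 * 135 = 32400 macroblocks per
 * frame, and 60 fps of that is 32400 * 60 = 1944000 MBs per second. */
int main(void)
{
	unsigned mbs_per_frame = (3840 / 16) * (2160 / 16);
	unsigned mbs_per_sec = mbs_per_frame * 60;

	printf("%u %u\n", mbs_per_frame, mbs_per_sec); /* 32400 1944000 */
	return 0;
}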
 
@@ -267,12 +267,12 @@
 		.value = 60,
 	},
 	{
-		.key = "qcom,max-b-frame-size",
+		.key = "qcom,max-b-frame-mbs-per-frame",
 		.value = 8160,
 	},
 	{
-		.key = "qcom,max-b-frames-per-sec",
-		.value = 60,
+		.key = "qcom,max-b-frame-mbs-per-sec",
+		.value = 489600,
 	},
 	{
 		.key = "qcom,power-collapse-delay",
@@ -338,12 +338,12 @@
 		.value = 60,
 	},
 	{
-		.key = "qcom,max-b-frame-size",
+		.key = "qcom,max-b-frame-mbs-per-frame",
 		.value = 8160,
 	},
 	{
-		.key = "qcom,max-b-frames-per-sec",
-		.value = 60,
+		.key = "qcom,max-b-frame-mbs-per-sec",
+		.value = 489600,
 	},
 	{
 		.key = "qcom,power-collapse-delay",
@@ -413,12 +413,12 @@
 		.value = 60,
 	},
 	{
-		.key = "qcom,max-b-frame-size",
+		.key = "qcom,max-b-frame-mbs-per-frame",
 		.value = 8160,
 	},
 	{
-		.key = "qcom,max-b-frames-per-sec",
-		.value = 60,
+		.key = "qcom,max-b-frame-mbs-per-sec",
+		.value = 489600,
 	},
 	{
 		.key = "qcom,power-collapse-delay",
@@ -468,12 +468,12 @@
 		.value = 60,
 	},
 	{
-		.key = "qcom,max-b-frame-size",
+		.key = "qcom,max-b-frame-mbs-per-frame",
 		.value = 8160,
 	},
 	{
-		.key = "qcom,max-b-frames-per-sec",
-		.value = 60,
+		.key = "qcom,max-b-frame-mbs-per-sec",
+		.value = 489600,
 	},
 	{
 		.key = "qcom,power-collapse-delay",
@@ -519,12 +519,12 @@
 		.value = 60,
 	},
 	{
-		.key = "qcom,max-b-frame-size",
+		.key = "qcom,max-b-frame-mbs-per-frame",
 		.value = 8160,
 	},
 	{
-		.key = "qcom,max-b-frames-per-sec",
-		.value = 60,
+		.key = "qcom,max-b-frame-mbs-per-sec",
+		.value = 489600,
 	},
 	{
 		.key = "qcom,power-collapse-delay",
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index 8411bc0..2cf5640 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -784,6 +784,12 @@
 	res->max_hq_fps = find_key_value(platform_data,
 			"qcom,max-hq-frames-per-sec");
 
+	res->max_bframe_mbs_per_frame = find_key_value(platform_data,
+			"qcom,max-b-frame-mbs-per-frame");
+
+	res->max_bframe_mbs_per_sec = find_key_value(platform_data,
+			"qcom,max-b-frame-mbs-per-sec");
+
 	res->sw_power_collapsible = find_key_value(platform_data,
 			"qcom,sw-power-collapse");
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
index 431ff9d..69ed313 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
@@ -171,6 +171,8 @@
 	uint32_t max_load;
 	uint32_t max_hq_mbs_per_frame;
 	uint32_t max_hq_fps;
+	uint32_t max_bframe_mbs_per_frame;
+	uint32_t max_bframe_mbs_per_sec;
 	struct platform_device *pdev;
 	struct regulator_set regulator_set;
 	struct clock_set clock_set;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index ca87d3e..ac49c62 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -105,28 +105,28 @@
 static void interrupt_init_iris1(struct venus_hfi_device *device);
 static void setup_dsp_uc_memmap_iris1(struct venus_hfi_device *device);
 static void clock_config_on_enable_iris1(struct venus_hfi_device *device);
-static int prepare_ahb2axi_bridge(struct venus_hfi_device *device);
+static int reset_ahb2axi_bridge(struct venus_hfi_device *device);
 static int __set_ubwc_config(struct venus_hfi_device *device);
 
 struct venus_hfi_vpu_ops vpu4_ops = {
 	.interrupt_init = interrupt_init_vpu4,
 	.setup_dsp_uc_memmap = NULL,
 	.clock_config_on_enable = NULL,
-	.prepare_ahb2axi_bridge = NULL,
+	.reset_ahb2axi_bridge = NULL,
 };
 
 struct venus_hfi_vpu_ops iris1_ops = {
 	.interrupt_init = interrupt_init_iris1,
 	.setup_dsp_uc_memmap = setup_dsp_uc_memmap_iris1,
 	.clock_config_on_enable = clock_config_on_enable_iris1,
-	.prepare_ahb2axi_bridge = prepare_ahb2axi_bridge,
+	.reset_ahb2axi_bridge = reset_ahb2axi_bridge,
 };
 
 struct venus_hfi_vpu_ops iris2_ops = {
 	.interrupt_init = interrupt_init_iris1,
 	.setup_dsp_uc_memmap = NULL,
 	.clock_config_on_enable = NULL,
-	.prepare_ahb2axi_bridge = prepare_ahb2axi_bridge,
+	.reset_ahb2axi_bridge = reset_ahb2axi_bridge,
 };
 
 /**
@@ -570,7 +570,7 @@
 {
 	struct hfi_queue_header *queue;
 	u32 packet_size_in_words, new_write_idx;
-	u32 empty_space, read_idx;
+	u32 empty_space, read_idx, write_idx;
 	u32 *write_ptr;
 
 	if (!qinfo || !packet) {
@@ -593,16 +593,18 @@
 	}
 
 	packet_size_in_words = (*(u32 *)packet) >> 2;
-	if (!packet_size_in_words) {
-		dprintk(VIDC_ERR, "Zero packet size\n");
+	if (!packet_size_in_words || packet_size_in_words >
+		(qinfo->q_array.mem_size >> 2)) {
+		dprintk(VIDC_ERR, "Invalid packet size\n");
 		return -ENODATA;
 	}
 
 	read_idx = queue->qhdr_read_idx;
+	write_idx = queue->qhdr_write_idx;
 
-	empty_space = (queue->qhdr_write_idx >=  read_idx) ?
-		(queue->qhdr_q_size - (queue->qhdr_write_idx -  read_idx)) :
-		(read_idx - queue->qhdr_write_idx);
+	empty_space = (write_idx >= read_idx) ?
+		((qinfo->q_array.mem_size >> 2) - (write_idx - read_idx)) :
+		(read_idx - write_idx);
 	if (empty_space <= packet_size_in_words) {
 		queue->qhdr_tx_req =  1;
 		dprintk(VIDC_ERR, "Insufficient size (%d) to write (%d)\n",
@@ -612,13 +614,20 @@
 
 	queue->qhdr_tx_req =  0;
 
-	new_write_idx = (queue->qhdr_write_idx + packet_size_in_words);
+	new_write_idx = write_idx + packet_size_in_words;
 	write_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
-		(queue->qhdr_write_idx << 2));
-	if (new_write_idx < queue->qhdr_q_size) {
+			(write_idx << 2));
+	if (write_ptr < (u32 *)qinfo->q_array.align_virtual_addr ||
+	    write_ptr > (u32 *)(qinfo->q_array.align_virtual_addr +
+	    qinfo->q_array.mem_size)) {
+		dprintk(VIDC_ERR, "Invalid write index");
+		return -ENODATA;
+	}
+
+	if (new_write_idx < (qinfo->q_array.mem_size >> 2)) {
 		memcpy(write_ptr, packet, packet_size_in_words << 2);
 	} else {
-		new_write_idx -= queue->qhdr_q_size;
+		new_write_idx -= qinfo->q_array.mem_size >> 2;
 		memcpy(write_ptr, packet, (packet_size_in_words -
 			new_write_idx) << 2);
 		memcpy((void *)qinfo->q_array.align_virtual_addr,
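
The queue hardening above snapshots the shared read/write indices once and, evidently, stops trusting the firmware-writable qhdr_q_size, sizing everything from the host-known mem_size instead. The wrap arithmetic, restated for a generic ring (a sketch, not driver code):

#include <stdint.h>

/* Free space in a ring of `cap` 32-bit words. Rejecting a write when
 * free <= packet size keeps at least one word unused, so
 * read_idx == write_idx always means "empty", never "full". */
static uint32_t ring_free_words(uint32_t read_idx, uint32_t write_idx,
				uint32_t cap)
{
	return (write_idx >= read_idx) ? (cap - (write_idx - read_idx))
				       : (read_idx - write_idx);
}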
@@ -704,7 +713,8 @@
 	u32 packet_size_in_words, new_read_idx;
 	u32 *read_ptr;
 	u32 receive_request = 0;
-		int rc = 0;
+	u32 read_idx, write_idx;
+	int rc = 0;
 
 	if (!qinfo || !packet || !pb_tx_req_is_set) {
 		dprintk(VIDC_ERR, "Invalid Params\n");
@@ -737,7 +747,10 @@
 	if (queue->qhdr_type & HFI_Q_ID_CTRL_TO_HOST_MSG_Q)
 		receive_request = 1;
 
-	if (queue->qhdr_read_idx == queue->qhdr_write_idx) {
+	read_idx = queue->qhdr_read_idx;
+	write_idx = queue->qhdr_write_idx;
+
+	if (read_idx == write_idx) {
 		queue->qhdr_rx_req = receive_request;
 		/*
 		 * mb() to ensure qhdr is updated in main memory
@@ -754,21 +767,28 @@
 	}
 
 	read_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
-				(queue->qhdr_read_idx << 2));
+				(read_idx << 2));
+	if (read_ptr < (u32 *)qinfo->q_array.align_virtual_addr ||
+	    read_ptr > (u32 *)(qinfo->q_array.align_virtual_addr +
+	    qinfo->q_array.mem_size - sizeof(*read_ptr))) {
+		dprintk(VIDC_ERR, "Invalid read index\n");
+		return -ENODATA;
+	}
+
 	packet_size_in_words = (*read_ptr) >> 2;
 	if (!packet_size_in_words) {
 		dprintk(VIDC_ERR, "Zero packet size\n");
 		return -ENODATA;
 	}
 
-	new_read_idx = queue->qhdr_read_idx + packet_size_in_words;
-	if (((packet_size_in_words << 2) <= VIDC_IFACEQ_VAR_HUGE_PKT_SIZE)
-			&& queue->qhdr_read_idx <= queue->qhdr_q_size) {
-		if (new_read_idx < queue->qhdr_q_size) {
+	new_read_idx = read_idx + packet_size_in_words;
+	if (((packet_size_in_words << 2) <= VIDC_IFACEQ_VAR_HUGE_PKT_SIZE) &&
+		read_idx <= (qinfo->q_array.mem_size >> 2)) {
+		if (new_read_idx < (qinfo->q_array.mem_size >> 2)) {
 			memcpy(packet, read_ptr,
 					packet_size_in_words << 2);
 		} else {
-			new_read_idx -= queue->qhdr_q_size;
+			new_read_idx -= (qinfo->q_array.mem_size >> 2);
 			memcpy(packet, read_ptr,
 			(packet_size_in_words - new_read_idx) << 2);
 			memcpy(packet + ((packet_size_in_words -
@@ -779,18 +799,18 @@
 	} else {
 		dprintk(VIDC_WARN,
 			"BAD packet received, read_idx: %#x, pkt_size: %d\n",
-			queue->qhdr_read_idx, packet_size_in_words << 2);
+			read_idx, packet_size_in_words << 2);
 		dprintk(VIDC_WARN, "Dropping this packet\n");
-		new_read_idx = queue->qhdr_write_idx;
+		new_read_idx = write_idx;
 		rc = -ENODATA;
 	}
 
-	queue->qhdr_read_idx = new_read_idx;
-
-	if (queue->qhdr_read_idx != queue->qhdr_write_idx)
+	if (new_read_idx != write_idx)
 		queue->qhdr_rx_req = 0;
 	else
 		queue->qhdr_rx_req = receive_request;
+
+	queue->qhdr_read_idx = new_read_idx;
 	/*
 	 * mb() to ensure qhdr is updated in main memory
 	 * so that venus reads the updated header values
@@ -1237,6 +1257,7 @@
 
 	/* Enable interrupt before sending commands to venus */
 	__write_register(device, VIDC_CPU_CS_H2XSOFTINTEN, 0x1);
+	__write_register(device, VIDC_CPU_CS_X2RPMh, 0x0);
 
 	return rc;
 }
@@ -2257,7 +2278,7 @@
 	struct venus_hfi_device *device;
 	int rc = 0;
 
-	if (!session || !session->device || !pdata) {
+	if (!session || !session->device) {
 		dprintk(VIDC_ERR, "Invalid Params\n");
 		return -EINVAL;
 	}
@@ -3694,22 +3715,46 @@
 	if (!rst_set->reset_tbl)
 		return 0;
 
-	dprintk(VIDC_DBG, "%s reset_state %d\n", __func__, state);
+	dprintk(VIDC_DBG, "%s reset_state %d rst_set->count = %d\n",
+		__func__, state, rst_set->count);
+
 	for (i = 0; i < rst_set->count; i++) {
 		rst = rst_set->reset_tbl[i].rst;
 		switch (state) {
 		case INIT:
+			dprintk(VIDC_DBG, "%s reset_state name = %s %pK\n",
+				__func__, rst_set->reset_tbl[i].name, rst);
+
+			if (rst)
+				continue;
 			rst = devm_reset_control_get(&res->pdev->dev,
-						rst_set->reset_tbl[i].name);
+				rst_set->reset_tbl[i].name);
 			if (IS_ERR(rst))
 				rc = PTR_ERR(rst);
 
 			rst_set->reset_tbl[i].rst = rst;
 			break;
 		case ASSERT:
+			if (!rst) {
+				dprintk(VIDC_DBG,
+					"%s reset_state name = %s %pK\n",
+					__func__, rst_set->reset_tbl[i].name,
+					rst);
+				rc = -EINVAL;
+				goto failed_to_reset;
+			}
+
 			rc = reset_control_assert(rst);
 			break;
 		case DEASSERT:
+			if (!rst) {
+				dprintk(VIDC_DBG,
+					"%s reset_state name = %s %pK\n",
+					__func__, rst_set->reset_tbl[i].name,
+					rst);
+				rc = -EINVAL;
+				goto failed_to_reset;
+			}
 			rc = reset_control_deassert(rst);
 			break;
 		default:
@@ -3717,9 +3762,13 @@
 		}
 
 		if (rc)
-			return rc;
+			goto failed_to_reset;
 	}
+
 	return 0;
+
+failed_to_reset:
+	return rc;
 }
 
 static inline void __disable_unprepare_clks(struct venus_hfi_device *device)
@@ -3751,7 +3800,7 @@
 	}
 }
 
-static int prepare_ahb2axi_bridge(struct venus_hfi_device *device)
+static int reset_ahb2axi_bridge(struct venus_hfi_device *device)
 {
 	int rc;
 
@@ -4514,9 +4563,9 @@
 		goto fail_enable_gdsc;
 	}
 
-	rc = call_venus_op(device, prepare_ahb2axi_bridge, device);
+	rc = call_venus_op(device, reset_ahb2axi_bridge, device);
 	if (rc) {
-		dprintk(VIDC_ERR, "Failed to enable ahb2axi: %d\n", rc);
+		dprintk(VIDC_ERR, "Failed to reset ahb2axi: %d\n", rc);
 		goto fail_enable_clks;
 	}
 
@@ -4573,6 +4622,9 @@
 	device->intr_status = 0;
 
 	__disable_unprepare_clks(device);
+	if (call_venus_op(device, reset_ahb2axi_bridge, device))
+		dprintk(VIDC_ERR, "Failed to reset ahb2axi\n");
+
 	if (__disable_regulators(device))
 		dprintk(VIDC_WARN, "Failed to disable regulators\n");
 
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.h b/drivers/media/platform/msm/vidc/venus_hfi.h
index c771a30..bf27d5a 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.h
+++ b/drivers/media/platform/msm/vidc/venus_hfi.h
@@ -236,7 +236,7 @@
 	void (*interrupt_init)(struct venus_hfi_device *ptr);
 	void (*setup_dsp_uc_memmap)(struct venus_hfi_device *device);
 	void (*clock_config_on_enable)(struct venus_hfi_device *device);
-	int (*prepare_ahb2axi_bridge)(struct venus_hfi_device *device);
+	int (*reset_ahb2axi_bridge)(struct venus_hfi_device *device);
 };
 
 struct venus_hfi_device {
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi.h b/drivers/media/platform/msm/vidc/vidc_hfi.h
index e208f46..ac361c7 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi.h
@@ -186,6 +186,8 @@
 	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x0021)
 #define HFI_PROPERTY_PARAM_VDEC_UBWC_CR_STAT_INFO_EXTRADATA	\
 	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x0022)
+#define HFI_PROPERTY_PARAM_HDR10_HIST_EXTRADATA \
+	(HFI_PROPERTY_PARAM_OX_START + 0x0023)
 
 #define HFI_PROPERTY_CONFIG_VDEC_OX_START				\
 	(HFI_DOMAIN_BASE_VDEC + HFI_ARCH_OX_OFFSET + 0x4000)
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index 438332d..62c068e 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -1097,8 +1097,8 @@
 	struct msm_vidc_content_light_level_sei_payload cll_info;
 };
 
-struct hfi_vbv_hdr_buf_size {
-	u32 vbv_hdr_buf_size;
+struct hfi_vbv_hrd_buf_size {
+	u32 vbv_hrd_buf_size;
 };
 
 #endif
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_io.h b/drivers/media/platform/msm/vidc/vidc_hfi_io.h
index d9804f0..eb47f68 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_io.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_io.h
@@ -61,6 +61,15 @@
 
 #define VIDC_CPU_CS_SCIBARG3		(VIDC_CPU_CS_BASE_OFFS + 0x6C)
 
+/* FAL10 Feature Control */
+#define VIDC_CPU_CS_X2RPMh		(VIDC_CPU_CS_BASE_OFFS + 0x168)
+#define VIDC_CPU_CS_X2RPMh_MASK0_BMSK	0x1
+#define VIDC_CPU_CS_X2RPMh_MASK0_SHFT	0x0
+#define VIDC_CPU_CS_X2RPMh_MASK1_BMSK	0x2
+#define VIDC_CPU_CS_X2RPMh_MASK1_SHFT	0x1
+#define VIDC_CPU_CS_X2RPMh_SWOVERRIDE_BMSK	0x4
+#define VIDC_CPU_CS_X2RPMh_SWOVERRIDE_SHFT	0x3
+
 #define VIDC_CPU_IC_SOFTINT		(VIDC_CPU_IC_BASE_OFFS + 0x150)
 #define VIDC_CPU_IC_SOFTINT_H2A_BMSK	0x1
 #define VIDC_CPU_IC_SOFTINT_H2A_SHFT	0x0
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index bb6add9..5b8350e 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -264,6 +264,14 @@
 	if (ret)
 		return ret;
 
+	if (!dev->dma_parms) {
+		dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+					      GFP_KERNEL);
+		if (!dev->dma_parms)
+			return -ENOMEM;
+	}
+	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
 	INIT_LIST_HEAD(&core->instances);
 	mutex_init(&core->lock);
 	INIT_DELAYED_WORK(&core->work, venus_sys_error_handler);
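A sketch of why the venus core change allocates dev->dma_parms before calling dma_set_max_seg_size(); the helper name below is made up for illustration. dma_set_max_seg_size() only stores the value into dev->dma_parms, so the structure must exist first; on buses that do not allocate it, the call fails with -EIO.

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int venus_like_dma_init(struct device *dev)
{
	if (!dev->dma_parms) {
		/* devm allocation ties the lifetime to the device */
		dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
					      GFP_KERNEL);
		if (!dev->dma_parms)
			return -ENOMEM;
	}
	return dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
}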
diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
index f06003b..2a92e5a 100644
--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
+++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
@@ -865,8 +865,11 @@
 			"%s-vid-cap", dev->v4l2_dev.name);
 
 	if (IS_ERR(dev->kthread_vid_cap)) {
+		int err = PTR_ERR(dev->kthread_vid_cap);
+
+		dev->kthread_vid_cap = NULL;
 		v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
-		return PTR_ERR(dev->kthread_vid_cap);
+		return err;
 	}
 	*pstreaming = true;
 	vivid_grab_controls(dev, true);
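
Both vivid fixes (this one and the vid-out one below) apply the same error-path pattern; a minimal kernel-style sketch, where start_worker() and its slot argument are illustrative rather than vivid code:

#include <linux/err.h>
#include <linux/kthread.h>

/* On failure kthread_run() returns an ERR_PTR, so the error code must be
 * captured before the stored pointer is cleared: PTR_ERR(NULL) is 0 and
 * the caller would wrongly see success, while teardown paths need the
 * pointer NULLed to know no thread is running. */
static int start_worker(struct task_struct **slot,
			int (*fn)(void *), void *data)
{
	struct task_struct *t = kthread_run(fn, data, "worker");

	if (IS_ERR(t)) {
		int err = PTR_ERR(t);	/* capture first ... */

		*slot = NULL;		/* ... then sanitize the slot */
		return err;
	}
	*slot = t;
	return 0;
}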
diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c
index 9981e75..4885905 100644
--- a/drivers/media/platform/vivid/vivid-kthread-out.c
+++ b/drivers/media/platform/vivid/vivid-kthread-out.c
@@ -236,8 +236,11 @@
 			"%s-vid-out", dev->v4l2_dev.name);
 
 	if (IS_ERR(dev->kthread_vid_out)) {
+		int err = PTR_ERR(dev->kthread_vid_out);
+
+		dev->kthread_vid_out = NULL;
 		v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
-		return PTR_ERR(dev->kthread_vid_out);
+		return err;
 	}
 	*pstreaming = true;
 	vivid_grab_controls(dev, true);
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index 1599159..baa7c83 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -438,6 +438,8 @@
 		tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap));
 		break;
 	}
+	vfree(dev->bitmap_cap);
+	dev->bitmap_cap = NULL;
 	vivid_update_quality(dev);
 	tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
 	dev->crop_cap = dev->src_rect;
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
index be531ca..2079861 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.c
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -21,7 +21,7 @@
 	.type = V4L2_DV_BT_656_1120,
 	/* keep this initialization for compatibility with GCC < 4.4.6 */
 	.reserved = { 0 },
-	V4L2_INIT_BT_TIMINGS(0, MAX_WIDTH, 0, MAX_HEIGHT, 14000000, 775000000,
+	V4L2_INIT_BT_TIMINGS(16, MAX_WIDTH, 16, MAX_HEIGHT, 14000000, 775000000,
 		V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
 		V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF,
 		V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_INTERLACED)
diff --git a/drivers/media/radio/rtc6226/radio-rtc6226-common.c b/drivers/media/radio/rtc6226/radio-rtc6226-common.c
index a7e7dfa..bfb4c6a 100644
--- a/drivers/media/radio/rtc6226/radio-rtc6226-common.c
+++ b/drivers/media/radio/rtc6226/radio-rtc6226-common.c
@@ -120,64 +120,12 @@
 /* 0: 87.5 - 108 MHz (USA, Europe)*/
 /* 1: 76   - 108 MHz (Japan wide band) */
 /* 2: 76   -  90 MHz (Japan) */
-static unsigned short band;
 
 /* De-emphasis */
 /* 0: 75 us (USA) */
 /* 1: 50 us (Europe, Australia, Japan) */
 static unsigned short de;
 
-static const struct v4l2_frequency_band bands[] = {
-	{
-		.type = V4L2_TUNER_RADIO,
-		.index = 0,
-		.capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
-			    V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO |
-			    V4L2_TUNER_CAP_FREQ_BANDS |
-			    V4L2_TUNER_CAP_HWSEEK_BOUNDED |
-			    V4L2_TUNER_CAP_HWSEEK_WRAP,
-		.rangelow   =  87500,
-		.rangehigh  = 108000,
-		.modulation = V4L2_BAND_MODULATION_FM,
-	},
-	{
-		.type = V4L2_TUNER_RADIO,
-		.index = 1,
-		.capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
-			    V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO |
-			    V4L2_TUNER_CAP_FREQ_BANDS |
-			    V4L2_TUNER_CAP_HWSEEK_BOUNDED |
-			    V4L2_TUNER_CAP_HWSEEK_WRAP,
-		.rangelow   =  76000,
-		.rangehigh  = 108000,
-		.modulation = V4L2_BAND_MODULATION_FM,
-	},
-	{
-		.type = V4L2_TUNER_RADIO,
-		.index = 2,
-		.capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
-			    V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO |
-			    V4L2_TUNER_CAP_FREQ_BANDS |
-			    V4L2_TUNER_CAP_HWSEEK_BOUNDED |
-			    V4L2_TUNER_CAP_HWSEEK_WRAP,
-		.rangelow   =  76000,
-		.rangehigh  =  91000,
-		.modulation = V4L2_BAND_MODULATION_FM,
-	},
-	{
-		.type = V4L2_TUNER_RADIO,
-		.index = 3,
-		.capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
-			    V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO |
-			    V4L2_TUNER_CAP_FREQ_BANDS |
-			    V4L2_TUNER_CAP_HWSEEK_BOUNDED |
-			    V4L2_TUNER_CAP_HWSEEK_WRAP,
-		.rangelow   =  64000,
-		.rangehigh  =  76000,
-		.modulation = V4L2_BAND_MODULATION_FM,
-	},
-};
-
 wait_queue_head_t rtc6226_wq;
 int rtc6226_wq_flag = NO_WAIT;
 #ifdef New_VolumeControl
@@ -753,8 +701,8 @@
 	u32 band_high_limit;
 	u8 spacing = 0;
 
-	band_low_limit = bands[radio->band].rangelow;
-	band_high_limit = bands[radio->band].rangehigh;
+	band_low_limit = radio->recv_conf.band_low_limit * TUNE_STEP_SIZE;
+	band_high_limit = radio->recv_conf.band_high_limit * TUNE_STEP_SIZE;
 
 	if (radio->space == 0)
 		spacing = CH_SPACING_200;
@@ -2197,6 +2145,8 @@
 {
 	struct rtc6226_device *radio = video_drvdata(file);
 	int retval = 0;
+	u16 bottom_freq;
+	u16 top_freq;
 
 	pr_info("%s entry\n", __func__);
 
@@ -2214,33 +2164,34 @@
 		radio->registers[MPXCFG] &= ~MPXCFG_CSR0_MONO; /* try stereo */
 		break;
 	default:
-		goto done;
+		pr_debug("%s audmode is not set\n", __func__);
 	}
 
 	retval = rtc6226_set_register(radio, MPXCFG);
 
-	pr_info("%s low:%d high:%d\n", __func__,
-		tuner->rangelow, tuner->rangehigh);
+	/* unit is 10 kHz */
+	top_freq = (u16)((tuner->rangehigh / TUNE_PARAM) / TUNE_STEP_SIZE);
+	bottom_freq = (u16)((tuner->rangelow / TUNE_PARAM) / TUNE_STEP_SIZE);
 
-	/* set band */
-	if (tuner->rangelow || tuner->rangehigh) {
-		for (band = 0; band < ARRAY_SIZE(bands); band++) {
-			if (bands[band].rangelow  == tuner->rangelow &&
-				bands[band].rangehigh == tuner->rangehigh)
-				break;
-		}
-		if (band == ARRAY_SIZE(bands)) {
-			pr_err("%s err\n", __func__);
-			band = 0;
-		}
-	} else
-		band = 0; /* If nothing is specified seek 87.5 - 108 Mhz */
+	pr_debug("%s low:%d high:%d\n", __func__,
+		bottom_freq, top_freq);
 
-	if (radio->band != band) {
-		radio->registers[CHANNEL] |= (band  << 12);
-		rtc6226_set_register(radio, MPXCFG);
-		radio->band = band;
-	}
+	radio->registers[RADIOSEEKCFG1] = top_freq;
+	radio->registers[RADIOSEEKCFG2] = bottom_freq;
+
+	retval = rtc6226_set_register(radio, RADIOSEEKCFG1);
+	if (retval < 0)
+		pr_err("In %s, error %d setting higher limit freq\n",
+			__func__, retval);
+	else
+		radio->recv_conf.band_high_limit = top_freq;
+
+	retval = rtc6226_set_register(radio, RADIOSEEKCFG2);
+	if (retval < 0)
+		pr_err("In %s, error %d setting lower limit freq\n",
+			__func__, retval);
+	else
+		radio->recv_conf.band_low_limit = bottom_freq;
 done:
 	pr_info("%s exit %d\n", __func__, retval);
 	return retval;
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index ca68e1d..8b2c16d 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -707,7 +707,8 @@
 			 (dev->last_toggle ? LIRC_SCANCODE_FLAG_TOGGLE : 0)
 	};
 
-	ir_lirc_scancode_event(dev, &sc);
+	if (dev->allowed_protocols != RC_PROTO_BIT_CEC)
+		ir_lirc_scancode_event(dev, &sc);
 
 	spin_lock_irqsave(&dev->keylock, flags);
 
@@ -747,7 +748,8 @@
 		.keycode = keycode
 	};
 
-	ir_lirc_scancode_event(dev, &sc);
+	if (dev->allowed_protocols != RC_PROTO_BIT_CEC)
+		ir_lirc_scancode_event(dev, &sc);
 
 	if (new_event && dev->keypressed)
 		ir_do_keyup(dev, false);
diff --git a/drivers/media/usb/dvb-usb-v2/usb_urb.c b/drivers/media/usb/dvb-usb-v2/usb_urb.c
index 024c751..2ad2dde 100644
--- a/drivers/media/usb/dvb-usb-v2/usb_urb.c
+++ b/drivers/media/usb/dvb-usb-v2/usb_urb.c
@@ -155,7 +155,6 @@
 				stream->props.u.bulk.buffersize,
 				usb_urb_complete, stream);
 
-		stream->urb_list[i]->transfer_flags = URB_FREE_BUFFER;
 		stream->urbs_initialized++;
 	}
 	return 0;
@@ -186,7 +185,7 @@
 		urb->complete = usb_urb_complete;
 		urb->pipe = usb_rcvisocpipe(stream->udev,
 				stream->props.endpoint);
-		urb->transfer_flags = URB_ISO_ASAP | URB_FREE_BUFFER;
+		urb->transfer_flags = URB_ISO_ASAP;
 		urb->interval = stream->props.u.isoc.interval;
 		urb->number_of_packets = stream->props.u.isoc.framesperurb;
 		urb->transfer_buffer_length = stream->props.u.isoc.framesize *
@@ -210,7 +209,7 @@
 	if (stream->state & USB_STATE_URB_BUF) {
 		while (stream->buf_num) {
 			stream->buf_num--;
-			stream->buf_list[stream->buf_num] = NULL;
+			kfree(stream->buf_list[stream->buf_num]);
 		}
 	}
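The usb_urb changes fix a double free by settling buffer ownership: with URB_FREE_BUFFER set, usb_free_urb() frees the transfer buffer itself, so a driver that also tracks the buffers in its own list must leave the flag clear and free the list exactly once. A sketch of the driver-owned teardown, with a hypothetical stream structure:

	#include <linux/slab.h>

	struct example_stream {			/* hypothetical */
		int buf_num;
		void *buf_list[8];
	};

	/* Sketch: the driver owns the buffers (URB_FREE_BUFFER not set),
	 * so teardown frees each tracked buffer exactly once.
	 */
	static void example_free_buffers(struct example_stream *s)
	{
		while (s->buf_num) {
			s->buf_num--;
			kfree(s->buf_list[s->buf_num]);
			s->buf_list[s->buf_num] = NULL;
		}
	}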
 
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index d46dc43..361abbc 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -1824,11 +1824,7 @@
 	usb_put_intf(dev->intf);
 	usb_put_dev(dev->udev);
 
-	if (dev->vdev.dev)
-		v4l2_device_unregister(&dev->vdev);
 #ifdef CONFIG_MEDIA_CONTROLLER
-	if (media_devnode_is_registered(dev->mdev.devnode))
-		media_device_unregister(&dev->mdev);
 	media_device_cleanup(&dev->mdev);
 #endif
 
@@ -1885,6 +1881,15 @@
 
 		uvc_debugfs_cleanup_stream(stream);
 	}
+
+	uvc_status_unregister(dev);
+
+	if (dev->vdev.dev)
+		v4l2_device_unregister(&dev->vdev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+	if (media_devnode_is_registered(dev->mdev.devnode))
+		media_device_unregister(&dev->mdev);
+#endif
 }
 
 int uvc_register_video_device(struct uvc_device *dev,
diff --git a/drivers/media/usb/uvc/uvc_status.c b/drivers/media/usb/uvc/uvc_status.c
index 0722dc6..883e4ca 100644
--- a/drivers/media/usb/uvc/uvc_status.c
+++ b/drivers/media/usb/uvc/uvc_status.c
@@ -54,7 +54,7 @@
 	return ret;
 }
 
-static void uvc_input_cleanup(struct uvc_device *dev)
+static void uvc_input_unregister(struct uvc_device *dev)
 {
 	if (dev->input)
 		input_unregister_device(dev->input);
@@ -71,7 +71,7 @@
 
 #else
 #define uvc_input_init(dev)
-#define uvc_input_cleanup(dev)
+#define uvc_input_unregister(dev)
 #define uvc_input_report_key(dev, code, value)
 #endif /* CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV */
 
@@ -292,12 +292,16 @@
 	return 0;
 }
 
-void uvc_status_cleanup(struct uvc_device *dev)
+void uvc_status_unregister(struct uvc_device *dev)
 {
 	usb_kill_urb(dev->int_urb);
+	uvc_input_unregister(dev);
+}
+
+void uvc_status_cleanup(struct uvc_device *dev)
+{
 	usb_free_urb(dev->int_urb);
 	kfree(dev->status);
-	uvc_input_cleanup(dev);
 }
 
 int uvc_status_start(struct uvc_device *dev, gfp_t flags)
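The uvc rework splits teardown into two phases: uvc_status_unregister() runs at disconnect time to kill the interrupt URB and unregister the input device (the video and media nodes move to the same point), while uvc_status_cleanup() only frees memory once the last reference is gone. A condensed sketch of the ordering, all names hypothetical:

	#include <linux/slab.h>

	struct example_dev { int dummy; };	/* hypothetical */

	static void example_unregister(struct example_dev *d)
	{
		/* stop URBs, remove user-visible nodes: no new I/O starts */
	}

	static void example_cleanup(struct example_dev *d)
	{
		/* free URBs and buffers: safe only after the last reference */
	}

	static void example_disconnect(struct example_dev *d)
	{
		example_unregister(d);	/* visible interfaces go away first */
	}

	static void example_release(struct example_dev *d)
	{
		example_cleanup(d);	/* freeing can no longer race I/O */
		kfree(d);
	}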
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index e5f5d84..a738486 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -750,6 +750,7 @@
 
 /* Status */
 int uvc_status_init(struct uvc_device *dev);
+void uvc_status_unregister(struct uvc_device *dev);
 void uvc_status_cleanup(struct uvc_device *dev);
 int uvc_status_start(struct uvc_device *dev, gfp_t flags);
 void uvc_status_stop(struct uvc_device *dev);
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index d0ff403..931fff7 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -286,6 +286,7 @@
 	const struct v4l2_window *win;
 	const struct v4l2_sdr_format *sdr;
 	const struct v4l2_meta_format *meta;
+	u32 planes;
 	unsigned i;
 
 	pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
@@ -316,7 +317,8 @@
 			prt_names(mp->field, v4l2_field_names),
 			mp->colorspace, mp->num_planes, mp->flags,
 			mp->ycbcr_enc, mp->quantization, mp->xfer_func);
-		for (i = 0; i < mp->num_planes; i++)
+		planes = min_t(u32, mp->num_planes, VIDEO_MAX_PLANES);
+		for (i = 0; i < planes; i++)
 			printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i,
 					mp->plane_fmt[i].bytesperline,
 					mp->plane_fmt[i].sizeimage);
@@ -1297,6 +1299,90 @@
 	case V4L2_META_FMT_VSP1_HGO:	descr = "R-Car VSP1 1-D Histogram"; break;
 	case V4L2_META_FMT_VSP1_HGT:	descr = "R-Car VSP1 2-D Histogram"; break;
 	case V4L2_META_FMT_UVC:		descr = "UVC payload header metadata"; break;
+	case V4L2_PIX_FMT_NV12_UBWC:
+					descr = "NV12 UBWC"; break;
+	case V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010_VENUS:
+					descr = "Y/CbCr 4:2:0 P10 Venus"; break;
+	case V4L2_PIX_FMT_NV12_TP10_UBWC:
+					descr = "Y/CbCr 4:2:0 TP10 UBWC"; break;
+	case V4L2_PIX_FMT_NV12_512:
+				descr = "Y/CbCr 4:2:0 (512 align)"; break;
+	case V4L2_PIX_FMT_NV12_P010_UBWC:
+					descr = "Y/CbCr 4:2:0 P010 UBWC"; break;
+	case V4L2_PIX_FMT_RGBA8888_UBWC:
+					descr = "RGBA8888 UBWC"; break;
+	case V4L2_PIX_FMT_SDE_ABGR_8888:
+					descr = "32-bit ABGR 8-8-8-8"; break;
+	case V4L2_PIX_FMT_SDE_RGBA_8888:
+					descr = "32-bit RGBA 8-8-8-8"; break;
+	case V4L2_PIX_FMT_SDE_RGBX_8888:
+					descr = "32-bit RGBX 8-8-8-8"; break;
+	case V4L2_PIX_FMT_SDE_XBGR_8888:
+					descr = "32-bit XBGR 8-8-8-8"; break;
+	case V4L2_PIX_FMT_SDE_RGBA_5551:
+					descr = "16-bit RGBA 5-5-5-1"; break;
+	case V4L2_PIX_FMT_SDE_ABGR_1555:
+					descr = "16-bit ABGR 1-5-5-5"; break;
+	case V4L2_PIX_FMT_SDE_BGRA_5551:
+					descr = "16-bit BGRA 5-5-5-1"; break;
+	case V4L2_PIX_FMT_SDE_BGRX_5551:
+					descr = "16-bit BGRX 5-5-5-1"; break;
+	case V4L2_PIX_FMT_SDE_RGBX_5551:
+					descr = "16-bit RGBX 5-5-5-1"; break;
+	case V4L2_PIX_FMT_SDE_XBGR_1555:
+					descr = "16-bit XBGR 1-5-5-5"; break;
+	case V4L2_PIX_FMT_SDE_RGBA_4444:
+					descr = "16-bit RGBA 4-4-4-4"; break;
+	case V4L2_PIX_FMT_SDE_BGRA_4444:
+					descr = "16-bit BGRA 4-4-4-4"; break;
+	case V4L2_PIX_FMT_SDE_ABGR_4444:
+					descr = "16-bit ABGR 4-4-4-4"; break;
+	case V4L2_PIX_FMT_SDE_RGBX_4444:
+					descr = "16-bit RGBX 4-4-4-4"; break;
+	case V4L2_PIX_FMT_SDE_BGRX_4444:
+					descr = "16-bit BGRX 4-4-4-4"; break;
+	case V4L2_PIX_FMT_SDE_XBGR_4444:
+					descr = "16-bit XBGR 4-4-4-4"; break;
+	case V4L2_PIX_FMT_SDE_BGR_565:
+					descr = "16-bit BGR 5-6-5"; break;
+	case V4L2_PIX_FMT_SDE_Y_CR_CB_GH2V2:
+					descr = "Planar YVU 4:2:0 A16"; break;
+	case V4L2_PIX_FMT_SDE_Y_CBCR_H1V2:
+					descr = "Y/CbCr 4:2:2"; break;
+	case V4L2_PIX_FMT_SDE_Y_CRCB_H1V2:
+					descr = "Y/CrCb 4:2:2"; break;
+	case V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_VENUS:
+					descr = "Y/CbCr 4:2:0 Venus"; break;
+	case V4L2_PIX_FMT_SDE_Y_CRCB_H2V2_VENUS:
+					descr = "Y/CrCb 4:2:0 Venus"; break;
+	case V4L2_PIX_FMT_SDE_RGBX_8888_UBWC:
+					descr = "RGBX 8:8:8:8 UBWC"; break;
+	case V4L2_PIX_FMT_SDE_RGB_565_UBWC:
+					descr = "RGB 5:6:5 UBWC"; break;
+	case V4L2_PIX_FMT_SDE_RGBA_1010102:
+					descr = "RGBA 10:10:10:2"; break;
+	case V4L2_PIX_FMT_SDE_RGBX_1010102:
+					descr = "RGBX 10:10:10:2"; break;
+	case V4L2_PIX_FMT_SDE_ARGB_2101010:
+					descr = "ARGB 2:10:10:10"; break;
+	case V4L2_PIX_FMT_SDE_XRGB_2101010:
+					descr = "XRGB 2:10:10:10"; break;
+	case V4L2_PIX_FMT_SDE_BGRA_1010102:
+					descr = "BGRA 10:10:10:2"; break;
+	case V4L2_PIX_FMT_SDE_BGRX_1010102:
+					descr = "BGRX 10:10:10:2"; break;
+	case V4L2_PIX_FMT_SDE_ABGR_2101010:
+					descr = "ABGR 2:10:10:10"; break;
+	case V4L2_PIX_FMT_SDE_XBGR_2101010:
+					descr = "XBGR 2:10:10:10"; break;
+	case V4L2_PIX_FMT_SDE_RGBA_1010102_UBWC:
+					descr = "RGBA 10:10:10:2 UBWC"; break;
+	case V4L2_PIX_FMT_SDE_RGBX_1010102_UBWC:
+					descr = "RGBX 10:10:10:2 UBWC"; break;
+	case V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_TP10:
+					descr = "Y/CbCr 4:2:0 TP10"; break;
+	case V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010:
+					descr = "Y/CbCr 4:2:0 P10"; break;
 
 	default:
 		/* Compressed formats */
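The min_t clamp added in the hunk above treats mp->num_planes as untrusted: the value arrives from userspace and indexes the fixed-size plane_fmt[] array, so the debug loop must never walk past VIDEO_MAX_PLANES. The generic defence, sketched with a stand-in limit:

	#include <linux/kernel.h>
	#include <linux/printk.h>

	#define EXAMPLE_MAX_PLANES 8	/* stand-in for VIDEO_MAX_PLANES */

	/* Sketch: clamp a userspace-supplied count before indexing. */
	static void example_print_planes(const u32 *sizeimage, u32 num_planes)
	{
		u32 i, planes = min_t(u32, num_planes, EXAMPLE_MAX_PLANES);

		for (i = 0; i < planes; i++)
			pr_debug("plane %u: sizeimage=%u\n", i, sizeimage[i]);
	}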
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index b893797..9c7925c 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -592,6 +592,29 @@
 	return 0;
 }
 
+static int __maybe_unused tps6586x_i2c_suspend(struct device *dev)
+{
+	struct tps6586x *tps6586x = dev_get_drvdata(dev);
+
+	if (tps6586x->client->irq)
+		disable_irq(tps6586x->client->irq);
+
+	return 0;
+}
+
+static int __maybe_unused tps6586x_i2c_resume(struct device *dev)
+{
+	struct tps6586x *tps6586x = dev_get_drvdata(dev);
+
+	if (tps6586x->client->irq)
+		enable_irq(tps6586x->client->irq);
+
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tps6586x_pm_ops, tps6586x_i2c_suspend,
+			 tps6586x_i2c_resume);
+
 static const struct i2c_device_id tps6586x_id_table[] = {
 	{ "tps6586x", 0 },
 	{ },
@@ -602,6 +625,7 @@
 	.driver	= {
 		.name	= "tps6586x",
 		.of_match_table = of_match_ptr(tps6586x_of_match),
+		.pm	= &tps6586x_pm_ops,
 	},
 	.probe		= tps6586x_i2c_probe,
 	.remove		= tps6586x_i2c_remove,
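The tps6586x hunk hooks system sleep so the chip's interrupt cannot run while the I2C bus may already be suspended; SIMPLE_DEV_PM_OPS() fills in only the suspend/resume pair. The shape of the pattern for a hypothetical I2C driver:

	#include <linux/i2c.h>
	#include <linux/interrupt.h>
	#include <linux/pm.h>

	static int __maybe_unused example_suspend(struct device *dev)
	{
		struct i2c_client *client = to_i2c_client(dev);

		if (client->irq)
			disable_irq(client->irq);  /* handler blocked while asleep */
		return 0;
	}

	static int __maybe_unused example_resume(struct device *dev)
	{
		struct i2c_client *client = to_i2c_client(dev);

		if (client->irq)
			enable_irq(client->irq);
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
	/* then wire it up with .driver.pm = &example_pm_ops */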
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 8679e0b..f4f8ab6 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -217,7 +217,7 @@
 void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
 			       dma_addr_t *dma_handle)
 {
-	if (get_order(size) > MAX_ORDER)
+	if (get_order(size) >= MAX_ORDER)
 		return NULL;
 
 	return dma_zalloc_coherent(&cd->pci_dev->dev, size, dma_handle,
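The genwqe one-liner is an off-by-one: MAX_ORDER is an exclusive upper bound, the buddy allocator serves orders 0 through MAX_ORDER - 1, so get_order(size) == MAX_ORDER must already be rejected. Sketch:

	#include <linux/mm.h>		/* get_order() */
	#include <linux/mmzone.h>	/* MAX_ORDER */

	/* Sketch: valid allocation orders are 0 .. MAX_ORDER - 1. */
	static bool example_alloc_size_ok(size_t size)
	{
		return get_order(size) < MAX_ORDER;
	}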
diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c
index b8aaa68..2ed23c9 100644
--- a/drivers/misc/ibmvmc.c
+++ b/drivers/misc/ibmvmc.c
@@ -820,21 +820,24 @@
  *
  * Return:
  *	0 - Success
+ *	Non-zero - Failure
  */
 static int ibmvmc_open(struct inode *inode, struct file *file)
 {
 	struct ibmvmc_file_session *session;
-	int rc = 0;
 
 	pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__,
 		 (unsigned long)inode, (unsigned long)file,
 		 ibmvmc.state);
 
 	session = kzalloc(sizeof(*session), GFP_KERNEL);
+	if (!session)
+		return -ENOMEM;
+
 	session->file = file;
 	file->private_data = session;
 
-	return rc;
+	return 0;
 }
 
 /**
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index e4b10b2..23739a6 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -127,6 +127,8 @@
 #define MEI_DEV_ID_BXT_M      0x1A9A  /* Broxton M */
 #define MEI_DEV_ID_APL_I      0x5A9A  /* Apollo Lake I */
 
+#define MEI_DEV_ID_DNV_IE     0x19E5  /* Denverton IE */
+
 #define MEI_DEV_ID_GLK        0x319A  /* Gemini Lake */
 
 #define MEI_DEV_ID_KBP        0xA2BA  /* Kaby Point */
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index ea4e152..c8e21c8 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -88,11 +88,13 @@
 	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)},
-	{MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH8_CFG)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_CFG)},
 
 	{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
 
+	{MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, MEI_ME_PCH8_CFG)},
+
 	{MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},
 
 	{MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c
index 57a6bb1..8f2c5d8 100644
--- a/drivers/misc/ocxl/config.c
+++ b/drivers/misc/ocxl/config.c
@@ -318,7 +318,7 @@
 		if (rc)
 			return rc;
 		ptr = (u32 *) &afu->name[i];
-		*ptr = val;
+		*ptr = le32_to_cpu((__force __le32) val);
 	}
 	afu->name[OCXL_AFU_NAME_SZ - 1] = '\0'; /* play safe */
 	return 0;
diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c
index 31695a0..646d164 100644
--- a/drivers/misc/ocxl/link.c
+++ b/drivers/misc/ocxl/link.c
@@ -566,7 +566,7 @@
 
 	mutex_lock(&spa->spa_lock);
 
-	pe->tid = tid;
+	pe->tid = cpu_to_be32(tid);
 
 	/*
 	 * The barrier makes sure the PE is updated
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 9758701..5ae4fa7 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -420,6 +420,29 @@
 }
 __setup("androidboot.keymaster=", get_qseecom_keymaster_status);
 
+
+#define QSEECOM_SCM_EBUSY_WAIT_MS 30
+#define QSEECOM_SCM_EBUSY_MAX_RETRY 67
+
+static int __qseecom_scm_call2_locked(uint32_t smc_id, struct scm_desc *desc)
+{
+	int ret = 0;
+	int retry_count = 0;
+
+	do {
+		ret = scm_call2_noretry(smc_id, desc);
+		if (ret == -EBUSY) {
+			mutex_unlock(&app_access_lock);
+			msleep(QSEECOM_SCM_EBUSY_WAIT_MS);
+			mutex_lock(&app_access_lock);
+		}
+		if (retry_count == 33)
+			pr_warn("secure world has been busy for 1 second!\n");
+	} while (ret == -EBUSY &&
+			(retry_count++ < QSEECOM_SCM_EBUSY_MAX_RETRY));
+	return ret;
+}
+
 static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
 			const void *req_buf, void *resp_buf)
 {
@@ -447,7 +470,7 @@
 				svc_id, tz_cmd_id);
 			return -EINVAL;
 		}
-		ret = scm_call2(smc_id, &desc);
+		ret = __qseecom_scm_call2_locked(smc_id, &desc);
 		break;
 	}
 	case SCM_SVC_ES: {
@@ -470,7 +493,7 @@
 			desc.args[0] = p_hash_req->partition_id;
 			desc.args[1] = virt_to_phys(tzbuf);
 			desc.args[2] = SHA256_DIGEST_LENGTH;
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			kzfree(tzbuf);
 			break;
 		}
@@ -505,7 +528,7 @@
 				desc.args[2] = req_64bit->phy_addr;
 			}
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_APP_SHUTDOWN_COMMAND: {
@@ -515,7 +538,7 @@
 			smc_id = TZ_OS_APP_SHUTDOWN_ID;
 			desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
 			desc.args[0] = req->app_id;
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_APP_LOOKUP_COMMAND: {
@@ -534,7 +557,7 @@
 			desc.args[0] = virt_to_phys(tzbuf);
 			desc.args[1] = strlen(req->app_name);
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			kzfree(tzbuf);
 			break;
 		}
@@ -558,7 +581,7 @@
 				desc.args[1] = req_64bit->size;
 			}
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
@@ -582,14 +605,14 @@
 				desc.args[2] = req_64bit->phy_addr;
 			}
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
 			smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
 			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_REGISTER_LISTENER: {
@@ -614,12 +637,12 @@
 			}
 			qseecom.smcinvoke_support = true;
 			smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			if (ret == -EIO) {
 				/* smcinvoke is not supported */
 				qseecom.smcinvoke_support = false;
 				smc_id = TZ_OS_REGISTER_LISTENER_ID;
-				ret = scm_call2(smc_id, &desc);
+				ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			}
 			break;
 		}
@@ -631,7 +654,7 @@
 			smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
 			desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
 			desc.args[0] = req->listener_id;
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_LISTENER_DATA_RSP_COMMAND: {
@@ -644,7 +667,7 @@
 				TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
 			desc.args[0] = req->listener_id;
 			desc.args[1] = req->status;
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
@@ -672,7 +695,7 @@
 				desc.args[2] = req_64->sglistinfo_ptr;
 				desc.args[3] = req_64->sglistinfo_len;
 			}
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
@@ -694,14 +717,14 @@
 				desc.args[2] = req_64bit->phy_addr;
 			}
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
 			smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
 			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 			}
 
@@ -729,7 +752,7 @@
 				desc.args[3] = req_64bit->rsp_ptr;
 				desc.args[4] = req_64bit->rsp_len;
 			}
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
@@ -761,7 +784,7 @@
 				desc.args[5] = req_64bit->sglistinfo_ptr;
 				desc.args[6] = req_64bit->sglistinfo_len;
 			}
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
@@ -773,21 +796,21 @@
 			desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
 			desc.args[0] = req->key_type;
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_RPMB_ERASE_COMMAND: {
 			smc_id = TZ_OS_RPMB_ERASE_ID;
 			desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
 			smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
 			desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_GENERATE_KEY: {
@@ -808,7 +831,7 @@
 			desc.args[0] = virt_to_phys(tzbuf);
 			desc.args[1] = tzbuflen;
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			kzfree(tzbuf);
 			break;
 		}
@@ -830,7 +853,7 @@
 			desc.args[0] = virt_to_phys(tzbuf);
 			desc.args[1] = tzbuflen;
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			kzfree(tzbuf);
 			break;
 		}
@@ -852,7 +875,7 @@
 			desc.args[0] = virt_to_phys(tzbuf);
 			desc.args[1] = tzbuflen;
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			kzfree(tzbuf);
 			break;
 		}
@@ -874,7 +897,7 @@
 			desc.args[0] = virt_to_phys(tzbuf);
 			desc.args[1] = tzbuflen;
 			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			kzfree(tzbuf);
 			break;
 		}
@@ -900,7 +923,7 @@
 				desc.args[3] = req_64bit->resp_ptr;
 				desc.args[4] = req_64bit->resp_len;
 			}
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
@@ -930,7 +953,7 @@
 				desc.args[5] = req_64bit->sglistinfo_ptr;
 				desc.args[6] = req_64bit->sglistinfo_len;
 			}
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_TEE_INVOKE_COMMAND: {
@@ -955,7 +978,7 @@
 				desc.args[3] = req_64bit->resp_ptr;
 				desc.args[4] = req_64bit->resp_len;
 			}
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
@@ -985,7 +1008,7 @@
 				desc.args[5] = req_64bit->sglistinfo_ptr;
 				desc.args[6] = req_64bit->sglistinfo_len;
 			}
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_TEE_CLOSE_SESSION: {
@@ -1010,7 +1033,7 @@
 				desc.args[3] = req_64bit->resp_ptr;
 				desc.args[4] = req_64bit->resp_len;
 			}
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_TEE_REQUEST_CANCELLATION: {
@@ -1036,7 +1059,7 @@
 				desc.args[3] = req_64bit->resp_ptr;
 				desc.args[4] = req_64bit->resp_len;
 			}
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
@@ -1051,7 +1074,7 @@
 			desc.arginfo =
 				TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
 			desc.args[0] = req->app_or_session_id;
-			ret = scm_call2(smc_id, &desc);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
 			break;
 		}
 		default: {
@@ -2084,7 +2107,7 @@
 	}
 
 	/* find app_id & img_name from list */
-	if (!ptr_app) {
+	if (!ptr_app && data->client.app_arch != ELFCLASSNONE) {
 		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
 		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
 							list) {
@@ -6475,7 +6498,7 @@
 		if (ret)
 			break;
 
-		ret = scm_call2(TZ_MDTP_CIPHER_DIP_ID, &desc);
+		ret = __qseecom_scm_call2_locked(TZ_MDTP_CIPHER_DIP_ID, &desc);
 
 		__qseecom_disable_clk(CLK_QSEE);
 
@@ -8668,8 +8691,10 @@
 
 	desc.args[0] = FEATURE_ID_WHITELIST;
 	desc.arginfo = SCM_ARGS(1);
-	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO,
+	mutex_lock(&app_access_lock);
+	ret = __qseecom_scm_call2_locked(SCM_SIP_FNID(SCM_SVC_INFO,
 		GET_FEAT_VERSION_CMD), &desc);
+	mutex_unlock(&app_access_lock);
 	if (!ret)
 		version = desc.ret[0];
 
@@ -8752,8 +8777,10 @@
 	qseecom.send_resp_flag = 0;
 
 	qseecom.qsee_version = QSEEE_VERSION_00;
+	mutex_lock(&app_access_lock);
 	rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
 		&resp, sizeof(resp));
+	mutex_unlock(&app_access_lock);
 	pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
 	if (rc) {
 		pr_err("Failed to get QSEE version info %d\n", rc);
@@ -8906,9 +8933,11 @@
 				rc = -EIO;
 				goto exit_deinit_clock;
 			}
+			mutex_lock(&app_access_lock);
 			rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
 					cmd_buf, cmd_len,
 					&resp, sizeof(resp));
+			mutex_unlock(&app_access_lock);
 			__qseecom_disable_clk(CLK_QSEE);
 			if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
 				pr_err("send secapp reg fail %d resp.res %d\n",
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index f50e922..413308a 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -543,6 +543,7 @@
 int mmc_init_clk_scaling(struct mmc_host *host)
 {
 	int err;
+	struct devfreq *devfreq;
 
 	if (!host || !host->card) {
 		pr_err("%s: unexpected host/card parameters\n",
@@ -593,22 +594,34 @@
 		return err;
 	}
 
+	dev_pm_opp_add(mmc_classdev(host),
+		host->clk_scaling.devfreq_profile.freq_table[0], 0);
+	dev_pm_opp_add(mmc_classdev(host),
+		host->clk_scaling.devfreq_profile.freq_table[1], 0);
+
 	pr_debug("%s: adding devfreq with: upthreshold=%u downthreshold=%u polling=%u\n",
 		mmc_hostname(host),
 		host->clk_scaling.ondemand_gov_data.upthreshold,
 		host->clk_scaling.ondemand_gov_data.downdifferential,
 		host->clk_scaling.devfreq_profile.polling_ms);
-	host->clk_scaling.devfreq = devfreq_add_device(
+
+	devfreq = devfreq_add_device(
 		mmc_classdev(host),
 		&host->clk_scaling.devfreq_profile,
 		"simple_ondemand",
 		&host->clk_scaling.ondemand_gov_data);
-	if (!host->clk_scaling.devfreq) {
+
+	if (IS_ERR(devfreq)) {
 		pr_err("%s: unable to register with devfreq\n",
 			mmc_hostname(host));
-		return -EPERM;
+		dev_pm_opp_remove(mmc_classdev(host),
+			host->clk_scaling.devfreq_profile.freq_table[0]);
+		dev_pm_opp_remove(mmc_classdev(host),
+			host->clk_scaling.devfreq_profile.freq_table[1]);
+		return PTR_ERR(devfreq);
 	}
 
+	host->clk_scaling.devfreq = devfreq;
 	pr_debug("%s: clk scaling is enabled for device %s (%pK) with devfreq %pK (clock = %uHz)\n",
 		mmc_hostname(host),
 		dev_name(mmc_classdev(host)),
@@ -765,6 +778,11 @@
 		return err;
 	}
 
+	dev_pm_opp_remove(mmc_classdev(host),
+		host->clk_scaling.devfreq_profile.freq_table[0]);
+	dev_pm_opp_remove(mmc_classdev(host),
+		host->clk_scaling.devfreq_profile.freq_table[1]);
+
 	kfree(host->clk_scaling.devfreq_profile.freq_table);
 
 	host->clk_scaling.devfreq = NULL;
@@ -1787,12 +1805,13 @@
 	unsigned long flags;
 	int retry_cnt = delay_ms/10;
 	bool pm = false;
+	struct task_struct *task = current;
 
 	do {
 		spin_lock_irqsave(&host->lock, flags);
-		if (!host->claimed || host->claimer->task == current) {
+		if (!host->claimed || mmc_ctx_matches(host, NULL, task)) {
 			host->claimed = 1;
-			host->claimer->task = current;
+			mmc_ctx_set_claimer(host, NULL, task);
 			host->claim_cnt += 1;
 			claimed_host = 1;
 			if (host->claim_cnt == 1)
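The devfreq fix in this file corrects an error check that could never fire: devfreq_add_device() returns an ERR_PTR on failure, never NULL, so the result must be tested with IS_ERR() and propagated with PTR_ERR() (after unwinding the OPP entries registered just before). Sketch:

	#include <linux/devfreq.h>
	#include <linux/err.h>

	/* Sketch: ERR_PTR-returning APIs need IS_ERR(), not a NULL test. */
	static int example_add_devfreq(struct device *dev,
				       struct devfreq_dev_profile *profile,
				       void *gov_data, struct devfreq **out)
	{
		struct devfreq *df;

		df = devfreq_add_device(dev, profile, "simple_ondemand", gov_data);
		if (IS_ERR(df))
			return PTR_ERR(df);	/* a NULL check would never trip */

		*out = df;
		return 0;
	}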
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 00cdf6d..8cc2aac 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -32,6 +32,7 @@
 #include "pwrseq.h"
 
 #define DEFAULT_CMD6_TIMEOUT_MS	500
+#define MIN_CACHE_EN_TIMEOUT_MS 1600
 
 static const unsigned int tran_exp[] = {
 	10000,		100000,		1000000,	10000000,
@@ -547,8 +548,7 @@
 			card->cid.year += 16;
 
 		/* check whether the eMMC card supports BKOPS */
-		if (!mmc_card_broken_hpi(card) &&
-		    (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) &&
+		if ((ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) &&
 				card->ext_csd.hpi) {
 			card->ext_csd.bkops = 1;
 			card->ext_csd.man_bkops_en =
@@ -2146,22 +2146,30 @@
 		if (err) {
 			pr_warn("%s: Enabling HPI failed\n",
 				mmc_hostname(card->host));
+			card->ext_csd.hpi_en = 0;
 			err = 0;
-		} else
+		} else {
 			card->ext_csd.hpi_en = 1;
+		}
 	}
 
 	/*
-	 * If cache size is higher than 0, this indicates
-	 * the existence of cache and it can be turned on.
+	 * If cache size is higher than 0, this indicates the existence of cache
+	 * and it can be turned on. Note that some eMMCs from Micron have been
+	 * reported to need an ~800 ms timeout while enabling the cache after
+	 * sudden power failure tests. Let's extend the timeout to a minimum of
+	 * MIN_CACHE_EN_TIMEOUT_MS and do it for all cards.
 	 * If HPI is not supported then cache shouldn't be enabled.
 	 */
-	if (!mmc_card_broken_hpi(card) && card->ext_csd.cache_size > 0) {
+	if (card->ext_csd.cache_size > 0) {
 		if (card->ext_csd.hpi_en &&
 			(!(card->quirks & MMC_QUIRK_CACHE_DISABLE))) {
+			unsigned int timeout_ms = MIN_CACHE_EN_TIMEOUT_MS;
+
+			timeout_ms = max(card->ext_csd.generic_cmd6_time,
+					 timeout_ms);
 			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-					EXT_CSD_CACHE_CTRL, 1,
-					card->ext_csd.generic_cmd6_time);
+					EXT_CSD_CACHE_CTRL, 1, timeout_ms);
 			if (err && err != -EBADMSG) {
 				pr_err("%s: %s: fail on CACHE_CTRL ON %d\n",
 					mmc_hostname(host), __func__, err);
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 6ef491b..30d625c 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1197,8 +1197,6 @@
 		return;
 	}
 
-	mmc_get_card(host->card, NULL);
-
 	/*
 	 * Just check if our card has been removed.
 	 */
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index be53044..fbc56ee 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -1954,13 +1954,14 @@
 			}
 
 			atmci_request_end(host, host->mrq);
-			state = STATE_IDLE;
+			goto unlock; /* atmci_request_end() sets host->state */
 			break;
 		}
 	} while (state != prev_state);
 
 	host->state = state;
 
+unlock:
 	spin_unlock(&host->lock);
 }
 
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index 768972a..0d3b747 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -1427,6 +1427,8 @@
 
 err:
 	dev_dbg(dev, "%s -> err %d\n", __func__, ret);
+	if (host->dma_chan_rxtx)
+		dma_release_channel(host->dma_chan_rxtx);
 	mmc_free_host(mmc);
 
 	return ret;
diff --git a/drivers/mmc/host/dw_mmc-bluefield.c b/drivers/mmc/host/dw_mmc-bluefield.c
index 54c3fbb..db56d4f 100644
--- a/drivers/mmc/host/dw_mmc-bluefield.c
+++ b/drivers/mmc/host/dw_mmc-bluefield.c
@@ -1,11 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2018 Mellanox Technologies.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
  */
 
 #include <linux/bitfield.h>
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index c201c37..ef9deaa 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -174,6 +174,8 @@
 	struct sd_emmc_desc *descs;
 	dma_addr_t descs_dma_addr;
 
+	int irq;
+
 	bool vqmmc_enabled;
 };
 
@@ -1181,7 +1183,7 @@
 	struct resource *res;
 	struct meson_host *host;
 	struct mmc_host *mmc;
-	int ret, irq;
+	int ret;
 
 	mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
 	if (!mmc)
@@ -1228,8 +1230,8 @@
 		goto free_host;
 	}
 
-	irq = platform_get_irq(pdev, 0);
-	if (irq <= 0) {
+	host->irq = platform_get_irq(pdev, 0);
+	if (host->irq <= 0) {
 		dev_err(&pdev->dev, "failed to get interrupt resource.\n");
 		ret = -EINVAL;
 		goto free_host;
@@ -1283,9 +1285,8 @@
 	writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN,
 	       host->regs + SD_EMMC_IRQ_EN);
 
-	ret = devm_request_threaded_irq(&pdev->dev, irq, meson_mmc_irq,
-					meson_mmc_irq_thread, IRQF_SHARED,
-					NULL, host);
+	ret = request_threaded_irq(host->irq, meson_mmc_irq,
+			meson_mmc_irq_thread, IRQF_SHARED, NULL, host);
 	if (ret)
 		goto err_init_clk;
 
@@ -1303,7 +1304,7 @@
 	if (host->bounce_buf == NULL) {
 		dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n");
 		ret = -ENOMEM;
-		goto err_init_clk;
+		goto err_free_irq;
 	}
 
 	host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
@@ -1322,6 +1323,8 @@
 err_bounce_buf:
 	dma_free_coherent(host->dev, host->bounce_buf_size,
 			  host->bounce_buf, host->bounce_dma_addr);
+err_free_irq:
+	free_irq(host->irq, host);
 err_init_clk:
 	clk_disable_unprepare(host->mmc_clk);
 err_core_clk:
@@ -1339,6 +1342,7 @@
 
 	/* disable interrupts */
 	writel(0, host->regs + SD_EMMC_IRQ_EN);
+	free_irq(host->irq, host);
 
 	dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
 			  host->descs, host->descs_dma_addr);
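The meson-gx conversion from devm_request_threaded_irq() to a manual request/free pair is about ordering: a devm-managed IRQ is released only after remove() returns, so a late interrupt could still dereference descriptor memory that remove() had already handed back with dma_free_coherent(). Owning the IRQ lets the driver free it first. Sketch of the remove-side ordering, types and register offset hypothetical:

	#include <linux/dma-mapping.h>
	#include <linux/interrupt.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	struct example_host {		/* hypothetical */
		void __iomem *regs;
		int irq;
		size_t desc_len;
		void *descs;
		dma_addr_t descs_dma;
	};

	#define EXAMPLE_IRQ_EN	0x4c	/* hypothetical register offset */

	/* Sketch: free the IRQ before the memory its handler touches. */
	static int example_remove(struct platform_device *pdev)
	{
		struct example_host *host = platform_get_drvdata(pdev);

		writel(0, host->regs + EXAMPLE_IRQ_EN);	/* mask at the source */
		free_irq(host->irq, host);	/* handler can no longer run */
		dma_free_coherent(&pdev->dev, host->desc_len,
				  host->descs, host->descs_dma);
		return 0;
	}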
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 0484138..f171cce 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -784,7 +784,7 @@
 
 	if (timing == MMC_TIMING_MMC_HS400 &&
 	    host->dev_comp->hs400_tune)
-		sdr_set_field(host->base + PAD_CMD_TUNE,
+		sdr_set_field(host->base + tune_reg,
 			      MSDC_PAD_TUNE_CMDRRDLY,
 			      host->hs400_cmd_int_delay);
 	dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->sclk, timing);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 68760d4..b23c57e 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2066,7 +2066,6 @@
 	mmc->max_blk_size = 512;       /* Block Length at max can be 1024 */
 	mmc->max_blk_count = 0xFFFF;    /* No. of Blocks is 16 bits */
 	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
-	mmc->max_seg_size = mmc->max_req_size;
 
 	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
 		     MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | MMC_CAP_CMD23;
@@ -2096,6 +2095,17 @@
 		goto err_irq;
 	}
 
+	/*
+	 * Limit the maximum segment size to the lower of the request size
+	 * and the DMA engine device segment size limits.  In reality, with
+	 * 32-bit transfers, the DMA engine can do longer segments than this
+	 * but there is no way to represent that in the DMA model - if we
+	 * increase this figure here, we get warnings from the DMA API debug.
+	 */
+	mmc->max_seg_size = min3(mmc->max_req_size,
+			dma_get_max_seg_size(host->rx_chan->device->dev),
+			dma_get_max_seg_size(host->tx_chan->device->dev));
+
 	/* Request IRQ for MMC operations */
 	ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0,
 			mmc_hostname(mmc), host);
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index d0e83db..94eeed2 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -279,7 +279,10 @@
 
 	iproc_host->data = iproc_data;
 
-	mmc_of_parse(host->mmc);
+	ret = mmc_of_parse(host->mmc);
+	if (ret)
+		goto err;
+
 	sdhci_get_of_property(pdev);
 
 	host->mmc->caps |= iproc_host->data->mmc_caps;
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index a82a8cb..495cab4 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1170,6 +1170,29 @@
 			drv_type);
 }
 
+static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+	u32 config, oldconfig = readl_relaxed(host->ioaddr +
+					      msm_host_offset->CORE_DLL_CONFIG);
+
+	config = oldconfig;
+	if (enable) {
+		config |= CORE_CDR_EN;
+		config &= ~CORE_CDR_EXT_EN;
+	} else {
+		config &= ~CORE_CDR_EN;
+		config |= CORE_CDR_EXT_EN;
+	}
+
+	if (config != oldconfig)
+		writel_relaxed(config, host->ioaddr +
+			       msm_host_offset->CORE_DLL_CONFIG);
+}
+
 int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
 {
 	unsigned long flags;
@@ -1195,8 +1218,14 @@
 	if (host->clock <= CORE_FREQ_100MHZ ||
 		!((ios.timing == MMC_TIMING_MMC_HS400) ||
 		(ios.timing == MMC_TIMING_MMC_HS200) ||
-		(ios.timing == MMC_TIMING_UHS_SDR104)))
+		(ios.timing == MMC_TIMING_UHS_SDR104))) {
+		msm_host->use_cdr = false;
+		sdhci_msm_set_cdr(host, false);
 		return 0;
+	}
+
+	/* Clock-Data-Recovery used to dynamically adjust RX sampling point */
+	msm_host->use_cdr = true;
 
 	/*
 	 * Don't allow re-tuning for CRC errors observed for any commands
diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h
index 902edd4..5c1c961 100644
--- a/drivers/mmc/host/sdhci-msm.h
+++ b/drivers/mmc/host/sdhci-msm.h
@@ -252,6 +252,8 @@
 	bool use_7nm_dll;
 	int soc_min_rev;
 	struct workqueue_struct *pm_qos_wq;
+	bool use_cdr;
+	u32 transfer_mode;
 };
 
 extern char *saved_command_line;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 1eb37e1..ce02455 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -4764,7 +4764,7 @@
 	mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
 		(host->flags & SDHCI_USE_ADMA) ?
 		((host->flags & SDHCI_USE_64_BIT_DMA) ?
-		"64-bit ADMA" : "32-bit ADMA") :
+		"64-bit ADMA" : "32-bit ADMA") : "",
 		((host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO"));
 
 	sdhci_enable_card_detection(host);
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index c7573cc..9c90695 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -444,9 +444,14 @@
 	writel_relaxed(reg & ~int_mask, nfc->regs + NDCR);
 }
 
-static void marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
+static u32 marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
 {
+	u32 reg;
+
+	reg = readl_relaxed(nfc->regs + NDSR);
 	writel_relaxed(int_mask, nfc->regs + NDSR);
+
+	return reg & int_mask;
 }
 
 static void marvell_nfc_force_byte_access(struct nand_chip *chip,
@@ -613,6 +618,7 @@
 static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
 {
 	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+	u32 pending;
 	int ret;
 
 	/* Timeout is expressed in ms */
@@ -625,8 +631,13 @@
 	ret = wait_for_completion_timeout(&nfc->complete,
 					  msecs_to_jiffies(timeout_ms));
 	marvell_nfc_disable_int(nfc, NDCR_RDYM);
-	marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
-	if (!ret) {
+	pending = marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
+
+	/*
+	 * If the wait timed out, check whether the interrupt was merely not
+	 * serviced in time or whether something actually went wrong.
+	 */
+	if (!ret && !pending) {
 		dev_err(nfc->dev, "Timeout waiting for RB signal\n");
 		return -ETIMEDOUT;
 	}
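The marvell_nand change closes a scheduling race: the ready interrupt can fire and be acknowledged, yet on a loaded machine the waiting thread may not be scheduled before wait_for_completion_timeout() expires. Returning the still-pending status bits from the clear-interrupt helper lets the caller distinguish "served late" from a genuine timeout. The generic shape, with hypothetical helpers:

	#include <linux/completion.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>

	struct example_ctrl {			/* hypothetical */
		struct completion done;
	};

	u32 example_read_and_clear_status(struct example_ctrl *c);	/* hypothetical */

	/* Sketch: on timeout, re-check the hardware before failing. */
	static int example_wait_ready(struct example_ctrl *c, unsigned int ms)
	{
		unsigned long t = wait_for_completion_timeout(&c->done,
							      msecs_to_jiffies(ms));
		u32 pending = example_read_and_clear_status(c);

		if (!t && !pending)
			return -ETIMEDOUT;	/* nothing happened at all */

		return 0;			/* completed, possibly late */
	}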
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index 4546ac0..b1683d7 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -1938,7 +1938,7 @@
 	case NAND_OMAP_PREFETCH_DMA:
 		dma_cap_zero(mask);
 		dma_cap_set(DMA_SLAVE, mask);
-		info->dma = dma_request_chan(dev, "rxtx");
+		info->dma = dma_request_chan(dev->parent, "rxtx");
 
 		if (IS_ERR(info->dma)) {
 			dev_err(dev, "DMA engine request failed\n");
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 8815f3e..880e75f 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2839,6 +2839,16 @@
 	if (ret)
 		return ret;
 
+	if (nandc->props->is_bam) {
+		free_bam_transaction(nandc);
+		nandc->bam_txn = alloc_bam_transaction(nandc);
+		if (!nandc->bam_txn) {
+			dev_err(nandc->dev,
+				"failed to allocate bam transaction\n");
+			return -ENOMEM;
+		}
+	}
+
 	ret = mtd_device_register(mtd, NULL, 0);
 	if (ret)
 		nand_cleanup(chip);
@@ -2853,16 +2863,6 @@
 	struct qcom_nand_host *host;
 	int ret;
 
-	if (nandc->props->is_bam) {
-		free_bam_transaction(nandc);
-		nandc->bam_txn = alloc_bam_transaction(nandc);
-		if (!nandc->bam_txn) {
-			dev_err(nandc->dev,
-				"failed to allocate bam transaction\n");
-			return -ENOMEM;
-		}
-	}
-
 	for_each_available_child_of_node(dn, child) {
 		host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
 		if (!host) {
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index 6cc9c92..37775fc 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -41,7 +41,7 @@
 
 config SPI_ATMEL_QUADSPI
 	tristate "Atmel Quad SPI Controller"
-	depends on ARCH_AT91 || (ARM && COMPILE_TEST)
+	depends on ARCH_AT91 || (ARM && COMPILE_TEST && !ARCH_EBSA110)
 	depends on OF && HAS_IOMEM
 	help
 	  This enables support for the Quad SPI controller in master mode.
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 3c59756..a6fcc5c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1947,6 +1947,9 @@
 	if (!bond_has_slaves(bond)) {
 		bond_set_carrier(bond);
 		eth_hw_addr_random(bond_dev);
+		bond->nest_level = SINGLE_DEPTH_NESTING;
+	} else {
+		bond->nest_level = dev_get_nest_level(bond_dev) + 1;
 	}
 
 	unblock_netpoll_tx();
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 3b3f88f..c05e4d5 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -480,8 +480,6 @@
 struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
 {
 	struct can_priv *priv = netdev_priv(dev);
-	struct sk_buff *skb = priv->echo_skb[idx];
-	struct canfd_frame *cf;
 
 	if (idx >= priv->echo_skb_max) {
 		netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
@@ -489,20 +487,21 @@
 		return NULL;
 	}
 
-	if (!skb) {
-		netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n",
-			   __func__, idx);
-		return NULL;
+	if (priv->echo_skb[idx]) {
+		/* Using "struct canfd_frame::len" for the frame
+		 * length is supported on both CAN and CANFD frames.
+		 */
+		struct sk_buff *skb = priv->echo_skb[idx];
+		struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+		u8 len = cf->len;
+
+		*len_ptr = len;
+		priv->echo_skb[idx] = NULL;
+
+		return skb;
 	}
 
-	/* Using "struct canfd_frame::len" for the frame
-	 * length is supported on both CAN and CANFD frames.
-	 */
-	cf = (struct canfd_frame *)skb->data;
-	*len_ptr = cf->len;
-	priv->echo_skb[idx] = NULL;
-
-	return skb;
+	return NULL;
 }
 
 /*
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 75ce113..ae219b8 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -1004,7 +1004,7 @@
 		}
 	} else {
 		/* clear and invalidate unused mailboxes first */
-		for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i <= ARRAY_SIZE(regs->mb); i++) {
+		for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i < ARRAY_SIZE(regs->mb); i++) {
 			priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
 				    &regs->mb[i].can_ctrl);
 		}
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 8da3d39..258918d 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2391,6 +2391,107 @@
 	return mv88e6xxx_g1_stats_clear(chip);
 }
 
+/* The mv88e6390 has some hidden registers used for debug and
+ * development. The errata workaround also makes use of them.
+ */
+static int mv88e6390_hidden_write(struct mv88e6xxx_chip *chip, int port,
+				  int reg, u16 val)
+{
+	u16 ctrl;
+	int err;
+
+	err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_DATA_PORT,
+				   PORT_RESERVED_1A, val);
+	if (err)
+		return err;
+
+	ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_WRITE |
+	       PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT |
+	       reg;
+
+	return mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT,
+				    PORT_RESERVED_1A, ctrl);
+}
+
+static int mv88e6390_hidden_wait(struct mv88e6xxx_chip *chip)
+{
+	return mv88e6xxx_wait(chip, PORT_RESERVED_1A_CTRL_PORT,
+			      PORT_RESERVED_1A, PORT_RESERVED_1A_BUSY);
+}
+
+
+static int mv88e6390_hidden_read(struct mv88e6xxx_chip *chip, int port,
+				  int reg, u16 *val)
+{
+	u16 ctrl;
+	int err;
+
+	ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_READ |
+	       PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT |
+	       reg;
+
+	err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT,
+				   PORT_RESERVED_1A, ctrl);
+	if (err)
+		return err;
+
+	err = mv88e6390_hidden_wait(chip);
+	if (err)
+		return err;
+
+	return mv88e6xxx_port_read(chip, PORT_RESERVED_1A_DATA_PORT,
+				    PORT_RESERVED_1A, val);
+}
+
+/* Check if the errata has already been applied. */
+static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip)
+{
+	int port;
+	int err;
+	u16 val;
+
+	for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
+		err = mv88e6390_hidden_read(chip, port, 0, &val);
+		if (err) {
+			dev_err(chip->dev,
+				"Error reading hidden register: %d\n", err);
+			return false;
+		}
+		if (val != 0x01c0)
+			return false;
+	}
+
+	return true;
+}
+
+/* The 6390 copper ports have an erratum that requires poking magic
+ * values into undocumented hidden registers and then performing a
+ * software reset.
+ */
+static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip)
+{
+	int port;
+	int err;
+
+	if (mv88e6390_setup_errata_applied(chip))
+		return 0;
+
+	/* Set the ports into blocking mode */
+	for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
+		err = mv88e6xxx_port_set_state(chip, port, BR_STATE_DISABLED);
+		if (err)
+			return err;
+	}
+
+	for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
+		err = mv88e6390_hidden_write(chip, port, 0, 0x01c0);
+		if (err)
+			return err;
+	}
+
+	return mv88e6xxx_software_reset(chip);
+}
+
 static int mv88e6xxx_setup(struct dsa_switch *ds)
 {
 	struct mv88e6xxx_chip *chip = ds->priv;
@@ -2403,6 +2504,12 @@
 
 	mutex_lock(&chip->reg_lock);
 
+	if (chip->info->ops->setup_errata) {
+		err = chip->info->ops->setup_errata(chip);
+		if (err)
+			goto unlock;
+	}
+
 	/* Cache the cmode of each port. */
 	for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
 		if (chip->info->ops->port_get_cmode) {
@@ -3201,6 +3308,7 @@
 
 static const struct mv88e6xxx_ops mv88e6190_ops = {
 	/* MV88E6XXX_FAMILY_6390 */
+	.setup_errata = mv88e6390_setup_errata,
 	.irl_init_all = mv88e6390_g2_irl_init_all,
 	.get_eeprom = mv88e6xxx_g2_get_eeprom8,
 	.set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3243,6 +3351,7 @@
 
 static const struct mv88e6xxx_ops mv88e6190x_ops = {
 	/* MV88E6XXX_FAMILY_6390 */
+	.setup_errata = mv88e6390_setup_errata,
 	.irl_init_all = mv88e6390_g2_irl_init_all,
 	.get_eeprom = mv88e6xxx_g2_get_eeprom8,
 	.set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3285,6 +3394,7 @@
 
 static const struct mv88e6xxx_ops mv88e6191_ops = {
 	/* MV88E6XXX_FAMILY_6390 */
+	.setup_errata = mv88e6390_setup_errata,
 	.irl_init_all = mv88e6390_g2_irl_init_all,
 	.get_eeprom = mv88e6xxx_g2_get_eeprom8,
 	.set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3374,6 +3484,7 @@
 
 static const struct mv88e6xxx_ops mv88e6290_ops = {
 	/* MV88E6XXX_FAMILY_6390 */
+	.setup_errata = mv88e6390_setup_errata,
 	.irl_init_all = mv88e6390_g2_irl_init_all,
 	.get_eeprom = mv88e6xxx_g2_get_eeprom8,
 	.set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3675,6 +3786,7 @@
 
 static const struct mv88e6xxx_ops mv88e6390_ops = {
 	/* MV88E6XXX_FAMILY_6390 */
+	.setup_errata = mv88e6390_setup_errata,
 	.irl_init_all = mv88e6390_g2_irl_init_all,
 	.get_eeprom = mv88e6xxx_g2_get_eeprom8,
 	.set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3722,6 +3834,7 @@
 
 static const struct mv88e6xxx_ops mv88e6390x_ops = {
 	/* MV88E6XXX_FAMILY_6390 */
+	.setup_errata = mv88e6390_setup_errata,
 	.irl_init_all = mv88e6390_g2_irl_init_all,
 	.get_eeprom = mv88e6xxx_g2_get_eeprom8,
 	.set_eeprom = mv88e6xxx_g2_set_eeprom8,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index f9ecb78..546651d 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -300,6 +300,11 @@
 };
 
 struct mv88e6xxx_ops {
+	/* Switch Setup Errata, called early in the switch setup to
+	 * allow any errata actions to be performed
+	 */
+	int (*setup_errata)(struct mv88e6xxx_chip *chip);
+
 	int (*ieee_pri_map)(struct mv88e6xxx_chip *chip);
 	int (*ip_pri_map)(struct mv88e6xxx_chip *chip);
 
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index f32f56a..b319100 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -251,6 +251,16 @@
 /* Offset 0x19: Port IEEE Priority Remapping Registers (4-7) */
 #define MV88E6095_PORT_IEEE_PRIO_REMAP_4567	0x19
 
+/* Offset 0x1a: Magic undocumented errata register */
+#define PORT_RESERVED_1A			0x1a
+#define PORT_RESERVED_1A_BUSY			BIT(15)
+#define PORT_RESERVED_1A_WRITE			BIT(14)
+#define PORT_RESERVED_1A_READ			0
+#define PORT_RESERVED_1A_PORT_SHIFT		5
+#define PORT_RESERVED_1A_BLOCK			(0xf << 10)
+#define PORT_RESERVED_1A_CTRL_PORT		4
+#define PORT_RESERVED_1A_DATA_PORT		5
+
 int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
 			u16 *val);
 int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg,
diff --git a/drivers/net/dsa/realtek-smi.c b/drivers/net/dsa/realtek-smi.c
index b4b839a..ad41ec6 100644
--- a/drivers/net/dsa/realtek-smi.c
+++ b/drivers/net/dsa/realtek-smi.c
@@ -347,16 +347,17 @@
 	struct device_node *mdio_np;
 	int ret;
 
-	mdio_np = of_find_compatible_node(smi->dev->of_node, NULL,
-					  "realtek,smi-mdio");
+	mdio_np = of_get_compatible_child(smi->dev->of_node, "realtek,smi-mdio");
 	if (!mdio_np) {
 		dev_err(smi->dev, "no MDIO bus node\n");
 		return -ENODEV;
 	}
 
 	smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev);
-	if (!smi->slave_mii_bus)
-		return -ENOMEM;
+	if (!smi->slave_mii_bus) {
+		ret = -ENOMEM;
+		goto err_put_node;
+	}
 	smi->slave_mii_bus->priv = smi;
 	smi->slave_mii_bus->name = "SMI slave MII";
 	smi->slave_mii_bus->read = realtek_smi_mdio_read;
@@ -371,10 +372,15 @@
 	if (ret) {
 		dev_err(smi->dev, "unable to register MDIO bus %s\n",
 			smi->slave_mii_bus->id);
-		of_node_put(mdio_np);
+		goto err_put_node;
 	}
 
 	return 0;
+
+err_put_node:
+	of_node_put(mdio_np);
+
+	return ret;
 }
 
 static int realtek_smi_probe(struct platform_device *pdev)
@@ -457,6 +463,8 @@
 	struct realtek_smi *smi = dev_get_drvdata(&pdev->dev);
 
 	dsa_unregister_switch(smi->ds);
+	if (smi->slave_mii_bus)
+		of_node_put(smi->slave_mii_bus->dev.of_node);
 	gpiod_set_value(smi->reset, 1);
 
 	return 0;
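The realtek-smi fixes are reference-count hygiene: of_get_compatible_child() (swapped in for of_find_compatible_node(), which wrongly continues the search through the whole tree rather than only the node's children) returns the child with its refcount raised, so every failure path must drop it, here via one err_put_node label, while the reference kept on success is dropped at remove time. A condensed sketch, all names and the compatible string hypothetical:

	#include <linux/errno.h>
	#include <linux/of.h>

	int example_register_bus(struct device_node *np);	/* hypothetical */

	/* Sketch: balance the refcount from of_get_compatible_child(). */
	static int example_probe_mdio(struct device_node *parent)
	{
		struct device_node *np;
		int ret;

		np = of_get_compatible_child(parent, "example,smi-mdio");
		if (!np)
			return -ENODEV;

		ret = example_register_bus(np);
		if (ret)
			goto err_put_node;

		return 0;	/* reference kept; dropped later in remove() */

	err_put_node:
		of_node_put(np);
		return ret;
	}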
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index d272dc6..b40d437 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -431,8 +431,6 @@
 #define MAC_MDIOSCAR_PA_WIDTH		5
 #define MAC_MDIOSCAR_RA_INDEX		0
 #define MAC_MDIOSCAR_RA_WIDTH		16
-#define MAC_MDIOSCAR_REG_INDEX		0
-#define MAC_MDIOSCAR_REG_WIDTH		21
 #define MAC_MDIOSCCDR_BUSY_INDEX	22
 #define MAC_MDIOSCCDR_BUSY_WIDTH	1
 #define MAC_MDIOSCCDR_CMD_INDEX		16
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 1e929a1..4666084 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1284,6 +1284,20 @@
 	}
 }
 
+static unsigned int xgbe_create_mdio_sca(int port, int reg)
+{
+	unsigned int mdio_sca, da;
+
+	da = (reg & MII_ADDR_C45) ? reg >> 16 : 0;
+
+	mdio_sca = 0;
+	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
+	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
+	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da);
+
+	return mdio_sca;
+}
+
 static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
 				   int reg, u16 val)
 {
@@ -1291,9 +1305,7 @@
 
 	reinit_completion(&pdata->mdio_complete);
 
-	mdio_sca = 0;
-	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
-	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
+	mdio_sca = xgbe_create_mdio_sca(addr, reg);
 	XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
 
 	mdio_sccd = 0;
@@ -1317,9 +1329,7 @@
 
 	reinit_completion(&pdata->mdio_complete);
 
-	mdio_sca = 0;
-	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
-	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
+	mdio_sca = xgbe_create_mdio_sca(addr, reg);
 	XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
 
 	mdio_sccd = 0;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 3b889ef..50dd6bf 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -29,9 +29,6 @@
 #define RES_RING_CSR	1
 #define RES_RING_CMD	2
 
-static const struct of_device_id xgene_enet_of_match[];
-static const struct acpi_device_id xgene_enet_acpi_match[];
-
 static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
 {
 	struct xgene_enet_raw_desc16 *raw_desc;
diff --git a/drivers/net/ethernet/aquantia/Kconfig b/drivers/net/ethernet/aquantia/Kconfig
index 7d623e9..c81d231 100644
--- a/drivers/net/ethernet/aquantia/Kconfig
+++ b/drivers/net/ethernet/aquantia/Kconfig
@@ -21,4 +21,9 @@
 	---help---
 	  This enables the support for the aQuantia AQtion(tm) Ethernet card.
 
+config AQFWD
+	tristate "aQuantia Forwarding driver"
+	depends on PCI && (X86_64 || ARM64)
+	---help---
+	  This enables support for the forwarding driver for the
+	  aQuantia AQtion(tm) Ethernet card.
 endif # NET_VENDOR_AQUANTIA
diff --git a/drivers/net/ethernet/aquantia/Makefile b/drivers/net/ethernet/aquantia/Makefile
index 4f4897b..67b8226 100644
--- a/drivers/net/ethernet/aquantia/Makefile
+++ b/drivers/net/ethernet/aquantia/Makefile
@@ -3,3 +3,4 @@
 #
 
 obj-$(CONFIG_AQTION) += atlantic/
+obj-$(CONFIG_AQFWD)  += atlantic-fwd/
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/Makefile b/drivers/net/ethernet/aquantia/atlantic-fwd/Makefile
new file mode 100644
index 0000000..ca94832
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/Makefile
@@ -0,0 +1,42 @@
+################################################################################
+#
+# aQuantia Ethernet Controller AQtion Linux Driver
+# Copyright(c) 2014-2017 aQuantia Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information: <rdc-drv@aquantia.com>
+# aQuantia Corporation, 105 E. Tasman Dr. San Jose, CA 95134, USA
+#
+################################################################################
+
+#
+# Makefile for the AQtion(tm) Ethernet driver
+#
+
+obj-$(CONFIG_AQFWD) += atlantic-fwd.o
+
+atlantic-fwd-objs := atl_fw.o \
+		     atl_hw.o \
+		     atl_main.o \
+		     atl_ring.o \
+		     atl_ethtool.o \
+		     atl_trace.o \
+		     atl_fwd.o \
+		     atl_compat.o \
+		     atl_hwmon.o
+
+CFLAGS_atl_trace.o := -I$(src)
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_common.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_common.h
new file mode 100644
index 0000000..f60358c
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_common.h
@@ -0,0 +1,371 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef _ATL_COMMON_H_
+#define _ATL_COMMON_H_
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/moduleparam.h>
+
+#define ATL_VERSION "1.0.15"
+
+struct atl_nic;
+
+#include "atl_compat.h"
+#include "atl_hw.h"
+
+#define ATL_MAX_QUEUES 8
+
+#include "atl_fwd.h"
+
+struct atl_rx_ring_stats {
+	uint64_t packets;
+	uint64_t bytes;
+	uint64_t linear_dropped;
+	uint64_t alloc_skb_failed;
+	uint64_t reused_head_page;
+	uint64_t reused_data_page;
+	uint64_t alloc_head_page;
+	uint64_t alloc_data_page;
+	uint64_t alloc_head_page_failed;
+	uint64_t alloc_data_page_failed;
+	uint64_t non_eop_descs;
+	uint64_t mac_err;
+	uint64_t csum_err;
+	uint64_t multicast;
+};
+
+struct atl_tx_ring_stats {
+	uint64_t packets;
+	uint64_t bytes;
+	uint64_t tx_busy;
+	uint64_t tx_restart;
+	uint64_t dma_map_failed;
+};
+
+struct atl_ring_stats {
+	union {
+		struct atl_rx_ring_stats rx;
+		struct atl_tx_ring_stats tx;
+	};
+};
+
+struct atl_ether_stats {
+	uint64_t rx_pause;
+	uint64_t tx_pause;
+	uint64_t rx_ether_drops;
+	uint64_t rx_ether_octets;
+	uint64_t rx_ether_pkts;
+	uint64_t rx_ether_broacasts;
+	uint64_t rx_ether_multicasts;
+	uint64_t rx_ether_crc_align_errs;
+	uint64_t rx_filter_host;
+	uint64_t rx_filter_lost;
+};
+
+struct atl_global_stats {
+	struct atl_rx_ring_stats rx;
+	struct atl_tx_ring_stats tx;
+
+	/* MSM counters can't be reset without full HW reset, so
+	 * store them in relative form:
+	 * eth[i] == HW_counter - eth_base[i] */
+	struct atl_ether_stats eth;
+	struct atl_ether_stats eth_base;
+};
+
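+/* RX filters of all types share one flat index space: VLAN filters
+ * first, then ethertype, then ntuple. The *_BASE values give each
+ * type's starting offset within that space. */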
+enum {
+	ATL_RXF_VLAN_BASE = 0,
+	ATL_RXF_VLAN_MAX = ATL_VLAN_FLT_NUM,
+	ATL_RXF_ETYPE_BASE = ATL_RXF_VLAN_BASE + ATL_RXF_VLAN_MAX,
+	ATL_RXF_ETYPE_MAX = ATL_ETYPE_FLT_NUM,
+	ATL_RXF_NTUPLE_BASE = ATL_RXF_ETYPE_BASE + ATL_RXF_ETYPE_MAX,
+	ATL_RXF_NTUPLE_MAX = ATL_NTUPLE_FLT_NUM,
+};
+
+enum atl_rxf_common_cmd {
+	ATL_RXF_EN = BIT(31),
+	ATL_RXF_RXQ_MSK = BIT(5) - 1,
+	ATL_RXF_ACT_SHIFT = 16,
+	ATL_RXF_ACT_MASK = BIT(3) - 1,
+	ATL_RXF_ACT_TOHOST = BIT(0) << ATL_RXF_ACT_SHIFT,
+};
+
+enum atl_ntuple_cmd {
+	ATL_NTC_EN = ATL_RXF_EN, /* Filter enabled */
+	ATL_NTC_V6 = BIT(30),	/* IPv6 mode -- only valid in filters
+				 * 0 and 4 */
+	ATL_NTC_SA = BIT(29),	/* Match source address */
+	ATL_NTC_DA = BIT(28),	/* Match destination address */
+	ATL_NTC_SP = BIT(27),	/* Match source port */
+	ATL_NTC_DP = BIT(26),	/* Match destination port */
+	ATL_NTC_PROTO = BIT(25), /* Match L4 proto */
+	ATL_NTC_ARP = BIT(24),
+	ATL_NTC_RXQ = BIT(23),	/* Assign Rx queue */
+	ATL_NTC_ACT_SHIFT = ATL_RXF_ACT_SHIFT,
+	ATL_NTC_RXQ_SHIFT = 8,
+	ATL_NTC_RXQ_MASK = ATL_RXF_RXQ_MSK << ATL_NTC_RXQ_SHIFT,
+	ATL_NTC_L4_MASK = BIT(3) - 1,
+	ATL_NTC_L4_TCP = 0,
+	ATL_NTC_L4_UDP = 1,
+	ATL_NTC_L4_SCTP = 2,
+	ATL_NTC_L4_ICMP = 3,
+};
+
+struct atl_rxf_ntuple {
+	union {
+		struct {
+			__be32 dst_ip4[ATL_RXF_NTUPLE_MAX];
+			__be32 src_ip4[ATL_RXF_NTUPLE_MAX];
+		};
+		struct {
+			__be32 dst_ip6[ATL_RXF_NTUPLE_MAX / 4][4];
+			__be32 src_ip6[ATL_RXF_NTUPLE_MAX / 4][4];
+		};
+	};
+	__be16 dst_port[ATL_RXF_NTUPLE_MAX];
+	__be16 src_port[ATL_RXF_NTUPLE_MAX];
+	uint32_t cmd[ATL_RXF_NTUPLE_MAX];
+	int count;
+};
+
+enum atl_vlan_cmd {
+	ATL_VLAN_EN = ATL_RXF_EN,
+	ATL_VLAN_RXQ = BIT(28),
+	ATL_VLAN_RXQ_SHIFT = 20,
+	ATL_VLAN_RXQ_MASK = ATL_RXF_RXQ_MSK << ATL_VLAN_RXQ_SHIFT,
+	ATL_VLAN_ACT_SHIFT = ATL_RXF_ACT_SHIFT,
+	ATL_VLAN_VID_MASK = BIT(12) - 1,
+};
+
+#define ATL_VID_MAP_LEN BITS_TO_LONGS(BIT(12))
+
+struct atl_rxf_vlan {
+	uint32_t cmd[ATL_RXF_VLAN_MAX];
+	int count;
+	unsigned long map[ATL_VID_MAP_LEN];
+	int vlans_active;
+	int promisc_count;
+};
+
+enum atl_etype_cmd {
+	ATL_ETYPE_EN = ATL_RXF_EN,
+	ATL_ETYPE_RXQ = BIT(29),
+	ATL_ETYPE_RXQ_SHIFT = 20,
+	ATL_ETYPE_RXQ_MASK = ATL_RXF_RXQ_MSK << ATL_ETYPE_RXQ_SHIFT,
+	ATL_ETYPE_ACT_SHIFT = ATL_RXF_ACT_SHIFT,
+	ATL_ETYPE_VAL_MASK = BIT(16) - 1,
+};
+
+struct atl_rxf_etype {
+	uint32_t cmd[ATL_RXF_ETYPE_MAX];
+	int count;
+};
+
+struct atl_queue_vec;
+
+#define ATL_NUM_FWD_RINGS ATL_MAX_QUEUES
+#define ATL_FWD_RING_BASE ATL_MAX_QUEUES /* Use TC 1 for offload
+					  * engine rings */
+#define ATL_NUM_MSI_VECS 32
+#define ATL_NUM_NON_RING_IRQS 1
+
+#define ATL_RXF_RING_ANY 32
+
+#define ATL_FWD_MSI_BASE (ATL_MAX_QUEUES + ATL_NUM_NON_RING_IRQS)
+
+enum atl_fwd_dir {
+	ATL_FWDIR_RX = 0,
+	ATL_FWDIR_TX = 1,
+	ATL_FWDIR_NUM,
+};
+
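+/* Bookkeeping for rings handed over to the forwarding offload API:
+ * a bitmap of ring slots in use per direction, the ring pointers
+ * themselves, and a bitmap of MSI vectors reserved for them. */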
+struct atl_fwd {
+	unsigned long ring_map[ATL_FWDIR_NUM];
+	struct atl_fwd_ring *rings[ATL_FWDIR_NUM][ATL_NUM_FWD_RINGS];
+	unsigned long msi_map;
+};
+
+struct atl_nic {
+	struct net_device *ndev;
+
+	struct atl_queue_vec *qvecs;
+	int nvecs;
+	struct atl_hw hw;
+	unsigned flags;
+	unsigned long state;
+	uint32_t priv_flags;
+	struct timer_list link_timer;
+	int max_mtu;
+	int requested_nvecs;
+	int requested_rx_size;
+	int requested_tx_size;
+	int rx_intr_delay;
+	int tx_intr_delay;
+	struct atl_global_stats stats;
+	spinlock_t stats_lock;
+	struct work_struct work;
+
+	struct atl_fwd fwd;
+
+	struct atl_rxf_ntuple rxf_ntuple;
+	struct atl_rxf_vlan rxf_vlan;
+	struct atl_rxf_etype rxf_etype;
+};
+
+/* Flags only modified with RTNL lock held */
+enum atl_nic_flags {
+	ATL_FL_MULTIPLE_VECTORS = BIT(0),
+	ATL_FL_WOL = BIT(1),
+};
+
+enum atl_nic_state {
+	ATL_ST_UP,
+	ATL_ST_CONFIGURED,
+	ATL_ST_ENABLED,
+	ATL_ST_WORK_SCHED,
+};
+
+#define ATL_PF(_name) ATL_PF_ ## _name
+#define ATL_PF_BIT(_name) ATL_PF_ ## _name ## _BIT
+#define ATL_DEF_PF_BIT(_name) ATL_PF_BIT(_name) = BIT(ATL_PF(_name))
+
+enum atl_priv_flags {
+	ATL_PF_LPB_SYS_PB,
+	ATL_PF_LPB_SYS_DMA,
+	/* ATL_PF_LPB_NET_DMA, */
+	ATL_PF_LPI_RX_MAC,
+	ATL_PF_LPI_TX_MAC,
+	ATL_PF_LPI_RX_PHY,
+	ATL_PF_LPI_TX_PHY,
+	ATL_PF_STATS_RESET,
+	ATL_PF_STRIP_PAD,
+};
+
+enum atl_priv_flag_bits {
+	ATL_DEF_PF_BIT(LPB_SYS_PB),
+	ATL_DEF_PF_BIT(LPB_SYS_DMA),
+	/* ATL_DEF_PF_BIT(LPB_NET_DMA), */
+
+	ATL_PF_LPB_MASK = ATL_PF_BIT(LPB_SYS_DMA) | ATL_PF_BIT(LPB_SYS_PB)
+		/* | ATL_PF_BIT(LPB_NET_DMA) */,
+
+	ATL_DEF_PF_BIT(LPI_RX_MAC),
+	ATL_DEF_PF_BIT(LPI_TX_MAC),
+	ATL_DEF_PF_BIT(LPI_RX_PHY),
+	ATL_DEF_PF_BIT(LPI_TX_PHY),
+	ATL_PF_LPI_MASK = ATL_PF_BIT(LPI_RX_MAC) | ATL_PF_BIT(LPI_TX_MAC) |
+		ATL_PF_BIT(LPI_RX_PHY) | ATL_PF_BIT(LPI_TX_PHY),
+
+	ATL_DEF_PF_BIT(STATS_RESET),
+
+	ATL_DEF_PF_BIT(STRIP_PAD),
+
+	ATL_PF_RW_MASK = ATL_PF_LPB_MASK | ATL_PF_BIT(STATS_RESET) |
+		ATL_PF_BIT(STRIP_PAD),
+	ATL_PF_RO_MASK = ATL_PF_LPI_MASK,
+};
+
+#define ATL_MAX_MTU (16352 - ETH_FCS_LEN - ETH_HLEN)
+
+#define ATL_MAX_RING_SIZE (8192 - 8)
+#define ATL_RING_SIZE 4096
+
+extern const char atl_driver_name[];
+
+extern const struct ethtool_ops atl_ethtool_ops;
+
+extern int atl_max_queues;
+extern unsigned atl_rx_linear;
+extern unsigned atl_min_intr_delay;
+
+/* Logging convenience macros.
+ *
+ * atl_dev_xxx are for low-level contexts and implicitly reference
+ * struct atl_hw *hw;
+ *
+ * atl_nic_xxx are for high-level contexts and implicitly reference
+ * struct atl_nic *nic; */
+#define atl_dev_dbg(fmt, args...)			\
+	dev_dbg(&hw->pdev->dev, fmt, ## args)
+#define atl_dev_info(fmt, args...)			\
+	dev_info(&hw->pdev->dev, fmt, ## args)
+#define atl_dev_warn(fmt, args...)			\
+	dev_warn(&hw->pdev->dev, fmt, ## args)
+#define atl_dev_err(fmt, args...)			\
+	dev_err(&hw->pdev->dev, fmt, ## args)
+
+#define atl_nic_dbg(fmt, args...)		\
+	dev_dbg(&nic->hw.pdev->dev, fmt, ## args)
+#define atl_nic_info(fmt, args...)		\
+	dev_info(&nic->hw.pdev->dev, fmt, ## args)
+#define atl_nic_warn(fmt, args...)		\
+	dev_warn(&nic->hw.pdev->dev, fmt, ## args)
+#define atl_nic_err(fmt, args...)		\
+	dev_err(&nic->hw.pdev->dev, fmt, ## args)
+
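+/* Declare a module parameter backed by the atl_-prefixed global of
+ * the same name, e.g. atl_module_param(rx_linear, uint, 0444)
+ * exposes atl_rx_linear as the "rx_linear" parameter. */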
+#define atl_module_param(_name, _type, _mode)			\
+	module_param_named(_name, atl_ ## _name, _type, _mode)
+
+netdev_tx_t atl_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+int atl_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid);
+int atl_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid);
+void atl_set_rx_mode(struct net_device *ndev);
+int atl_set_features(struct net_device *ndev, netdev_features_t features);
+void atl_get_stats64(struct net_device *ndev,
+	struct rtnl_link_stats64 *stats);
+int atl_setup_datapath(struct atl_nic *nic);
+void atl_clear_datapath(struct atl_nic *nic);
+int atl_start_rings(struct atl_nic *nic);
+void atl_stop_rings(struct atl_nic *nic);
+int atl_alloc_rings(struct atl_nic *nic);
+void atl_free_rings(struct atl_nic *nic);
+irqreturn_t atl_ring_irq(int irq, void *priv);
+void atl_start_hw_global(struct atl_nic *nic);
+int atl_intr_init(struct atl_nic *nic);
+void atl_intr_release(struct atl_nic *nic);
+int atl_hw_reset(struct atl_hw *hw);
+int atl_fw_init(struct atl_hw *hw);
+int atl_reconfigure(struct atl_nic *nic);
+void atl_reset_stats(struct atl_nic *nic);
+void atl_update_global_stats(struct atl_nic *nic);
+void atl_set_loopback(struct atl_nic *nic, int idx, bool on);
+void atl_set_intr_mod(struct atl_nic *nic);
+void atl_update_ntuple_flt(struct atl_nic *nic, int idx);
+int atl_hwsem_get(struct atl_hw *hw, int idx);
+void atl_hwsem_put(struct atl_hw *hw, int idx);
+int __atl_msm_read(struct atl_hw *hw, uint32_t addr, uint32_t *val);
+int atl_msm_read(struct atl_hw *hw, uint32_t addr, uint32_t *val);
+int __atl_msm_write(struct atl_hw *hw, uint32_t addr, uint32_t val);
+int atl_msm_write(struct atl_hw *hw, uint32_t addr, uint32_t val);
+int atl_update_eth_stats(struct atl_nic *nic);
+void atl_adjust_eth_stats(struct atl_ether_stats *stats,
+	struct atl_ether_stats *base, bool add);
+void atl_fwd_release_rings(struct atl_nic *nic);
+int atl_get_lpi_timer(struct atl_nic *nic, uint32_t *lpi_delay);
+int atl_mdio_hwsem_get(struct atl_hw *hw);
+void atl_mdio_hwsem_put(struct atl_hw *hw);
+int __atl_mdio_read(struct atl_hw *hw, uint8_t prtad, uint8_t mmd,
+	uint16_t addr, uint16_t *val);
+int atl_mdio_read(struct atl_hw *hw, uint8_t prtad, uint8_t mmd,
+	uint16_t addr, uint16_t *val);
+int __atl_mdio_write(struct atl_hw *hw, uint8_t prtad, uint8_t mmd,
+	uint16_t addr, uint16_t val);
+int atl_mdio_write(struct atl_hw *hw, uint8_t prtad, uint8_t mmd,
+	uint16_t addr, uint16_t val);
+void atl_refresh_rxfs(struct atl_nic *nic);
+void atl_schedule_work(struct atl_nic *nic);
+int atl_hwmon_init(struct atl_nic *nic);
+
+#endif
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_compat.c b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_compat.c
new file mode 100644
index 0000000..62a56aa
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_compat.c
@@ -0,0 +1,157 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * Portions Copyright (C) various contributors (see specific commit references)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include "atl_common.h"
+#include "atl_ring.h"
+#include <linux/msi.h>
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+
+#ifdef ATL_COMPAT_PCI_IRQ_VECTOR
+/* From commit aff171641d181ea573380efc3f559c9de4741fc5 */
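+/* Backport of pci_irq_vector(): map a zero-based vector index to the
+ * Linux irq number for MSI-X, MSI and legacy INTx setups. */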
+int atl_compat_pci_irq_vector(struct pci_dev *dev, unsigned int nr)
+{
+	if (dev->msix_enabled) {
+		struct msi_desc *entry;
+		int i = 0;
+
+		for_each_pci_msi_entry(entry, dev) {
+			if (i == nr)
+				return entry->irq;
+			i++;
+		}
+		WARN_ON_ONCE(1);
+		return -EINVAL;
+	}
+
+	if (dev->msi_enabled) {
+		struct msi_desc *entry = first_pci_msi_entry(dev);
+
+		if (WARN_ON_ONCE(nr >= entry->nvec_used))
+			return -EINVAL;
+	} else {
+		if (WARN_ON_ONCE(nr > 0))
+			return -EINVAL;
+	}
+
+	return dev->irq + nr;
+}
+
+#endif
+
+#ifdef ATL_COMPAT_PCI_ALLOC_IRQ_VECTORS_AFFINITY
+
+void atl_compat_set_affinity(int vector, struct atl_queue_vec *qvec)
+{
+	cpumask_t *cpumask = qvec ? &qvec->affinity_hint : NULL;
+
+	irq_set_affinity_hint(vector, cpumask);
+}
+
+void atl_compat_calc_affinities(struct atl_nic *nic)
+{
+	struct pci_dev *pdev = nic->hw.pdev;
+	int i;
+	unsigned int cpu;
+
+	get_online_cpus();
+	cpu = cpumask_first(cpu_online_mask);
+
+	for (i = 0; i < nic->nvecs; i++) {
+		cpumask_t *cpumask = &nic->qvecs[i].affinity_hint;
+		int vector;
+
+		/* If some cpus went offline since allocating
+		 * vectors, leave the remaining vectors' affinity
+		 * unset.
+		 */
+		if (cpu >= nr_cpumask_bits)
+			break;
+
+		cpumask_clear(cpumask);
+		cpumask_set_cpu(cpu, cpumask);
+		cpu = cpumask_next(cpu, cpu_online_mask);
+		vector = pci_irq_vector(pdev, i + ATL_NUM_NON_RING_IRQS);
+	}
+	put_online_cpus();
+}
+
+/* from commit 6f9a22bc5775d231ab8fbe2c2f3c88e45e3e7c28 */
+static int irq_calc_affinity_vectors(int minvec, int maxvec,
+	const struct irq_affinity *affd)
+{
+	int resv = affd->pre_vectors + affd->post_vectors;
+	int vecs = maxvec - resv;
+	int cpus;
+
+	if (resv > minvec)
+		return 0;
+
+	/* Stabilize the cpumasks */
+	get_online_cpus();
+	cpus = cpumask_weight(cpu_online_mask);
+	put_online_cpus();
+
+	return min(cpus, vecs) + resv;
+}
+
+/* based on commit 402723ad5c625ee052432698ae5e56b02d38d4ec */
+int atl_compat_pci_alloc_irq_vectors_affinity(struct pci_dev *dev,
+	unsigned int min_vecs, unsigned int max_vecs, unsigned int flags,
+	const struct irq_affinity *affd)
+{
+	static const struct irq_affinity msi_default_affd;
+	int vecs = -ENOSPC;
+
+	if (flags & PCI_IRQ_AFFINITY) {
+		if (!affd)
+			affd = &msi_default_affd;
+	} else {
+		if (WARN_ON(affd))
+			affd = NULL;
+	}
+
+	if (affd)
+		max_vecs = irq_calc_affinity_vectors(min_vecs, max_vecs, affd);
+
+	if (flags & PCI_IRQ_MSIX) {
+		struct msix_entry *entries;
+		int i;
+
+		entries = kcalloc(max_vecs, sizeof(*entries), GFP_KERNEL);
+		if (!entries)
+			return -ENOMEM;
+
+		for (i = 0; i < max_vecs; i++)
+			entries[i].entry = i;
+
+		vecs = pci_enable_msix_range(dev, entries, min_vecs, max_vecs);
+		kfree(entries);
+		if (vecs > 0)
+			return vecs;
+	}
+
+	if (flags & PCI_IRQ_MSI) {
+		vecs = pci_enable_msi_range(dev, min_vecs, max_vecs);
+		if (vecs > 0)
+			return vecs;
+	}
+
+	/* use legacy irq if allowed */
+	if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1) {
+		pci_intx(dev, 1);
+		return 1;
+	}
+
+	return vecs;
+}
+
+#endif
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_compat.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_compat.h
new file mode 100644
index 0000000..0ec20a3
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_compat.h
@@ -0,0 +1,177 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * Portions Copyright (C) various contributors (see specific commit references)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef _ATL_COMPAT_H_
+#define _ATL_COMPAT_H_
+
+#include <linux/version.h>
+
+#include <linux/pci.h>
+#include <linux/msi.h>
+
+struct atl_queue_vec;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)
+
+/* introduced in commit 686fef928bba6be13cabe639f154af7d72b63120 */
+static inline void timer_setup(struct timer_list *timer,
+	void (*callback)(struct timer_list *), unsigned int flags)
+{
+	setup_timer(timer, (void (*)(unsigned long))callback,
+			(unsigned long)timer);
+}
+
+#endif	/* 4.14.0 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)
+
+/* ->ndo_get_stats64 return type was changed to void in commit
+ * bc1f44709cf27fb2a5766cadafe7e2ad5e9cb221. It's safe to just cast
+ * the pointer to avoid the warning because the only place
+ * ->ndo_get_stats64 was invoked before the change ignored the return
+ * value. */
+#define ATL_COMPAT_CAST_NDO_GET_STATS64
+
+#endif	/* 4.11.0 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)
+
+/* introduced in commit 94842b4fc4d6b1691cfc86c6f5251f299d27f4ba */
+#define ETHTOOL_LINK_MODE_2500baseT_Full_BIT 47
+#define ETHTOOL_LINK_MODE_5000baseT_Full_BIT 48
+
+/* from commit 20e407e195b29a4f5a18d713a61f54a75f992bd5 */
+struct irq_affinity {
+	int	pre_vectors;
+	int	post_vectors;
+};
+
+#define ATL_COMPAT_PCI_ALLOC_IRQ_VECTORS_AFFINITY
+struct atl_nic;
+int atl_compat_pci_alloc_irq_vectors_affinity(struct pci_dev *dev,
+	unsigned int min_vecs, unsigned int max_vecs, unsigned int flags,
+	const struct irq_affinity *affd);
+static inline int pci_alloc_irq_vectors_affinity(struct pci_dev *dev,
+	unsigned int min_vecs, unsigned int max_vecs, unsigned int flags,
+	const struct irq_affinity *affd)
+{
+	return atl_compat_pci_alloc_irq_vectors_affinity(dev, min_vecs,
+		max_vecs, flags, affd);
+}
+
+#else  /* 4.10.0 */
+
+#define ATL_HAVE_MINMAX_MTU
+
+#endif	/* 4.10.0 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)
+
+/* from commit be9d2e8927cef02076bb7b5b2637cd9f4be2e8df */
+static inline int
+pci_request_mem_regions(struct pci_dev *pdev, const char *name)
+{
+	return pci_request_selected_regions(pdev,
+			    pci_select_bars(pdev, IORESOURCE_MEM), name);
+}
+
+#define ATL_COMPAT_PCI_IRQ_VECTOR
+int atl_compat_pci_irq_vector(struct pci_dev *dev, unsigned int nr);
+static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
+{
+	return atl_compat_pci_irq_vector(dev, nr);
+}
+
+static inline void pci_free_irq_vectors(struct pci_dev *dev)
+{
+	pci_disable_msix(dev);
+	pci_disable_msi(dev);
+}
+
+static inline int
+pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
+		      unsigned int max_vecs, unsigned int flags)
+{
+	return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, flags,
+					      NULL);
+}
+
+/* from commit 4fe0d154880bb6eb833cbe84fa6f385f400f0b9c */
+#define PCI_IRQ_LEGACY		(1 << 0) /* allow legacy interrupts */
+#define PCI_IRQ_MSI		(1 << 1) /* allow MSI interrupts */
+#define PCI_IRQ_MSIX		(1 << 2) /* allow MSI-X interrupts */
+#define PCI_IRQ_AFFINITY	(1 << 3) /* auto-assign affinity */
+
+#endif /* 4.8.0 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+
+/* from commit 1dff8083a024650c75a9c961c38082473ceae8cf */
+#define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
+#endif	/* 4.7.0 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0)
+
+/* from commit fe896d1878949ea92ba547587bc3075cc688fb8f */
+static inline void page_ref_inc(struct page *page)
+{
+	atomic_inc(&page->_count);
+}
+
+/* introduced in commit 795bb1c00dd338aa0d12f9a7f1f4776fb3160416 */
+#define napi_consume_skb(__skb, __budget) dev_consume_skb_any(__skb)
+
+/* from commit 3f1ac7a700d039c61d8d8b99f28d605d489a60cf */
+#define ETHTOOL_LINK_MODE_100baseT_Full_BIT 3
+#define ETHTOOL_LINK_MODE_1000baseT_Full_BIT 5
+#define ETHTOOL_LINK_MODE_10000baseT_Full_BIT 12
+
+/* IPv6 NFC API introduced in commit
+ * 72bb68721f80a1441e871b6afc9ab0b3793d5031 */
+
+/* Define the IPv6 constants for kernels not supporting IPv6 in the
+ * NFC API to reduce the number of #ifdefs in the code. The constants
+ * themselves may already be defined for RSS hash management API, so
+ * #undef them first */
+#undef TCP_V6_FLOW
+#define TCP_V6_FLOW 0x05
+
+#undef UDP_V6_FLOW
+#define UDP_V6_FLOW 0x06
+
+#undef SCTP_V6_FLOW
+#define SCTP_V6_FLOW 0x07
+
+#undef IPV6_USER_FLOW
+#define IPV6_USER_FLOW 0x0e
+#define IPV4_USER_FLOW IP_USER_FLOW
+
+#else
+
+/* introduced in commit 3f1ac7a700d039c61d8d8b99f28d605d489a60cf */
+#define ATL_HAVE_ETHTOOL_KSETTINGS
+
+/* introduced in commit 72bb68721f80a1441e871b6afc9ab0b3793d5031 */
+#define ATL_HAVE_IPV6_NTUPLE
+
+#endif	/* 4.6.0 */
+
+#ifdef ATL_COMPAT_PCI_ALLOC_IRQ_VECTORS_AFFINITY
+void atl_compat_set_affinity(int vector, struct atl_queue_vec *qvec);
+void atl_compat_calc_affinities(struct atl_nic *nic);
+#else  /* ATL_COMPAT_PCI_ALLOC_IRQ_VECTORS_AFFINITY */
+static inline void atl_compat_set_affinity(int vector, struct atl_queue_vec *qvec)
+{}
+static inline void atl_compat_calc_affinities(struct atl_nic *nic)
+{}
+#endif	/* ATL_COMPAT_PCI_ALLOC_IRQ_VECTORS_AFFINITY */
+
+#endif
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_desc.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_desc.h
new file mode 100644
index 0000000..63923ef
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_desc.h
@@ -0,0 +1,143 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef _ATL_DESC_H_
+#define _ATL_DESC_H_
+
+#if defined(__LITTLE_ENDIAN_BITFIELD)
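+/* Hardware descriptor layouts, expressed as C bitfields. The trailing
+ * comments give each field's starting bit offset within the 128-bit
+ * descriptor. */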
+struct atl_tx_ctx {
+	unsigned long long :40; //0
+	unsigned tun_len:8;     //40
+	unsigned out_len:16;    //48
+	unsigned type:3;        //64
+	unsigned idx:1;         //67
+	unsigned vlan_tag:16;   //68
+	unsigned cmd:4;         //84
+	unsigned l2_len:7;      //88
+	unsigned l3_len:9;      //95
+	unsigned l4_len:8;      //104
+	unsigned mss_len:16;    //112
+} __attribute__((packed));
+
+struct atl_tx_desc {
+	unsigned long long daddr:64; //0
+	unsigned type:3;        //64
+	unsigned :1;            //67
+	unsigned len:16;        //68
+	unsigned dd:1;          //84
+	unsigned eop:1;         //85
+	unsigned cmd:8;         //86
+	unsigned :14;           //94
+	unsigned ct_idx:1;      //108
+	unsigned ct_en:1;       //109
+	unsigned pay_len:18;    //110
+} __attribute__((packed));
+
+#define ATL_DATA_PER_TXD 16384 // HW limit per descriptor, despite ->len being 16 bits
+
+enum atl_tx_desc_type {
+	tx_desc_type_desc = 1,
+	tx_desc_type_context = 2,
+};
+
+enum atl_tx_desc_cmd {
+	tx_desc_cmd_vlan = 1,
+	tx_desc_cmd_fcs = 2,
+	tx_desc_cmd_ipv4cs = 4,
+	tx_desc_cmd_l4cs = 8,
+	tx_desc_cmd_lso = 0x10,
+	tx_desc_cmd_wb = 0x20,
+};
+
+enum atl_tx_ctx_cmd {
+	ctx_cmd_snap = 1, // SNAP / ~802.3
+	ctx_cmd_ipv6 = 2, // IPv6 / ~IPv4
+	ctx_cmd_tcp = 4,  // TCP / ~UDP
+};
+
+struct atl_rx_desc {
+	uint64_t daddr;			//0
+	union {
+		struct {
+			unsigned dd:1;		//64
+			uint64_t haddr63:63;	//65
+		};
+		uint64_t haddr;
+	};
+} __attribute__((packed));
+
+struct atl_rx_desc_wb {
+	unsigned rss_type:4;    //0
+	unsigned pkt_type:8;    //4
+	unsigned rdm_err:1;     //12
+	unsigned :6;            //13
+	unsigned rx_cntl:2;     //19
+	unsigned sph:1;         //21
+	unsigned hdr_len:10;    //22
+	unsigned rss_hash:32;   //32
+	unsigned dd:1;          //64
+	unsigned eop:1;         //65
+	unsigned rx_stat:4;     //66
+	unsigned rx_estat:6;    //70
+	unsigned rsc_cnt:4;     //76
+	unsigned pkt_len:16;    //80
+	unsigned next_desp:16;  //96
+	unsigned vlan_tag:16;   //112
+} __attribute__((packed));
+
+enum atl_rx_stat {
+	atl_rx_stat_mac_err = 1,
+	atl_rx_stat_ipv4_err = 2,
+	atl_rx_stat_l4_err = 4,
+	atl_rx_stat_l4_valid = 8,
+	atl_rx_stat_err_msk = atl_rx_stat_mac_err | atl_rx_stat_ipv4_err |
+		atl_rx_stat_l4_err,
+};
+
+enum atl_rx_estat {
+	atl_rx_estat_vlan_stripped = 1,
+	atl_rx_estat_l2_ucast_match = 2,
+	atl_rx_estat_vxlan = 1 << 2,
+	atl_rx_estat_nvgre = 2 << 2,
+	atl_rx_estat_geneve = 3 << 2,
+	atl_rx_estat_tun_msk = 3 << 2,
+	atl_rx_estat_outer_ipv4_err = 16,
+	atl_rx_estat_outer_ipv4_valid = 32,
+};
+
+enum atl_rx_pkt_type {
+	atl_rx_pkt_type_ipv4 = 0,
+	atl_rx_pkt_type_ipv6 = 1,
+	atl_rx_pkt_type_l3_other = 2,
+	atl_rx_pkt_type_l3_arp = 3,
+	atl_rx_pkt_type_l3_msk = 3,
+	atl_rx_pkt_type_tcp = 0 << 2,
+	atl_rx_pkt_type_udp = 1 << 2,
+	atl_rx_pkt_type_sctp = 2 << 2,
+	atl_rx_pkt_type_icmp = 3 << 2,
+	atl_rx_pkt_type_l4_msk = ((1 << 3) - 1) << 2,
+	atl_rx_pkt_type_vlan = 1 << 5,
+	atl_rx_pkt_type_dbl_vlan = 2 << 5,
+	atl_rx_pkt_type_vlan_msk = ((1 << 2) - 1) << 5,
+};
+
+#else // defined(__LITTLE_ENDIAN_BITFIELD)
+#error XXX Fix bigendian bitfields
+#endif // defined(__LITTLE_ENDIAN_BITFIELD)
+
+union atl_desc {
+	struct atl_rx_desc rx;
+	struct atl_rx_desc_wb wb;
+	struct atl_tx_ctx ctx;
+	struct atl_tx_desc tx;
+	uint8_t raw[16];
+} __attribute__((packed));
+
+
+#endif
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_drviface.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_drviface.h
new file mode 100644
index 0000000..02449e8
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_drviface.h
@@ -0,0 +1,419 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef _ATL_DRVIFACE_H_
+#define _ATL_DRVIFACE_H_
+
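+/* POSIX-style aliases (not provided by kernel headers) for the FW
+ * interface structs below. */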
+typedef uint16_t in_port_t;
+typedef uint32_t in_addr_t;
+
+struct __attribute__((packed)) offloadKAv4 {
+    uint32_t timeout;
+    in_port_t local_port;
+    in_port_t remote_port;
+    uint8_t remote_mac_addr[6];
+    uint16_t win_size;
+    uint32_t seq_num;
+    uint32_t ack_num;
+    in_addr_t local_ip;
+    in_addr_t remote_ip;
+};
+
+struct __attribute__((packed)) offloadKAv6 {
+    uint32_t timeout;
+    in_port_t local_port;
+    in_port_t remote_port;
+    uint8_t remote_mac_addr[6];
+    uint16_t win_size;
+    uint32_t seq_num;
+    uint32_t ack_num;
+    struct in6_addr local_ip;
+    struct in6_addr remote_ip;
+};
+
+struct __attribute__((packed)) offloadIPInfo {
+    uint8_t v4LocalAddrCount;
+    uint8_t v4AddrCount;
+    uint8_t v6LocalAddrCount;
+    uint8_t v6AddrCount;
+    // FW will add the base to the following offset fields and will treat them as pointers.
+    // The offsets are relative to the start of this struct, so the struct is self-contained.
+    // in_addr_t *
+    uint32_t v4AddrOfft;
+    // uint8_t *
+    uint32_t v4PrefixOfft;
+    // in6_addr *
+    uint32_t v6AddrOfft;
+    // uint8_t *
+    uint32_t v6PrefixOfft;
+};
+
+struct __attribute__((packed)) offloadPortInfo {
+    uint16_t UDPPortCount;
+    uint16_t TCPPortCount;
+    // in_port_t *
+    uint32_t UDPPortOfft;       // See the comment in the offloadIPInfo struct
+    // in_port_t *
+    uint32_t TCPPortOfft;
+};
+
+struct __attribute__((packed))  offloadKAInfo {
+    uint16_t v4KACount;
+    uint16_t v6KACount;
+    uint32_t retryCount;
+    uint32_t retryInterval;
+    // struct offloadKAv4 *
+    uint32_t v4KAOfft;          // See the comment in the offloadIPInfo struct
+    // struct offloadKAv6 *
+    uint32_t v6KAOfft;
+};
+
+struct  __attribute__((packed)) offloadRRInfo {
+    uint32_t RRCount;
+    uint32_t RRBufLen;
+    // Offset to the RR index table, relative to the start of the
+    // offloadRRInfo struct. The indices themselves are relative to the
+    // start of the RR buffer; FW will add the buffer address and treat
+    // them as pointers.
+    // uint8_t **
+    uint32_t RRIdxOfft;
+    // Offset to the RR buffer, relative to the start of the offloadRRInfo struct.
+    // uint8_t *
+    uint32_t RRBufOfft;
+};
+
+struct __attribute__((packed)) offloadInfo {
+    uint32_t version;               // = 0 until the format stabilizes
+    uint32_t len;                   // The whole structure length including the variable-size buf
+    uint8_t macAddr[8];
+    struct offloadIPInfo ips;
+    struct offloadPortInfo ports;
+    struct offloadKAInfo kas;
+    struct offloadRRInfo rrs;
+    uint8_t buf[0];
+};
+
+#define FW_PACK_STRUCT __attribute__((packed))
+
+#define DRV_REQUEST_SIZE 3072
+#define DRV_MSG_PING            0x01
+#define DRV_MSG_ARP             0x02
+#define DRV_MSG_INJECT          0x03
+#define DRV_MSG_WOL_ADD         0x04
+#define DRV_MSG_WOL_REMOVE      0x05
+#define DRV_MSG_ENABLE_WAKEUP   0x06
+#define DRV_MSG_MSM             0x07
+#define DRV_MSG_PROVISIONING    0x08
+#define DRV_MSG_OFFLOAD_ADD     0x09
+#define DRV_MSG_OFFLOAD_REMOVE  0x0A
+#define DRV_MSG_MSM_EX          0x0B
+#define DRV_MSG_SMBUS_PROXY     0x0C
+
+#define DRV_PROV_APPLY         1
+#define DRV_PROV_REPLACE       2
+#define DRV_PROV_ADD           3
+
+#define FW_RPC_INJECT_PACKET_LEN 1514U
+
+typedef enum {
+    EVENT_DRIVER_ENABLE_WOL
+} eDriverEvent;
+
+//typedef enum {
+//    HOST_UNINIT = 0,
+//    HOST_RESET,
+//    HOST_INIT,
+//    HOST_RESERVED,
+//    HOST_SLEEP,
+//    HOST_INVALID
+//} hostState_t;
+
+struct drvMsgPing {
+    uint32_t ping;
+} FW_PACK_STRUCT;
+
+union IPAddr {
+    struct
+    {
+        uint8_t addr[16];
+    } FW_PACK_STRUCT v6;
+    struct
+    {
+        uint8_t padding[12];
+        uint8_t addr[4];
+    } FW_PACK_STRUCT v4;
+} FW_PACK_STRUCT;
+
+struct drvMsgArp {
+    uint8_t macAddr[6];
+    uint32_t uIpAddrCnt;
+    struct
+    {
+        union IPAddr addr;
+        union IPAddr mask;
+    } FW_PACK_STRUCT ip[1];
+} FW_PACK_STRUCT;
+
+struct drvMsgInject {
+    uint32_t len;
+    uint8_t packet[FW_RPC_INJECT_PACKET_LEN];
+} FW_PACK_STRUCT;
+
+enum ndisPmWoLPacket {
+    ndisPMWoLPacketUnspecified = 0,
+    ndisPMWoLPacketBitmapPattern,
+    ndisPMWoLPacketMagicPacket,
+    ndisPMWoLPacketIPv4TcpSyn,
+    ndisPMWoLPacketIPv6TcpSyn,
+    ndisPMWoLPacketEapolRequestIdMessage,
+    ndisPMWoLPacketMaximum
+};
+
+enum aqPmWoLPacket {
+    aqPMWoLPacketUnspecified = 0x10000,
+    aqPMWoLPacketArp,
+    aqPMWoLPacketIPv4Ping,
+    aqPMWoLPacketIpv6NsPacket,
+    aqPMWoLPacketIpv6Ping,
+    aqPMWoLReasonLinkUp,
+    aqPMWoLReasonLinkDown,
+    aqPMWoLPacketMaximum
+};
+
+enum ndisPmProtocolOffloadType {
+    ndisPMProtocolOffloadIdUnspecified,
+    ndisPMProtocolOffloadIdIPv4ARP,
+    ndisPMProtocolOffloadIdIPv6NS,
+    ndisPMProtocolOffload80211RSNRekey,
+    ndisPMProtocolOffloadIdMaximum
+};
+
+struct drvMsgEnableWakeup {
+    uint32_t patternMaskWindows;
+    uint32_t patternMaskAquantia;
+    uint32_t patternMaskOther;
+    uint32_t offloadsMaskWindows;
+    uint32_t offloadsMaskAquantia;
+} FW_PACK_STRUCT;
+
+struct drvMsgWoLAddIpv4TcpSynWoLPacketParameters {
+    uint32_t flags;
+    union {
+        uint8_t v8[4];
+        uint32_t v32;
+    } IPv4SourceAddress;
+    union {
+        uint8_t v8[4];
+        uint32_t v32;
+    } IPv4DestAddress;
+    union {
+        uint8_t v8[2];
+        uint16_t v16;
+    } TCPSourcePortNumber;
+    union {
+        uint8_t v8[2];
+        uint16_t v16;
+    } TCPDestPortNumber;
+} FW_PACK_STRUCT;
+
+struct drvMsgWoLAddIpv6TcpSynWoLPacketParameters {
+    uint32_t flags;
+    union {
+        uint8_t v8[16];
+        uint32_t v32[4];
+    } IPv6SourceAddress;
+    union {
+        uint8_t v8[16];
+        uint32_t v32[4];
+    } IPv6DestAddress;
+    union {
+        uint8_t v8[2];
+        uint16_t v16;
+    } TCPSourcePortNumber;
+    union {
+        uint8_t v8[2];
+        uint16_t v16;
+    } TCPDestPortNumber;
+} FW_PACK_STRUCT;
+
+struct drvMsgWoLAddIpv4PingWoLPacketParameters {
+    uint32_t flags;
+    union {
+        uint8_t v8[4];
+        uint32_t v32;
+    } IPv4SourceAddress;
+    union {
+        uint8_t v8[4];
+        uint32_t v32;
+    } IPv4DestAddress;
+} FW_PACK_STRUCT;
+
+struct drvMsgWoLAddIpv6PingWoLPacketParameters {
+    uint32_t flags;
+    union {
+        uint8_t v8[16];
+        uint32_t v32[4];
+    } IPv6SourceAddress;
+    union {
+        uint8_t v8[16];
+        uint32_t v32[4];
+    } IPv6DestAddress;
+} FW_PACK_STRUCT;
+
+struct drvMsgWoLAddEapolRequestIdMessageWoLPacketParameters {
+    uint32_t flags;
+    union {
+        uint8_t v8[4];
+        uint32_t v32;
+    } IPv4SourceAddress;
+    union {
+        uint8_t v8[4];
+        uint32_t v32;
+    } IPv4DestAddress;
+} FW_PACK_STRUCT;
+
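+/* Bitmap with one bit for each of the 4096 possible VLAN IDs */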
+struct drvMsgWoLAddBitmapPattern {
+    uint32_t Flags;
+    uint32_t MaskOffset;
+    uint32_t MaskSize;
+    uint32_t PatternOffset;
+    uint32_t PatternSize;
+} FW_PACK_STRUCT;
+
+struct drvMsgWoLAddMagicPacketPattern {
+    uint8_t macAddr[6];
+} FW_PACK_STRUCT;
+
+struct drvMsgWoLAddArpWoLPacketParameters {
+    uint32_t flags;
+    union {
+        uint8_t v8[4];
+        uint32_t v32;
+    } IPv4Address;
+} FW_PACK_STRUCT;
+
+struct drvMsgWoLAddLinkUpWoLParameters {
+    uint32_t timeout;
+} FW_PACK_STRUCT;
+
+struct drvMsgWoLAddLinkDownWoLParameters {
+    uint32_t timeout;
+} FW_PACK_STRUCT;
+
+struct drvMsgWoLAdd {
+    uint32_t priority; // Currently not used
+    uint32_t packetType; // One of ndisPmWoLPacket or aqPmWoLPacket
+    uint32_t patternId; // Id to save - will be used in remove message
+    uint32_t nextWoLPatternOffset; // For chaining multiple additions in one request
+
+    // Depends on `packetType`
+    union _WOL_PATTERN {
+        struct drvMsgWoLAddIpv4TcpSynWoLPacketParameters wolIpv4TcpSyn;
+        struct drvMsgWoLAddIpv6TcpSynWoLPacketParameters wolIpv6TcpSyn;
+        struct drvMsgWoLAddEapolRequestIdMessageWoLPacketParameters wolEapolRequestIdMessage;
+        struct drvMsgWoLAddBitmapPattern wolBitmap;
+        struct drvMsgWoLAddMagicPacketPattern wolMagicPacket;
+        struct drvMsgWoLAddIpv4PingWoLPacketParameters wolIpv4Ping;
+        struct drvMsgWoLAddIpv6PingWoLPacketParameters wolIpv6Ping;
+        struct drvMsgWoLAddArpWoLPacketParameters wolArp;
+        struct drvMsgWoLAddLinkUpWoLParameters wolLinkUpReason;
+        struct drvMsgWoLAddLinkDownWoLParameters wolLinkDownReason;
+    } wolPattern;
+} FW_PACK_STRUCT;
+
+struct drvMsgWoLRemove {
+    uint32_t id;
+} FW_PACK_STRUCT;
+
+struct ipv4ArpParameters {
+    uint32_t flags;
+    uint8_t remoteIPv4Address[4];
+    uint8_t hostIPv4Address[4];
+    uint8_t macAddress[6];
+} FW_PACK_STRUCT;
+
+struct ipv6NsParameters {
+    uint32_t flags;
+    union {
+        uint8_t v8[16];
+        uint32_t v32[4];
+    } remoteIPv6Address;
+    union {
+        uint8_t v8[16];
+        uint32_t v32[4];
+    } solicitedNodeIPv6Address;
+    union {
+        uint8_t v8[16];
+        uint32_t v32[4];
+    } targetIPv6Addresses[2];
+    uint8_t macAddress[6];
+} FW_PACK_STRUCT;
+
+struct drvMsgOffloadAdd {
+    uint32_t priority;
+    uint32_t protocolOffloadType;
+    uint32_t protocolOffloadId;
+    uint32_t nextProtocolOffloadOffset;
+    union {
+        struct ipv4ArpParameters ipv4Arp;
+        struct ipv6NsParameters ipv6Ns;
+    } wolOffload;
+} FW_PACK_STRUCT;
+
+struct drvMsgOffloadRemove {
+    uint32_t id;
+} FW_PACK_STRUCT;
+
+struct drvMsmSettings {
+    uint32_t msmReg054;
+    uint32_t msmReg058;
+    uint32_t msmReg05c;
+    uint32_t msmReg060;
+    uint32_t msmReg064;
+    uint32_t msmReg068;
+    uint32_t msmReg06c;
+    uint32_t msmReg070;
+    uint32_t flags;     // Valid for message DRV_MSG_MSM_EX only
+} FW_PACK_STRUCT;
+
+//struct drvMsgProvisioning {
+//    uint32_t command;
+//    uint32_t len;
+//    provList_t list;
+//} FW_PACK_STRUCT;
+
+//struct drvMsgSmbusProxy {
+//    uint32_t typeMsg;
+//    union {
+//        struct smbusProxyWrite smbWrite;
+//        struct smbusProxyRead smbRead;
+//        struct smbusProxyGetStatus smbStatus;
+//        struct smbusProxyReadResp smbReadResp;
+//    } FW_PACK_STRUCT;
+//} FW_PACK_STRUCT;
+
+struct drvIface {
+    uint32_t msgId;
+
+    union {
+        struct drvMsgPing msgPing;
+        struct drvMsgArp msgArp;
+        struct drvMsgInject msgInject;
+        struct drvMsgWoLAdd msgWoLAdd;
+        struct drvMsgWoLRemove msgWoLRemove;
+        struct drvMsgEnableWakeup msgEnableWakeup;
+        struct drvMsmSettings msgMsm;
+//        struct drvMsgProvisioning msgProvisioning;
+        struct drvMsgOffloadAdd msgOffloadAdd;
+        struct drvMsgOffloadRemove msgOffloadRemove;
+//        struct drvMsgSmbusProxy msgSmbusProxy;
+        struct offloadInfo fw2xOffloads;
+    } FW_PACK_STRUCT;
+} FW_PACK_STRUCT;
+
+#endif
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ethtool.c b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ethtool.c
new file mode 100644
index 0000000..624a6e3
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ethtool.c
@@ -0,0 +1,2003 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/ethtool.h>
+
+#include "atl_common.h"
+#include "atl_ring.h"
+
+static uint32_t atl_ethtool_get_link(struct net_device *ndev)
+{
+	return ethtool_op_get_link(ndev);
+}
+
+static void atl_link_to_kernel(unsigned int bits, unsigned long *kernel,
+	bool legacy)
+{
+	struct atl_link_type *type;
+	int i;
+
+	atl_for_each_rate(i, type) {
+		if (legacy && type->ethtool_idx > 31)
+			continue;
+
+		if (bits & BIT(i))
+			__set_bit(type->ethtool_idx, kernel);
+	}
+}
+
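+/* Implemented as a macro so the same body can fill in both the legacy
+ * struct ethtool_cmd and the newer link_ksettings representation; with
+ * legacy set, only modes representable in the low 32 bits are used. */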
+#define atl_ethtool_get_common(base, modes, lstate, legacy)		\
+do {									\
+	struct atl_fc_state *fc = &(lstate)->fc;			\
+	(base)->port = PORT_TP;						\
+	(base)->duplex = DUPLEX_FULL;					\
+	(base)->autoneg = AUTONEG_DISABLE;				\
+	(base)->eth_tp_mdix = ETH_TP_MDI_INVALID;			\
+	(base)->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;			\
+									\
+	atl_add_link_supported(modes, Autoneg);				\
+	atl_add_link_supported(modes, TP);				\
+	atl_add_link_supported(modes, Pause);				\
+	atl_add_link_supported(modes, Asym_Pause);			\
+	atl_add_link_advertised(modes, TP);				\
+	atl_add_link_lpadvertised(modes, Autoneg);			\
+									\
+	if (lstate->autoneg) {						\
+		(base)->autoneg = AUTONEG_ENABLE;			\
+		atl_add_link_advertised(modes, Autoneg);		\
+	}								\
+									\
+	if (fc->req & atl_fc_rx)					\
+		atl_add_link_advertised(modes, Pause);			\
+									\
+	if (!!(fc->req & atl_fc_rx) ^ !!(fc->req & atl_fc_tx))		\
+		atl_add_link_advertised(modes, Asym_Pause);		\
+									\
+	if (fc->cur & atl_fc_rx)					\
+		atl_add_link_lpadvertised(modes, Pause);		\
+									\
+	if (!!(fc->cur & atl_fc_rx) ^ !!(fc->cur & atl_fc_tx))		\
+		atl_add_link_lpadvertised(modes, Asym_Pause);		\
+									\
+	atl_link_to_kernel((lstate)->supported,				\
+		(unsigned long *)&(modes)->link_modes.supported,	\
+		legacy);						\
+	atl_link_to_kernel((lstate)->advertized,			\
+		(unsigned long *)&(modes)->link_modes.advertising,	\
+		legacy);						\
+	atl_link_to_kernel((lstate)->lp_advertized,			\
+		(unsigned long *)&(modes)->link_modes.lp_advertising,	\
+		legacy);						\
+} while (0)
+
+#define atl_add_link_supported(ptr, mode) \
+	atl_add_link_mode(ptr, SUPPORTED, supported, mode)
+
+#define atl_add_link_advertised(ptr, mode) \
+	atl_add_link_mode(ptr, ADVERTISED, advertising, mode)
+
+#define atl_add_link_lpadvertised(ptr, mode) \
+	atl_add_link_mode(ptr, ADVERTISED, lp_advertising, mode)
+
+#ifndef ATL_HAVE_ETHTOOL_KSETTINGS
+
+struct atl_ethtool_compat {
+	struct {
+		unsigned long supported;
+		unsigned long advertising;
+		unsigned long lp_advertising;
+	} link_modes;
+};
+
+#define atl_add_link_mode(ptr, nameuc, namelc, mode)	\
+	do { \
+		(ptr)->link_modes.namelc |= nameuc ## _ ## mode; \
+	} while (0)
+
+static int atl_ethtool_get_settings(struct net_device *ndev,
+				 struct ethtool_cmd *cmd)
+{
+	struct atl_ethtool_compat cmd_compat = {0};
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_link_state *lstate = &nic->hw.link_state;
+
+	atl_ethtool_get_common(cmd, &cmd_compat, lstate, true);
+	cmd->supported = cmd_compat.link_modes.supported;
+	cmd->advertising = cmd_compat.link_modes.advertising;
+	cmd->lp_advertising = cmd_compat.link_modes.lp_advertising;
+
+	ethtool_cmd_speed_set(cmd, lstate->link ? lstate->link->speed : 0);
+
+	return 0;
+}
+
+#else
+
+#define atl_add_link_mode(ptr, nameuc, namelc, mode)	\
+	ethtool_link_ksettings_add_link_mode(ptr, namelc, mode)
+
+static int atl_ethtool_get_ksettings(struct net_device *ndev,
+	struct ethtool_link_ksettings *cmd)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_link_state *lstate = &nic->hw.link_state;
+
+	ethtool_link_ksettings_zero_link_mode(cmd, supported);
+	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+	ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising);
+
+	atl_ethtool_get_common(&cmd->base, cmd, lstate, false);
+
+	cmd->base.speed = lstate->link ? lstate->link->speed : 0;
+
+	return 0;
+}
+
+#endif
+
+#undef atl_add_link_supported
+#undef atl_add_link_advertised
+#undef atl_add_link_lpadvertised
+#undef atl_add_link_mode
+
+static unsigned int atl_kernel_to_link(const unsigned long int *bits,
+	bool legacy)
+{
+	unsigned int ret = 0;
+	int i;
+	struct atl_link_type *type;
+
+	atl_for_each_rate(i, type) {
+		if (legacy && type->ethtool_idx > 31)
+			continue;
+
+		if (test_bit(type->ethtool_idx, bits))
+			ret |= BIT(i);
+	}
+
+	return ret;
+}
+
+static int atl_set_fixed_speed(struct atl_hw *hw, unsigned int speed)
+{
+	struct atl_link_state *lstate = &hw->link_state;
+	struct atl_link_type *type;
+	int i;
+
+	atl_for_each_rate(i, type)
+		if (type->speed == speed) {
+			if (!(lstate->supported & BIT(i)))
+				return -EINVAL;
+
+			lstate->advertized = BIT(i);
+			break;
+		}
+
+	lstate->autoneg = false;
+	hw->mcp.ops->set_link(hw, false);
+	return 0;
+}
+
+#define atl_ethtool_set_common(base, lstate, advertise, tmp, legacy, speed) \
+do {									\
+	struct atl_fc_state *fc = &lstate->fc;				\
+									\
+	if ((base)->port != PORT_TP || (base)->duplex != DUPLEX_FULL)	\
+		return -EINVAL;						\
+									\
+	if ((base)->autoneg != AUTONEG_ENABLE)				\
+		return atl_set_fixed_speed(hw, speed);			\
+									\
+	atl_add_link_bit(tmp, Autoneg);					\
+	atl_add_link_bit(tmp, TP);					\
+	atl_add_link_bit(tmp, Pause);					\
+	atl_add_link_bit(tmp, Asym_Pause);				\
+	atl_link_to_kernel((lstate)->supported, tmp, legacy);		\
+									\
+	if (atl_complement_intersect(advertise, tmp)) {			\
+		atl_nic_dbg("Unsupported advertising bits from ethtool\n"); \
+		return -EINVAL;						\
+	}								\
+									\
+	lstate->autoneg = true;						\
+	(lstate)->advertized &= ATL_EEE_MASK;				\
+	(lstate)->advertized |= atl_kernel_to_link(advertise, legacy);	\
+									\
+	fc->req = 0;							\
+	if (atl_test_link_bit(advertise, Pause))			\
+		fc->req	|= atl_fc_full;					\
+									\
+	if (atl_test_link_bit(advertise, Asym_Pause))			\
+		fc->req ^= atl_fc_tx;					\
+									\
+} while (0)
+
+#ifndef ATL_HAVE_ETHTOOL_KSETTINGS
+
+#define atl_add_link_bit(ptr, name)		\
+	(*(ptr) |= SUPPORTED_ ## name)
+
+#define atl_test_link_bit(ptr, name)		\
+	(*(ptr) & SUPPORTED_ ## name)
+
+static inline bool atl_complement_intersect(const unsigned long *advertised,
+	unsigned long *supported)
+{
+	return !!(*(uint32_t *)advertised & ~*(uint32_t *)supported);
+}
+
+static int atl_ethtool_set_settings(struct net_device *ndev,
+	struct ethtool_cmd *cmd)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_hw *hw = &nic->hw;
+	struct atl_link_state *lstate = &hw->link_state;
+	unsigned long tmp = 0;
+	uint32_t speed = ethtool_cmd_speed(cmd);
+
+	atl_ethtool_set_common(cmd, lstate,
+		(unsigned long *)&cmd->advertising, &tmp, true, speed);
+	hw->mcp.ops->set_link(hw, false);
+	return 0;
+}
+
+#else
+
+#define atl_add_link_bit(ptr, name)				\
+	__set_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, ptr)
+
+#define atl_test_link_bit(ptr, name)				\
+	test_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, ptr)
+
+static inline bool atl_complement_intersect(const unsigned long *advertised,
+	unsigned long *supported)
+{
+	bitmap_complement(supported, supported,
+		__ETHTOOL_LINK_MODE_MASK_NBITS);
+	return bitmap_intersects(advertised, supported,
+		__ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static int atl_ethtool_set_ksettings(struct net_device *ndev,
+	const struct ethtool_link_ksettings *cmd)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_hw *hw = &nic->hw;
+	struct atl_link_state *lstate = &hw->link_state;
+	const struct ethtool_link_settings *base = &cmd->base;
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
+
+	bitmap_zero(tmp, __ETHTOOL_LINK_MODE_MASK_NBITS);
+
+	atl_ethtool_set_common(base, lstate, cmd->link_modes.advertising, tmp,
+		false, cmd->base.speed);
+	hw->mcp.ops->set_link(hw, false);
+	return 0;
+}
+
+#endif
+
+#undef atl_add_link_bit
+#undef atl_test_link_bit
+
+static uint32_t atl_rss_tbl_size(struct net_device *ndev)
+{
+	return ATL_RSS_TBL_SIZE;
+}
+
+static uint32_t atl_rss_key_size(struct net_device *ndev)
+{
+	return ATL_RSS_KEY_SIZE;
+}
+
+static int atl_rss_get_rxfh(struct net_device *ndev, uint32_t *tbl,
+	uint8_t *key, uint8_t *htype)
+{
+	struct atl_hw *hw = &((struct atl_nic *)netdev_priv(ndev))->hw;
+	int i;
+
+	if (htype)
+		*htype = ETH_RSS_HASH_TOP;
+
+	if (key)
+		memcpy(key, hw->rss_key, atl_rss_key_size(ndev));
+
+	if (tbl)
+		for (i = 0; i < atl_rss_tbl_size(ndev); i++)
+			tbl[i] = hw->rss_tbl[i];
+
+	return 0;
+}
+
+static int atl_rss_set_rxfh(struct net_device *ndev, const uint32_t *tbl,
+	const uint8_t *key, const uint8_t htype)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_hw *hw = &nic->hw;
+	int i;
+	uint32_t tbl_size = atl_rss_tbl_size(ndev);
+
+	if (htype && htype != ETH_RSS_HASH_TOP)
+		return -EINVAL;
+
+	if (tbl) {
+		for (i = 0; i < tbl_size; i++)
+			if (tbl[i] >= nic->nvecs)
+				return -EINVAL;
+
+		for (i = 0; i < tbl_size; i++)
+			hw->rss_tbl[i] = tbl[i];
+	}
+
+	if (key) {
+		memcpy(hw->rss_key, key, atl_rss_key_size(ndev));
+		atl_set_rss_key(hw);
+	}
+
+	if (tbl)
+		atl_set_rss_tbl(hw);
+
+	return 0;
+}
+
+static void atl_get_channels(struct net_device *ndev,
+	struct ethtool_channels *chan)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+
+	chan->max_combined = ATL_MAX_QUEUES;
+	chan->combined_count = nic->nvecs;
+	if (nic->flags & ATL_FL_MULTIPLE_VECTORS)
+		chan->max_other = chan->other_count = ATL_NUM_NON_RING_IRQS;
+}
+
+static int atl_set_channels(struct net_device *ndev,
+			    struct ethtool_channels *chan)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	unsigned int nvecs = chan->combined_count;
+
+	if (!nvecs || chan->rx_count || chan->tx_count)
+		return -EINVAL;
+
+	if (nic->flags & ATL_FL_MULTIPLE_VECTORS &&
+		chan->other_count != ATL_NUM_NON_RING_IRQS)
+		return -EINVAL;
+
+	if (!(nic->flags & ATL_FL_MULTIPLE_VECTORS) &&
+		chan->other_count)
+		return -EINVAL;
+
+	if (nvecs > atl_max_queues)
+		return -EINVAL;
+
+	nic->requested_nvecs = nvecs;
+
+	return atl_reconfigure(nic);
+}
+
+static void atl_get_pauseparam(struct net_device *ndev,
+	struct ethtool_pauseparam *pause)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_fc_state *fc = &nic->hw.link_state.fc;
+
+	pause->autoneg = 1;
+	pause->rx_pause = !!(fc->cur & atl_fc_rx);
+	pause->tx_pause = !!(fc->cur & atl_fc_tx);
+}
+
+static int atl_set_pauseparam(struct net_device *ndev,
+	struct ethtool_pauseparam *pause)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_hw *hw = &nic->hw;
+	struct atl_link_state *lstate = &hw->link_state;
+	struct atl_fc_state *fc = &lstate->fc;
+
+	if (atl_fw_major(hw) < 2)
+		return -EOPNOTSUPP;
+
+	if (pause->autoneg && !lstate->autoneg)
+		return -EINVAL;
+
+	fc->req = pause->autoneg ? atl_fc_full :
+		(!!pause->rx_pause << atl_fc_rx_shift) |
+		(!!pause->tx_pause << atl_fc_tx_shift);
+
+	hw->mcp.ops->set_link(hw, false);
+	return 0;
+}
+
+static int atl_get_eee(struct net_device *ndev, struct ethtool_eee *eee)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_link_state *lstate = &nic->hw.link_state;
+	int ret = 0;
+
+	eee->supported = eee->advertised = eee->lp_advertised = 0;
+
+	/* Casting to unsigned long is safe, as atl_link_to_kernel()
+	 * will only access low 32 bits when called with legacy == true
+	 */
+	atl_link_to_kernel(lstate->supported >> ATL_EEE_BIT_OFFT,
+		(unsigned long *)&eee->supported, true);
+	atl_link_to_kernel(lstate->advertized >> ATL_EEE_BIT_OFFT,
+		(unsigned long *)&eee->advertised, true);
+	atl_link_to_kernel(lstate->lp_advertized >> ATL_EEE_BIT_OFFT,
+		(unsigned long *)&eee->lp_advertised, true);
+
+	eee->eee_enabled = eee->tx_lpi_enabled = lstate->eee_enabled;
+	eee->eee_active = lstate->eee;
+
+	ret = atl_get_lpi_timer(nic, &eee->tx_lpi_timer);
+
+	return ret;
+}
+
+static int atl_set_eee(struct net_device *ndev, struct ethtool_eee *eee)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_hw *hw = &nic->hw;
+	struct atl_link_state *lstate = &hw->link_state;
+	uint32_t tmp = 0;
+
+	if (atl_fw_major(hw) < 2)
+		return -EOPNOTSUPP;
+
+	atl_get_lpi_timer(nic, &tmp);
+	if (eee->tx_lpi_timer != tmp)
+		return -EOPNOTSUPP;
+
+	lstate->eee_enabled = eee->eee_enabled;
+
+	if (lstate->eee_enabled) {
+		atl_link_to_kernel(lstate->supported >> ATL_EEE_BIT_OFFT,
+			(unsigned long *)&tmp, true);
+		if (eee->advertised & ~tmp)
+			return -EINVAL;
+
+		/* advertise the requested modes or all supported ones */
+		if (eee->advertised)
+			tmp = atl_kernel_to_link(
+					(unsigned long *)&eee->advertised,
+					true);
+		else
+			tmp = atl_kernel_to_link(
+					(unsigned long *)&tmp, true);
+	}
+
+	lstate->advertized &= ~ATL_EEE_MASK;
+	if (lstate->eee_enabled)
+		lstate->advertized |= tmp << ATL_EEE_BIT_OFFT;
+
+	hw->mcp.ops->set_link(hw, false);
+	return 0;
+}
+
+static void atl_get_drvinfo(struct net_device *ndev,
+	struct ethtool_drvinfo *drvinfo)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	uint32_t fw_rev = nic->hw.mcp.fw_rev;
+
+	strlcpy(drvinfo->driver, atl_driver_name, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, ATL_VERSION, sizeof(drvinfo->version));
+	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+		"%d.%d.%d", fw_rev >> 24, fw_rev >> 16 & 0xff,
+		fw_rev & 0xffff);
+	strlcpy(drvinfo->bus_info, pci_name(nic->hw.pdev),
+		sizeof(drvinfo->bus_info));
+}
+
+static int atl_nway_reset(struct net_device *ndev)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_hw *hw = &nic->hw;
+
+	return hw->mcp.ops->restart_aneg(hw);
+}
+
+static void atl_get_ringparam(struct net_device *ndev,
+	struct ethtool_ringparam *rp)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+
+	rp->rx_mini_max_pending = rp->rx_mini_pending = 0;
+	rp->rx_jumbo_max_pending = rp->rx_jumbo_pending = 0;
+
+	rp->rx_max_pending = rp->tx_max_pending = ATL_MAX_RING_SIZE;
+
+	rp->rx_pending = nic->requested_rx_size;
+	rp->tx_pending = nic->requested_tx_size;
+}
+
+static int atl_set_ringparam(struct net_device *ndev,
+	struct ethtool_ringparam *rp)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+
+	if (rp->rx_mini_pending || rp->rx_jumbo_pending)
+		return -EINVAL;
+
+	if (rp->rx_pending < 8 || rp->tx_pending < 8)
+		return -EINVAL;
+
+	nic->requested_rx_size = rp->rx_pending & ~7;
+	nic->requested_tx_size = rp->tx_pending & ~7;
+
+	return atl_reconfigure(nic);
+}
+
+struct atl_stat_desc {
+	char stat_name[ETH_GSTRING_LEN];
+	int idx;
+};
+
+#define ATL_TX_STAT(_name, _field)				\
+{								\
+	.stat_name = #_name,					\
+	.idx = offsetof(struct atl_tx_ring_stats, _field) /	\
+		sizeof(uint64_t),				\
+}
+
+#define ATL_RX_STAT(_name, _field)				\
+{								\
+	.stat_name = #_name,					\
+	.idx = offsetof(struct atl_rx_ring_stats, _field) /	\
+		sizeof(uint64_t),				\
+}
+
+#define ATL_ETH_STAT(_name, _field)				\
+{								\
+	.stat_name = #_name,					\
+	.idx = offsetof(struct atl_ether_stats, _field) /	\
+		sizeof(uint64_t),				\
+}
+
+static const struct atl_stat_desc tx_stat_descs[] = {
+	ATL_TX_STAT(tx_packets, packets),
+	ATL_TX_STAT(tx_bytes, bytes),
+	ATL_TX_STAT(tx_busy, tx_busy),
+	ATL_TX_STAT(tx_queue_restart, tx_restart),
+	ATL_TX_STAT(tx_dma_map_failed, dma_map_failed),
+};
+
+static const struct atl_stat_desc rx_stat_descs[] = {
+	ATL_RX_STAT(rx_packets, packets),
+	ATL_RX_STAT(rx_bytes, bytes),
+	ATL_RX_STAT(rx_multicast_packets, multicast),
+	ATL_RX_STAT(rx_lin_skb_overrun, linear_dropped),
+	ATL_RX_STAT(rx_skb_alloc_failed, alloc_skb_failed),
+	ATL_RX_STAT(rx_head_page_reused, reused_head_page),
+	ATL_RX_STAT(rx_data_page_reused, reused_data_page),
+	ATL_RX_STAT(rx_head_page_allocated, alloc_head_page),
+	ATL_RX_STAT(rx_data_page_allocated, alloc_data_page),
+	ATL_RX_STAT(rx_head_page_alloc_failed, alloc_head_page_failed),
+	ATL_RX_STAT(rx_data_page_alloc_failed, alloc_data_page_failed),
+	ATL_RX_STAT(rx_non_eop_descs, non_eop_descs),
+	ATL_RX_STAT(rx_mac_err, mac_err),
+	ATL_RX_STAT(rx_checksum_err, csum_err),
+};
+
+static const struct atl_stat_desc eth_stat_descs[] = {
+	ATL_ETH_STAT(tx_pause, tx_pause),
+	ATL_ETH_STAT(rx_pause, rx_pause),
+	ATL_ETH_STAT(rx_ether_drops, rx_ether_drops),
+	ATL_ETH_STAT(rx_ether_octets, rx_ether_octets),
+	ATL_ETH_STAT(rx_ether_pkts, rx_ether_pkts),
+	ATL_ETH_STAT(rx_ether_broacasts, rx_ether_broacasts),
+	ATL_ETH_STAT(rx_ether_multicasts, rx_ether_multicasts),
+	ATL_ETH_STAT(rx_ether_crc_align_errs, rx_ether_crc_align_errs),
+	ATL_ETH_STAT(rx_filter_host, rx_filter_host),
+	ATL_ETH_STAT(rx_filter_lost, rx_filter_lost),
+};
+
+#define ATL_PRIV_FLAG(_name, _bit)		\
+	[ATL_PF(_bit)] = #_name
+
+static const char atl_priv_flags[][ETH_GSTRING_LEN] = {
+	ATL_PRIV_FLAG(PKTSystemLoopback, LPB_SYS_PB),
+	ATL_PRIV_FLAG(DMASystemLoopback, LPB_SYS_DMA),
+	/* ATL_PRIV_FLAG(DMANetworkLoopback, LPB_NET_DMA), */
+	ATL_PRIV_FLAG(RX_LPI_MAC, LPI_RX_MAC),
+	ATL_PRIV_FLAG(TX_LPI_MAC, LPI_TX_MAC),
+	ATL_PRIV_FLAG(RX_LPI_PHY, LPI_RX_PHY),
+	ATL_PRIV_FLAG(TX_LPI_PHY, LPI_TX_PHY),
+	ATL_PRIV_FLAG(ResetStatistics, STATS_RESET),
+	ATL_PRIV_FLAG(StripEtherPadding, STRIP_PAD),
+};
+
+static int atl_get_sset_count(struct net_device *ndev, int sset)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(tx_stat_descs) * (nic->nvecs + 1) +
+			ARRAY_SIZE(rx_stat_descs) * (nic->nvecs + 1) +
+			ARRAY_SIZE(eth_stat_descs);
+
+	case ETH_SS_PRIV_FLAGS:
+		return ARRAY_SIZE(atl_priv_flags);
+
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void atl_copy_stats_strings(char **data, char *prefix,
+	const struct atl_stat_desc *descs, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++) {
+		snprintf(*data, ETH_GSTRING_LEN, "%s%s",
+			prefix, descs[i].stat_name);
+		*data += ETH_GSTRING_LEN;
+	}
+}
+
+static void atl_copy_stats_string_set(char **data, char *prefix)
+{
+	atl_copy_stats_strings(data, prefix, tx_stat_descs,
+		ARRAY_SIZE(tx_stat_descs));
+	atl_copy_stats_strings(data, prefix, rx_stat_descs,
+		ARRAY_SIZE(rx_stat_descs));
+}
+
+static void atl_get_strings(struct net_device *ndev, uint32_t sset,
+	uint8_t *data)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	int i;
+	char prefix[16];
+	char *p = (char *)data;
+
+	switch (sset) {
+	case ETH_SS_STATS:
+		atl_copy_stats_string_set(&p, "");
+
+		atl_copy_stats_strings(&p, "", eth_stat_descs,
+			ARRAY_SIZE(eth_stat_descs));
+
+		for (i = 0; i < nic->nvecs; i++) {
+			snprintf(prefix, sizeof(prefix), "ring_%d_", i);
+			atl_copy_stats_string_set(&p, prefix);
+		}
+		return;
+
+	case ETH_SS_PRIV_FLAGS:
+		memcpy(p, atl_priv_flags, sizeof(atl_priv_flags));
+		return;
+	}
+}
+
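+/* Copy each descriptor-selected counter out of a stats struct,
+ * viewed as a flat array of uint64_t, into the ethtool data
+ * buffer, advancing the buffer pointer as it goes. */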
+#define atl_write_stats(stats, descs, data, type)	\
+do {							\
+	type *_stats = (type *)(stats);			\
+	int i;						\
+							\
+	for (i = 0; i < ARRAY_SIZE(descs); i++)		\
+		*(data)++ = _stats[descs[i].idx];	\
+} while (0)
+
+static void atl_get_ethtool_stats(struct net_device *ndev,
+	struct ethtool_stats *stats, u64 *data)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	int i;
+
+	atl_update_global_stats(nic);
+
+	atl_write_stats(&nic->stats.tx, tx_stat_descs, data, uint64_t);
+	atl_write_stats(&nic->stats.rx, rx_stat_descs, data, uint64_t);
+
+	atl_write_stats(&nic->stats.eth, eth_stat_descs, data, uint64_t);
+
+	for (i = 0; i < nic->nvecs; i++) {
+		struct atl_queue_vec *qvec = &nic->qvecs[i];
+		struct atl_ring_stats tmp;
+
+		atl_get_ring_stats(&qvec->tx, &tmp);
+		atl_write_stats(&tmp.tx, tx_stat_descs, data, uint64_t);
+		atl_get_ring_stats(&qvec->rx, &tmp);
+		atl_write_stats(&tmp.rx, rx_stat_descs, data, uint64_t);
+	}
+}
+
+static int atl_update_eee_pflags(struct atl_nic *nic)
+{
+	int ret = 0;
+	uint8_t prtad = 0;
+	uint32_t val;
+	uint16_t phy_val;
+	uint32_t flags = nic->priv_flags;
+	struct atl_link_type *link = nic->hw.link_state.link;
+	struct atl_hw *hw = &nic->hw;
+
+	flags &= ~ATL_PF_LPI_MASK;
+
+	if (!link || link->speed == 100)
+		goto done;
+
+	if (link->speed == 1000) {
+		ret = atl_mdio_read(hw, prtad, 3, 1, &phy_val);
+		if (ret)
+			goto done;
+
+		if (phy_val & BIT(9))
+			flags |= ATL_PF_BIT(LPI_TX_PHY);
+
+		if (phy_val & BIT(8))
+			flags |= ATL_PF_BIT(LPI_RX_PHY);
+	} else {
+		ret = atl_mdio_read(hw, prtad, 3, 0xc830, &phy_val);
+		if (ret)
+			goto done;
+
+		if (phy_val & BIT(0))
+			flags |= ATL_PF_BIT(LPI_TX_PHY);
+
+		ret = atl_mdio_read(hw, prtad, 3, 0xe834, &phy_val);
+		if (ret)
+			goto done;
+
+		if (phy_val & BIT(0))
+			flags |= ATL_PF_BIT(LPI_RX_PHY);
+
+	}
+
+	ret = atl_msm_read(&nic->hw, ATL_MSM_GEN_STS, &val);
+	if (ret)
+		goto done;
+
+	if (val & BIT(8))
+		flags |= ATL_PF_BIT(LPI_TX_MAC);
+	if (val & BIT(4))
+		flags |= ATL_PF_BIT(LPI_RX_MAC);
+
+done:
+	nic->priv_flags = flags;
+	return ret;
+}
+
+void atl_reset_stats(struct atl_nic *nic)
+{
+	struct atl_queue_vec *qvec;
+
+	/* Fetch current MSM stats */
+	atl_update_eth_stats(nic);
+
+	spin_lock(&nic->stats_lock);
+	/* Adding current relative values to base makes it equal to
+	 * current absolute values, thus zeroing the relative values. */
+	atl_adjust_eth_stats(&nic->stats.eth_base, &nic->stats.eth, true);
+
+	atl_for_each_qvec(nic, qvec) {
+		memset(&qvec->rx.stats, 0, sizeof(qvec->rx.stats));
+		memset(&qvec->tx.stats, 0, sizeof(qvec->tx.stats));
+	}
+
+	spin_unlock(&nic->stats_lock);
+}
+
+static int atl_set_pad_stripping(struct atl_nic *nic, bool on)
+{
+	struct atl_hw *hw = &nic->hw;
+	int ret;
+	uint32_t ctrl;
+
+	ret = atl_hwsem_get(hw, ATL_MCP_SEM_MSM);
+	if (ret)
+		return ret;
+
+	ret = __atl_msm_read(hw, ATL_MSM_GEN_CTRL, &ctrl);
+	if (ret)
+		goto unlock;
+
+	if (on)
+		ctrl |= BIT(5);
+	else
+		ctrl &= ~BIT(5);
+
+	ret = __atl_msm_write(hw, ATL_MSM_GEN_CTRL, ctrl);
+
+unlock:
+	atl_hwsem_put(hw, ATL_MCP_SEM_MSM);
+	return ret;
+}
+
+static uint32_t atl_get_priv_flags(struct net_device *ndev)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+
+	atl_update_eee_pflags(nic);
+	return nic->priv_flags;
+}
+
+static int atl_set_priv_flags(struct net_device *ndev, uint32_t flags)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	uint32_t diff = flags ^ nic->priv_flags;
+	uint32_t curr = nic->priv_flags & ATL_PF_LPB_MASK;
+	uint32_t lpb = flags & ATL_PF_LPB_MASK;
+	int ret;
+
+	if (diff & ATL_PF_RO_MASK)
+		return -EINVAL;
+
+	if (diff & ~ATL_PF_RW_MASK)
+		return -EOPNOTSUPP;
+
+	if (flags & ATL_PF_BIT(STATS_RESET))
+		atl_reset_stats(nic);
+	flags &= ~ATL_PF_BIT(STATS_RESET);
+
+	if (diff & ATL_PF_BIT(STRIP_PAD)) {
+		ret = atl_set_pad_stripping(nic,
+			!!(flags & ATL_PF_BIT(STRIP_PAD)));
+		if (ret)
+			return ret;
+	}
+
+	if (hweight32(lpb) > 1) {
+		atl_nic_err("Can't enable more than one loopback simultaneously\n");
+		return -EINVAL;
+	}
+
+	if (lpb & ATL_PF_BIT(LPB_SYS_DMA) && !atl_rx_linear) {
+		atl_nic_err("System DMA loopback suported only in rx_linear mode\n");
+		return -EINVAL;
+	}
+
+	if (curr)
+		atl_set_loopback(nic, ffs(curr) - 1, false);
+
+	if (lpb)
+		atl_set_loopback(nic, ffs(lpb) - 1, true);
+
+	nic->priv_flags = flags;
+	return 0;
+}
+
+static int atl_get_coalesce(struct net_device *ndev,
+			    struct ethtool_coalesce *ec)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+
+	memset(ec, 0, sizeof(*ec));
+	ec->rx_coalesce_usecs = nic->rx_intr_delay;
+	ec->tx_coalesce_usecs = nic->tx_intr_delay;
+
+	return 0;
+}
+
+static int atl_set_coalesce(struct net_device *ndev,
+			    struct ethtool_coalesce *ec)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+
+	if (ec->use_adaptive_rx_coalesce || ec->use_adaptive_tx_coalesce ||
+		ec->rx_max_coalesced_frames || ec->tx_max_coalesced_frames ||
+		ec->rx_max_coalesced_frames_irq || ec->rx_coalesce_usecs_irq ||
+		ec->tx_max_coalesced_frames_irq || ec->tx_coalesce_usecs_irq)
+		return -EOPNOTSUPP;
+
+	if (ec->rx_coalesce_usecs < atl_min_intr_delay ||
+		ec->tx_coalesce_usecs < atl_min_intr_delay) {
+		atl_nic_err("Interrupt coalescing delays less than min_intr_delay (%d uS) not supported\n",
+			atl_min_intr_delay);
+		return -EINVAL;
+	}
+
+	nic->rx_intr_delay = ec->rx_coalesce_usecs;
+	nic->tx_intr_delay = ec->tx_coalesce_usecs;
+
+	atl_set_intr_mod(nic);
+
+	return 0;
+}
+
+struct atl_rxf_flt_desc {
+	int base;
+	int count;
+	uint32_t rxq_bit;
+	int rxq_shift;
+	size_t cmd_offt;
+	size_t count_offt;
+	int (*get_rxf)(const struct atl_rxf_flt_desc *desc,
+		struct atl_nic *nic, struct ethtool_rx_flow_spec *fsp);
+	int (*set_rxf)(const struct atl_rxf_flt_desc *desc,
+		struct atl_nic *nic, struct ethtool_rx_flow_spec *fsp);
+	void (*update_rxf)(struct atl_nic *nic, int idx);
+	int (*check_rxf)(const struct atl_rxf_flt_desc *desc,
+		struct atl_nic *nic, struct ethtool_rx_flow_spec *fsp);
+};
+
+#define atl_for_each_rxf_desc(_desc)				\
+for (_desc = atl_rxf_descs;					\
+	_desc < atl_rxf_descs + ARRAY_SIZE(atl_rxf_descs);	\
+	_desc++)
+
+#define atl_for_each_rxf_idx(_desc, _idx)		\
+	for (_idx = 0; _idx < _desc->count; _idx++)
+
+static inline int atl_rxf_idx(const struct atl_rxf_flt_desc *desc,
+	struct ethtool_rx_flow_spec *fsp)
+{
+	return fsp->location - desc->base;
+}
+
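+/* Translate a filter's cmd word into an ethtool ring cookie: the
+ * bound queue when one is set, ATL_RXF_RING_ANY when the filter
+ * just accepts to host, or RX_CLS_FLOW_DISC for a drop rule. */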
+static inline uint64_t atl_ring_cookie(const struct atl_rxf_flt_desc *desc,
+	uint32_t cmd)
+{
+	if (cmd & desc->rxq_bit)
+		return (cmd >> desc->rxq_shift) & ATL_RXF_RXQ_MSK;
+	else if (cmd & ATL_RXF_ACT_TOHOST)
+		return ATL_RXF_RING_ANY;
+	else
+		return RX_CLS_FLOW_DISC;
+}
+
+static int atl_rxf_get_vlan(const struct atl_rxf_flt_desc *desc,
+	struct atl_nic *nic, struct ethtool_rx_flow_spec *fsp)
+{
+	struct atl_rxf_vlan *vlan = &nic->rxf_vlan;
+	int idx = atl_rxf_idx(desc, fsp);
+	uint32_t cmd = vlan->cmd[idx];
+
+	if (!(cmd & ATL_RXF_EN))
+		return -EINVAL;
+
+	fsp->flow_type = ETHER_FLOW | FLOW_EXT;
+	fsp->h_ext.vlan_tci = htons(cmd & ATL_VLAN_VID_MASK);
+	fsp->m_ext.vlan_tci = htons(BIT(12) - 1);
+	fsp->ring_cookie = atl_ring_cookie(desc, cmd);
+
+	return 0;
+}
+
+static int atl_rxf_get_etype(const struct atl_rxf_flt_desc *desc,
+	struct atl_nic *nic, struct ethtool_rx_flow_spec *fsp)
+{
+	struct atl_rxf_etype *etype = &nic->rxf_etype;
+	int idx = atl_rxf_idx(desc, fsp);
+	uint32_t cmd = etype->cmd[idx];
+
+	if (!(cmd & ATL_RXF_EN))
+		return -EINVAL;
+
+	fsp->flow_type = ETHER_FLOW;
+	fsp->m_u.ether_spec.h_proto = 0xffff;
+	fsp->h_u.ether_spec.h_proto = htons(cmd & ATL_ETYPE_VAL_MASK);
+	fsp->ring_cookie = atl_ring_cookie(desc, cmd);
+
+	return 0;
+}
+
+static inline void atl_ntuple_swap_v6(__be32 dst[4], __be32 src[4])
+{
+	int i;
+
+	for (i = 0; i < 4; i++)
+		dst[i] = src[3 - i];
+}
+
+static int atl_rxf_get_ntuple(const struct atl_rxf_flt_desc *desc,
+	struct atl_nic *nic, struct ethtool_rx_flow_spec *fsp)
+{
+	struct atl_rxf_ntuple *ntuples = &nic->rxf_ntuple;
+	uint32_t idx = atl_rxf_idx(desc, fsp);
+	uint32_t cmd = ntuples->cmd[idx];
+
+	if (!(cmd & ATL_RXF_EN))
+		return -EINVAL;
+
+	if (cmd & ATL_NTC_PROTO) {
+		switch (cmd & ATL_NTC_L4_MASK) {
+		case ATL_NTC_L4_TCP:
+			fsp->flow_type = cmd & ATL_NTC_V6 ?
+				TCP_V6_FLOW : TCP_V4_FLOW;
+			break;
+
+		case ATL_NTC_L4_UDP:
+			fsp->flow_type = cmd & ATL_NTC_V6 ?
+				UDP_V6_FLOW : UDP_V4_FLOW;
+			break;
+
+		case ATL_NTC_L4_SCTP:
+			fsp->flow_type = cmd & ATL_NTC_V6 ?
+				SCTP_V6_FLOW : SCTP_V4_FLOW;
+			break;
+
+		default:
+			return -EINVAL;
+		}
+	} else {
+#ifdef ATL_HAVE_IPV6_NTUPLE
+		if (cmd & ATL_NTC_V6) {
+			fsp->flow_type = IPV6_USER_FLOW;
+		} else
+#endif
+		{
+			fsp->flow_type = IPV4_USER_FLOW;
+			fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+		}
+	}
+
+#ifdef ATL_HAVE_IPV6_NTUPLE
+	if (cmd & ATL_NTC_V6) {
+		struct ethtool_tcpip6_spec *rule = &fsp->h_u.tcp_ip6_spec;
+		struct ethtool_tcpip6_spec *mask = &fsp->m_u.tcp_ip6_spec;
+
+		if (cmd & ATL_NTC_SA) {
+			atl_ntuple_swap_v6(rule->ip6src,
+				ntuples->src_ip6[idx / 4]);
+			memset(mask->ip6src, 0xff, sizeof(mask->ip6src));
+		}
+
+		if (cmd & ATL_NTC_DA) {
+			atl_ntuple_swap_v6(rule->ip6dst,
+				ntuples->dst_ip6[idx / 4]);
+			memset(mask->ip6dst, 0xff, sizeof(mask->ip6dst));
+		}
+
+		if (cmd & ATL_NTC_SP) {
+			rule->psrc = ntuples->src_port[idx];
+			mask->psrc = -1;
+		}
+
+		if (cmd & ATL_NTC_DP) {
+			rule->pdst = ntuples->dst_port[idx];
+			mask->pdst = -1;
+		}
+	} else
+#endif
+	{
+		struct ethtool_tcpip4_spec *rule = &fsp->h_u.tcp_ip4_spec;
+		struct ethtool_tcpip4_spec *mask = &fsp->m_u.tcp_ip4_spec;
+
+		if (cmd & ATL_NTC_SA) {
+			rule->ip4src = ntuples->src_ip4[idx];
+			mask->ip4src = -1;
+		}
+
+		if (cmd & ATL_NTC_DA) {
+			rule->ip4dst = ntuples->dst_ip4[idx];
+			mask->ip4dst = -1;
+		}
+
+		if (cmd & ATL_NTC_SP) {
+			rule->psrc = ntuples->src_port[idx];
+			mask->psrc = -1;
+		}
+
+		if (cmd & ATL_NTC_DP) {
+			rule->pdst = ntuples->dst_port[idx];
+			mask->pdst = -1;
+		}
+	}
+
+	fsp->ring_cookie = atl_ring_cookie(desc, cmd);
+
+	return 0;
+}
+
+static int atl_get_rxf_locs(struct atl_nic *nic, struct ethtool_rxnfc *rxnfc,
+	uint32_t *rule_locs)
+{
+	struct atl_rxf_ntuple *ntuple = &nic->rxf_ntuple;
+	struct atl_rxf_vlan *vlan = &nic->rxf_vlan;
+	struct atl_rxf_etype *etype = &nic->rxf_etype;
+	int count = ntuple->count + vlan->count + etype->count;
+	int i;
+
+	if (rxnfc->rule_cnt < count)
+		return -EMSGSIZE;
+
+	for (i = 0; i < ATL_RXF_VLAN_MAX; i++)
+		if (vlan->cmd[i] & ATL_RXF_EN)
+			*rule_locs++ = i + ATL_RXF_VLAN_BASE;
+
+	for (i = 0; i < ATL_RXF_ETYPE_MAX; i++)
+		if (etype->cmd[i] & ATL_RXF_EN)
+			*rule_locs++ = i + ATL_RXF_ETYPE_BASE;
+
+	for (i = 0; i < ATL_RXF_NTUPLE_MAX; i++)
+		if (ntuple->cmd[i] & ATL_RXF_EN)
+			*rule_locs++ = i + ATL_RXF_NTUPLE_BASE;
+
+	rxnfc->rule_cnt = count;
+	return 0;
+}
+
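+/* The hardware matches a field either fully or not at all, so a
+ * mask must be all-zeros or all-ones; an all-ones mask sets @flag
+ * in @cmd (a NULL @cmd means the field may not be matched). */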
+static int atl_check_mask(uint8_t *mask, int len, uint32_t *cmd, uint32_t flag)
+{
+	uint8_t first = mask[0];
+	uint8_t *p;
+
+	if (first != 0 && first != 0xff)
+		return -EINVAL;
+
+	for (p = mask; p < &mask[len]; p++)
+		if (*p != first)
+			return -EINVAL;
+
+	if (first == 0xff) {
+		if (cmd)
+			*cmd |= flag;
+		else
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int atl_rxf_set_ring(const struct atl_rxf_flt_desc *desc,
+	struct atl_nic *nic, struct ethtool_rx_flow_spec *fsp, uint32_t *cmd)
+{
+	uint64_t ring_cookie = fsp->ring_cookie;
+	uint32_t ring;
+
+	if (ring_cookie == RX_CLS_FLOW_DISC)
+		return 0;
+
+	ring = ethtool_get_flow_spec_ring(ring_cookie);
+	if (ring > ATL_RXF_RING_ANY ||
+		(ring >= nic->nvecs && ring != ATL_RXF_RING_ANY &&
+			!test_bit(ring, &nic->fwd.ring_map[ATL_FWDIR_RX]))) {
+		atl_nic_err("Invalid Rx filter queue %d\n", ring);
+		return -EINVAL;
+	}
+
+	if (ethtool_get_flow_spec_ring_vf(ring_cookie)) {
+		atl_nic_err("Rx filter queue VF must be zero");
+		return -EINVAL;
+	}
+
+	*cmd |= ATL_RXF_ACT_TOHOST;
+
+	if (ring != ATL_RXF_RING_ANY)
+		*cmd |= ring << desc->rxq_shift | desc->rxq_bit;
+
+	return 0;
+}
+
+static int atl_rxf_check_vlan_etype_common(struct ethtool_rx_flow_spec *fsp)
+{
+	int ret;
+
+	ret = atl_check_mask((uint8_t *)&fsp->m_u.ether_spec.h_source,
+		sizeof(fsp->m_u.ether_spec.h_source), NULL, 0);
+	if (ret)
+		return ret;
+
+	ret = atl_check_mask((uint8_t *)&fsp->m_ext.data,
+		sizeof(fsp->m_ext.data), NULL, 0);
+	if (ret)
+		return ret;
+
+	ret = atl_check_mask((uint8_t *)&fsp->m_ext.vlan_etype,
+		sizeof(fsp->m_ext.vlan_etype), NULL, 0);
+
+	return ret;
+}
+
+static int atl_rxf_check_vlan(const struct atl_rxf_flt_desc *desc,
+	struct atl_nic *nic, struct ethtool_rx_flow_spec *fsp)
+{
+	uint16_t vid, mask;
+	int ret;
+
+	if (fsp->flow_type != (ETHER_FLOW | FLOW_EXT)) {
+		if (!(fsp->location & RX_CLS_LOC_SPECIAL))
+			atl_nic_err("Only ether flow-type supported for VLAN filters\n");
+		return -EINVAL;
+	}
+
+	ret = atl_rxf_check_vlan_etype_common(fsp);
+	if (ret)
+		return ret;
+
+	if (fsp->m_u.ether_spec.h_proto)
+		return -EINVAL;
+
+	vid = ntohs(fsp->h_ext.vlan_tci);
+	mask = ntohs(fsp->m_ext.vlan_tci);
+
+	if (mask & 0xf000 && vid & 0xf000 & mask)
+		return -EINVAL;
+
+	if ((mask & 0xfff) != 0xfff)
+		return -EINVAL;
+
+	return 0;
+}
+
+enum atl_rxf_vlan_idx {
+	ATL_VIDX_FOUND = BIT(31),
+	ATL_VIDX_FREE = BIT(30),
+	ATL_VIDX_REPL = BIT(29),
+	ATL_VIDX_NONE = BIT(28),
+	ATL_VIDX_MASK = BIT(28) - 1,
+};
+
+/* If a filter is enabled for VID, return its index ORed with
+ * ATL_VIDX_FOUND. Otherwise find an unused filter index and return
+ * it ORed with ATL_VIDX_FREE. If no unused filter exists and
+ * try_repl is set, try finding a candidate for replacement and
+ * return its index ORed with ATL_VIDX_REPL. If all of the above
+ * fail, return ATL_VIDX_NONE.
+ *
+ * A replacement candidate filter must be configured to accept
+ * packets, must not direct to a specific ring, and must match a VID
+ * from a VLAN subinterface.
+ */
+static uint32_t atl_rxf_find_vid(struct atl_nic *nic, uint16_t vid,
+	bool try_repl)
+{
+	struct atl_rxf_vlan *vlan = &nic->rxf_vlan;
+	int idx, free = ATL_RXF_VLAN_MAX, repl = ATL_RXF_VLAN_MAX;
+
+	for (idx = 0; idx < ATL_RXF_VLAN_MAX; idx++) {
+		uint32_t cmd = vlan->cmd[idx];
+
+		if (!(cmd & ATL_RXF_EN)) {
+			if (free == ATL_RXF_VLAN_MAX) {
+				free = idx;
+				if (vid == (uint16_t)-1)
+					break;
+			}
+			continue;
+		}
+
+		if ((cmd & ATL_VLAN_VID_MASK) == vid)
+			return idx | ATL_VIDX_FOUND;
+
+		if (try_repl && repl == ATL_RXF_VLAN_MAX &&
+			(cmd & ATL_RXF_ACT_TOHOST) &&
+			!(cmd & ATL_VLAN_RXQ)) {
+
+			if (!test_bit(cmd & ATL_VLAN_VID_MASK, vlan->map))
+				continue;
+
+			repl = idx;
+		}
+	}
+
+	if (free != ATL_RXF_VLAN_MAX)
+		return free | ATL_VIDX_FREE;
+
+	if (try_repl && repl != ATL_RXF_VLAN_MAX)
+		return repl | ATL_VIDX_REPL;
+
+	return ATL_VIDX_NONE;
+}
+
+static uint16_t atl_rxf_vid(struct atl_rxf_vlan *vlan, int idx)
+{
+	uint32_t cmd = vlan->cmd[idx];
+
+	return cmd & ATL_RXF_EN ? cmd & ATL_VLAN_VID_MASK : -1;
+}
+
+static int atl_rxf_dup_vid(struct atl_rxf_vlan *vlan, int idx, uint16_t vid)
+{
+	int i;
+
+	for (i = 0; i < ATL_RXF_VLAN_MAX; i++) {
+		if (i == idx)
+			continue;
+
+		if (atl_rxf_vid(vlan, i) == vid)
+			return i;
+	}
+
+	return -1;
+}
+
+static int atl_rxf_set_vlan(const struct atl_rxf_flt_desc *desc,
+	struct atl_nic *nic, struct ethtool_rx_flow_spec *fsp)
+{
+	struct atl_rxf_vlan *vlan = &nic->rxf_vlan;
+	int idx;
+	int ret, promisc_delta = 0;
+	uint32_t cmd = ATL_RXF_EN;
+	int present;
+	uint16_t old_vid, vid = ntohs(fsp->h_ext.vlan_tci) & 0xfff;
+
+	if (!(fsp->location & RX_CLS_LOC_SPECIAL)) {
+		int dup;
+
+		idx = atl_rxf_idx(desc, fsp);
+		dup = atl_rxf_dup_vid(vlan, idx, vid);
+		if (dup >= 0) {
+			atl_nic_err("Can't add duplicate VLAN filter @%d (existing @%d)\n",
+				idx, dup);
+			return -EINVAL;
+		}
+
+		old_vid = atl_rxf_vid(vlan, idx);
+		if (old_vid != (uint16_t)-1 && vid != old_vid &&
+			test_bit(old_vid, vlan->map)) {
+			atl_nic_err("Can't overwrite Linux VLAN filter @%d VID %hu with a different VID %hu\n",
+				idx, old_vid, vid);
+			return -EINVAL;
+		}
+
+		ret = atl_rxf_check_vlan(desc, nic, fsp);
+		if (ret)
+			return ret;
+
+	} else {
+		/* atl_rxf_check_vlan() already succeeded */
+		idx = atl_rxf_find_vid(nic, vid, true);
+
+		if (idx == ATL_VIDX_NONE)
+			return -EINVAL;
+
+		/* If a filter is being added for a VID without a
+		 * corresponding VLAN subdevice, and we're reusing a
+		 * filter previously used for a VLAN subdevice-covered
+		 * VID, the promisc count needs to be bumped (but
+		 * only if filter change succeeds). */
+		if ((idx & ATL_VIDX_REPL) && !test_bit(vid, vlan->map))
+			promisc_delta++;
+
+		idx &= ATL_VIDX_MASK;
+		fsp->location = idx + desc->base;
+	}
+
+	cmd |= vid;
+
+	ret = atl_rxf_set_ring(desc, nic, fsp, &cmd);
+	if (ret)
+		return ret;
+
+	/* If a VLAN subdevice exists, override filter to accept
+	 * packets */
+	if (test_bit(vid, vlan->map))
+		cmd |= ATL_RXF_ACT_TOHOST;
+
+	present = !!(vlan->cmd[idx] & ATL_RXF_EN);
+	vlan->cmd[idx] = cmd;
+	vlan->promisc_count += promisc_delta;
+
+	return !present;
+}
+
+static int atl_rxf_set_etype(const struct atl_rxf_flt_desc *desc,
+	struct atl_nic *nic, struct ethtool_rx_flow_spec *fsp)
+{
+	struct atl_rxf_etype *etype = &nic->rxf_etype;
+	int idx = atl_rxf_idx(desc, fsp);
+	int ret;
+	uint32_t cmd = ATL_RXF_EN;
+	int present = !!(etype->cmd[idx] & ATL_RXF_EN);
+
+	if (fsp->flow_type != (ETHER_FLOW)) {
+		atl_nic_err("Only ether flow-type supported for ethertype filters\n");
+		return -EINVAL;
+	}
+
+	ret = atl_rxf_check_vlan_etype_common(fsp);
+	if (ret)
+		return ret;
+
+	if (fsp->m_ext.vlan_tci)
+		return -EINVAL;
+
+	if (fsp->m_u.ether_spec.h_proto != 0xffff)
+		return -EINVAL;
+
+	cmd |= ntohs(fsp->h_u.ether_spec.h_proto);
+
+	ret = atl_rxf_set_ring(desc, nic, fsp, &cmd);
+	if (ret)
+		return ret;
+
+	etype->cmd[idx] = cmd;
+
+	return !present;
+}
+
+static int atl_rxf_set_ntuple(const struct atl_rxf_flt_desc *desc,
+	struct atl_nic *nic, struct ethtool_rx_flow_spec *fsp)
+{
+	struct atl_rxf_ntuple *ntuple = &nic->rxf_ntuple;
+	int idx = atl_rxf_idx(desc, fsp);
+	uint32_t cmd = ATL_NTC_EN;
+	int ret;
+	__be16 sport, dport;
+	int present = !!(ntuple->cmd[idx] & ATL_RXF_EN);
+
+	ret = atl_rxf_set_ring(desc, nic, fsp, &cmd);
+	if (ret)
+		return ret;
+
+	switch (fsp->flow_type) {
+#ifdef ATL_HAVE_IPV6_NTUPLE
+	case TCP_V6_FLOW:
+	case UDP_V6_FLOW:
+	case SCTP_V6_FLOW:
+		if (fsp->m_u.tcp_ip6_spec.tclass != 0) {
+			atl_nic_err("Unsupported match field\n");
+			return -EINVAL;
+		}
+		cmd |= ATL_NTC_PROTO | ATL_NTC_V6;
+		break;
+
+	case IPV6_USER_FLOW:
+		if (fsp->m_u.usr_ip6_spec.l4_4_bytes != 0 ||
+			fsp->m_u.usr_ip6_spec.tclass != 0 ||
+			fsp->m_u.usr_ip6_spec.l4_proto != 0) {
+			atl_nic_err("Unsupported match field\n");
+			return -EINVAL;
+		}
+		cmd |= ATL_NTC_V6;
+		break;
+#endif
+
+	case TCP_V4_FLOW:
+	case UDP_V4_FLOW:
+	case SCTP_V4_FLOW:
+		if (fsp->m_u.tcp_ip4_spec.tos != 0) {
+			atl_nic_err("Unsupported match field\n");
+			return -EINVAL;
+		}
+		cmd |= ATL_NTC_PROTO;
+		break;
+
+	case IPV4_USER_FLOW:
+		if (fsp->m_u.usr_ip4_spec.l4_4_bytes != 0 ||
+			fsp->m_u.usr_ip4_spec.tos != 0 ||
+			fsp->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 ||
+			fsp->h_u.usr_ip4_spec.proto != 0) {
+			atl_nic_err("Unsupported match field\n");
+			return -EINVAL;
+		}
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	switch (fsp->flow_type) {
+	case TCP_V6_FLOW:
+	case TCP_V4_FLOW:
+		cmd |= ATL_NTC_L4_TCP;
+		break;
+
+	case UDP_V6_FLOW:
+	case UDP_V4_FLOW:
+		cmd |= ATL_NTC_L4_UDP;
+		break;
+
+	case SCTP_V6_FLOW:
+	case SCTP_V4_FLOW:
+		cmd |= ATL_NTC_L4_SCTP;
+		break;
+	}
+
+#ifdef ATL_HAVE_IPV6_NTUPLE
+	if (cmd & ATL_NTC_V6) {
+		int i;
+
+		if (idx & 3) {
+			atl_nic_err("IPv6 filters only supported in locations 8 and 12\n");
+			return -EINVAL;
+		}
+
+		for (i = idx + 1; i < idx + 4; i++)
+			if (ntuple->cmd[i] & ATL_NTC_EN) {
+				atl_nic_err("IPv6 filter %d overlaps an IPv4 filter %d\n",
+					    idx, i);
+				return -EINVAL;
+			}
+
+		ret = atl_check_mask((uint8_t *)fsp->m_u.tcp_ip6_spec.ip6src,
+			sizeof(fsp->m_u.tcp_ip6_spec.ip6src), &cmd, ATL_NTC_SA);
+		if (ret)
+			return ret;
+
+		ret = atl_check_mask((uint8_t *)fsp->m_u.tcp_ip6_spec.ip6dst,
+			sizeof(fsp->m_u.tcp_ip6_spec.ip6dst), &cmd, ATL_NTC_DA);
+		if (ret)
+			return ret;
+
+		sport = fsp->h_u.tcp_ip6_spec.psrc;
+		ret = atl_check_mask((uint8_t *)&fsp->m_u.tcp_ip6_spec.psrc,
+			sizeof(fsp->m_u.tcp_ip6_spec.psrc), &cmd, ATL_NTC_SP);
+		if (ret)
+			return ret;
+
+		dport = fsp->h_u.tcp_ip6_spec.pdst;
+		ret = atl_check_mask((uint8_t *)&fsp->m_u.tcp_ip6_spec.pdst,
+			sizeof(fsp->m_u.tcp_ip6_spec.pdst), &cmd, ATL_NTC_DP);
+		if (ret)
+			return ret;
+
+		if (cmd & ATL_NTC_SA)
+			atl_ntuple_swap_v6(ntuple->src_ip6[idx / 4],
+				fsp->h_u.tcp_ip6_spec.ip6src);
+
+		if (cmd & ATL_NTC_DA)
+			atl_ntuple_swap_v6(ntuple->dst_ip6[idx / 4],
+				fsp->h_u.tcp_ip6_spec.ip6dst);
+
+	} else
+#endif
+	{
+
+		ret = atl_check_mask((uint8_t *)&fsp->m_u.tcp_ip4_spec.ip4src,
+			sizeof(fsp->m_u.tcp_ip4_spec.ip4src), &cmd, ATL_NTC_SA);
+		if (ret)
+			return ret;
+
+		ret = atl_check_mask((uint8_t *)&fsp->m_u.tcp_ip4_spec.ip4dst,
+			sizeof(fsp->m_u.tcp_ip4_spec.ip4dst), &cmd, ATL_NTC_DA);
+		if (ret)
+			return ret;
+
+		sport = fsp->h_u.tcp_ip4_spec.psrc;
+		ret = atl_check_mask((uint8_t *)&fsp->m_u.tcp_ip4_spec.psrc,
+			sizeof(fsp->m_u.tcp_ip4_spec.psrc), &cmd, ATL_NTC_SP);
+		if (ret)
+			return ret;
+
+		dport = fsp->h_u.tcp_ip4_spec.pdst;
+		ret = atl_check_mask((uint8_t *)&fsp->m_u.tcp_ip4_spec.pdst,
+			sizeof(fsp->m_u.tcp_ip4_spec.pdst), &cmd, ATL_NTC_DP);
+		if (ret)
+			return ret;
+
+		if (cmd & ATL_NTC_SA)
+			ntuple->src_ip4[idx] = fsp->h_u.tcp_ip4_spec.ip4src;
+
+		if (cmd & ATL_NTC_DA)
+			ntuple->dst_ip4[idx] = fsp->h_u.tcp_ip4_spec.ip4dst;
+	}
+
+	if (cmd & ATL_NTC_SP)
+		ntuple->src_port[idx] = sport;
+
+	if (cmd & ATL_NTC_DP)
+		ntuple->dst_port[idx] = dport;
+
+	ntuple->cmd[idx] = cmd;
+
+	return !present;
+}
+
+static void atl_rxf_update_vlan(struct atl_nic *nic, int idx)
+{
+	atl_write(&nic->hw, ATL_RX_VLAN_FLT(idx), nic->rxf_vlan.cmd[idx]);
+}
+
+static void atl_rxf_update_etype(struct atl_nic *nic, int idx)
+{
+	atl_write(&nic->hw, ATL_RX_ETYPE_FLT(idx), nic->rxf_etype.cmd[idx]);
+}
+
+static const struct atl_rxf_flt_desc atl_rxf_descs[] = {
+	{
+		.base = ATL_RXF_VLAN_BASE,
+		.count = ATL_RXF_VLAN_MAX,
+		.rxq_bit = ATL_VLAN_RXQ,
+		.rxq_shift = ATL_VLAN_RXQ_SHIFT,
+		.cmd_offt = offsetof(struct atl_nic, rxf_vlan.cmd),
+		.count_offt = offsetof(struct atl_nic, rxf_vlan.count),
+		.get_rxf = atl_rxf_get_vlan,
+		.set_rxf = atl_rxf_set_vlan,
+		.update_rxf = atl_rxf_update_vlan,
+		.check_rxf = atl_rxf_check_vlan,
+	},
+	{
+		.base = ATL_RXF_ETYPE_BASE,
+		.count = ATL_RXF_ETYPE_MAX,
+		.rxq_bit = ATL_ETYPE_RXQ,
+		.rxq_shift = ATL_ETYPE_RXQ_SHIFT,
+		.cmd_offt = offsetof(struct atl_nic, rxf_etype.cmd),
+		.count_offt = offsetof(struct atl_nic, rxf_etype.count),
+		.get_rxf = atl_rxf_get_etype,
+		.set_rxf = atl_rxf_set_etype,
+		.update_rxf = atl_rxf_update_etype,
+	},
+	{
+		.base = ATL_RXF_NTUPLE_BASE,
+		.count = ATL_RXF_NTUPLE_MAX,
+		.rxq_bit = ATL_NTC_RXQ,
+		.rxq_shift = ATL_NTC_RXQ_SHIFT,
+		.cmd_offt = offsetof(struct atl_nic, rxf_ntuple.cmd),
+		.count_offt = offsetof(struct atl_nic, rxf_ntuple.count),
+		.get_rxf = atl_rxf_get_ntuple,
+		.set_rxf = atl_rxf_set_ntuple,
+		.update_rxf = atl_update_ntuple_flt,
+	},
+};
+
+static uint32_t *atl_rxf_cmd(const struct atl_rxf_flt_desc *desc,
+	struct atl_nic *nic)
+{
+	return (uint32_t *)((char *)nic + desc->cmd_offt);
+}
+
+static int *atl_rxf_count(const struct atl_rxf_flt_desc *desc, struct atl_nic *nic)
+{
+	return (int *)((char *)nic + desc->count_offt);
+}
+
+static const struct atl_rxf_flt_desc *atl_rxf_desc(struct atl_nic *nic,
+	struct ethtool_rx_flow_spec *fsp)
+{
+	uint32_t loc = fsp->location;
+	const struct atl_rxf_flt_desc *desc;
+
+	atl_for_each_rxf_desc(desc) {
+		if (loc & RX_CLS_LOC_SPECIAL) {
+			if (desc->check_rxf && !desc->check_rxf(desc, nic, fsp))
+				return desc;
+
+			continue;
+		}
+
+		if (loc < desc->base)
+			return NULL;
+
+		if (loc < desc->base + desc->count)
+			return desc;
+	}
+
+	return NULL;
+}
+
+static void atl_refresh_rxf_desc(struct atl_nic *nic,
+	const struct atl_rxf_flt_desc *desc)
+{
+	int idx;
+
+	atl_for_each_rxf_idx(desc, idx)
+		desc->update_rxf(nic, idx);
+
+	atl_set_vlan_promisc(&nic->hw, nic->rxf_vlan.promisc_count);
+}
+
+void atl_refresh_rxfs(struct atl_nic *nic)
+{
+	const struct atl_rxf_flt_desc *desc;
+
+	atl_for_each_rxf_desc(desc)
+		atl_refresh_rxf_desc(nic, desc);
+
+	atl_set_vlan_promisc(&nic->hw, nic->rxf_vlan.promisc_count);
+}
+
+static bool atl_vlan_pull_from_promisc(struct atl_nic *nic, uint32_t idx)
+{
+	struct atl_rxf_vlan *vlan = &nic->rxf_vlan;
+	unsigned long *map;
+	int i;
+	long vid = -1;
+
+	if (!vlan->promisc_count)
+		return false;
+
+	map = kcalloc(ATL_VID_MAP_LEN, sizeof(*map), GFP_KERNEL);
+	if (!map)
+		return false;
+
+	memcpy(map, vlan->map, ATL_VID_MAP_LEN * sizeof(*map));
+	for (i = 0; i < ATL_RXF_VLAN_MAX; i++) {
+		uint32_t cmd = vlan->cmd[i];
+
+		if (cmd & ATL_RXF_EN)
+			clear_bit(cmd & ATL_VLAN_VID_MASK, map);
+	}
+
+	do {
+		idx &= ATL_VIDX_MASK;
+		vid = find_next_bit(map, BIT(12), vid + 1);
+		vlan->cmd[idx] = ATL_RXF_EN | ATL_RXF_ACT_TOHOST | vid;
+		atl_rxf_update_vlan(nic, idx);
+		__clear_bit(vid, map);
+		vlan->promisc_count--;
+		vlan->count++;
+		if (vlan->promisc_count == 0)
+			break;
+
+		idx = atl_rxf_find_vid(nic, -1, false);
+	} while (idx & ATL_VIDX_FREE);
+
+	kfree(map);
+	atl_set_vlan_promisc(&nic->hw, vlan->promisc_count);
+	return true;
+}
+
+static bool atl_rxf_del_vlan_override(const struct atl_rxf_flt_desc *desc,
+	struct atl_nic *nic, struct ethtool_rx_flow_spec *fsp)
+{
+	struct atl_rxf_vlan *vlan = &nic->rxf_vlan;
+	uint32_t *cmd = &vlan->cmd[atl_rxf_idx(desc, fsp)];
+	uint16_t vid = *cmd & ATL_VLAN_VID_MASK;
+
+	if (!test_bit(vid, vlan->map))
+		return false;
+
+	/* Trying to delete filter via ethtool while VLAN subdev still
+	 * exists. Just drop queue assignment. */
+	*cmd &= ~ATL_VLAN_RXQ;
+	return true;
+}
+
+static int atl_set_rxf(struct atl_nic *nic,
+	struct ethtool_rx_flow_spec *fsp, bool delete)
+{
+	const struct atl_rxf_flt_desc *desc;
+	uint32_t *cmd;
+	int *count, ret, idx;
+
+	desc = atl_rxf_desc(nic, fsp);
+	if (!desc)
+		return -EINVAL;
+
+	count = atl_rxf_count(desc, nic);
+
+	if (delete) {
+		idx = atl_rxf_idx(desc, fsp);
+		cmd = &atl_rxf_cmd(desc, nic)[idx];
+
+		if (!(*cmd & ATL_RXF_EN))
+			/* Attempting to delete non-existent filter */
+			return -EINVAL;
+
+		if (desc->base == ATL_RXF_VLAN_BASE &&
+			atl_rxf_del_vlan_override(desc, nic, fsp))
+			goto done;
+
+		*cmd = 0;
+		(*count)--;
+
+		if (desc->base == ATL_RXF_VLAN_BASE &&
+			atl_vlan_pull_from_promisc(nic, idx))
+			/* Filter already updated by
+			 * atl_vlan_pull_from_promisc(), can just
+			 * return */
+			return 0;
+	} else {
+		ret = desc->set_rxf(desc, nic, fsp);
+		if (ret < 0)
+			return ret;
+
+		/* fsp->location may have been set in
+		 * ->set_rxf(). Guaranteed to be valid now. */
+		idx = atl_rxf_idx(desc, fsp);
+		*count += ret;
+	}
+
+done:
+	desc->update_rxf(nic, idx);
+	return 0;
+}
+
+static int atl_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc,
+	uint32_t *rule_locs)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct ethtool_rx_flow_spec *fsp = &rxnfc->fs;
+	int ret = -ENOTSUPP;
+	const struct atl_rxf_flt_desc *desc;
+
+	switch (rxnfc->cmd) {
+	case ETHTOOL_GRXRINGS:
+		rxnfc->data = nic->nvecs;
+		return 0;
+
+	case ETHTOOL_GRXCLSRLCNT:
+		rxnfc->rule_cnt = nic->rxf_ntuple.count + nic->rxf_vlan.count +
+			nic->rxf_etype.count;
+		rxnfc->data = (ATL_RXF_VLAN_MAX + ATL_RXF_ETYPE_MAX +
+			ATL_RXF_NTUPLE_MAX) | RX_CLS_LOC_SPECIAL;
+		return 0;
+
+	case ETHTOOL_GRXCLSRULE:
+		desc = atl_rxf_desc(nic, fsp);
+		if (!desc)
+			return -EINVAL;
+
+		memset(&fsp->h_u, 0, sizeof(fsp->h_u));
+		memset(&fsp->m_u, 0, sizeof(fsp->m_u));
+		memset(&fsp->h_ext, 0, sizeof(fsp->h_ext));
+		memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
+
+		ret = desc->get_rxf(desc, nic, fsp);
+		break;
+
+	case ETHTOOL_GRXCLSRLALL:
+		ret = atl_get_rxf_locs(nic, rxnfc, rule_locs);
+		break;
+
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static int atl_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct ethtool_rx_flow_spec *fsp = &rxnfc->fs;
+
+	switch (rxnfc->cmd) {
+	case ETHTOOL_SRXCLSRLINS:
+		return atl_set_rxf(nic, fsp, false);
+
+	case ETHTOOL_SRXCLSRLDEL:
+		return atl_set_rxf(nic, fsp, true);
+	}
+
+	return -ENOTSUPP;
+}
+
+int atl_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_rxf_vlan *vlan = &nic->rxf_vlan;
+	int idx;
+
+	atl_nic_dbg("Add vlan id %hd\n", vid);
+
+	vid &= 0xfff;
+	if (__test_and_set_bit(vid, vlan->map))
+		/* Already created -- shouldn't happen? */
+		return 0;
+
+	vlan->vlans_active++;
+	idx = atl_rxf_find_vid(nic, vid, false);
+
+	if (idx == ATL_VIDX_NONE) {
+		/* VID not found and no unused filters */
+		vlan->promisc_count++;
+		atl_set_vlan_promisc(&nic->hw, vlan->promisc_count);
+		return 0;
+	}
+
+	if (idx & ATL_VIDX_FREE) {
+		/* VID not found, program unused filter */
+		idx &= ATL_VIDX_MASK;
+		vlan->cmd[idx] = ATL_VLAN_EN | ATL_RXF_ACT_TOHOST | vid;
+		vlan->count++;
+		atl_rxf_update_vlan(nic, idx);
+		return 0;
+	}
+
+	idx &= ATL_VIDX_MASK;
+	if (vlan->cmd[idx] & ATL_RXF_ACT_TOHOST)
+		/* VID already added via ethtool */
+		return 0;
+
+	/* Ethtool filter set to drop. Override. */
+	atl_nic_warn("%s: Overriding VLAN filter for VID %hd @%d set to drop\n",
+		__func__, vid, idx);
+
+	vlan->cmd[idx] = ATL_RXF_EN | ATL_RXF_ACT_TOHOST | vid;
+	atl_rxf_update_vlan(nic, idx);
+	return 0;
+}
+
+int atl_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_rxf_vlan *vlan = &nic->rxf_vlan;
+	uint32_t cmd;
+	int idx;
+
+	atl_nic_dbg("Kill vlan id %hd\n", vid);
+
+	vid &= 0xfff;
+	if (!__test_and_clear_bit(vid, vlan->map))
+		return -EINVAL;
+
+	vlan->vlans_active--;
+
+	idx = atl_rxf_find_vid(nic, vid, false);
+	if (!(idx & ATL_VIDX_FOUND)) {
+		/* VID not present in filters, decrease promisc count */
+		vlan->promisc_count--;
+		atl_set_vlan_promisc(&nic->hw, vlan->promisc_count);
+		return 0;
+	}
+
+	idx &= ATL_VIDX_MASK;
+	cmd = vlan->cmd[idx];
+	if (cmd & ATL_VLAN_RXQ)
+		/* Queue explicitly set via ethtool, leave the filter
+		 * intact */
+		return 0;
+
+	/* Delete filter, maybe pull vid from promisc overflow */
+	vlan->cmd[idx] = 0;
+	vlan->count--;
+	if (!atl_vlan_pull_from_promisc(nic, idx))
+		atl_rxf_update_vlan(nic, idx);
+
+	return 0;
+}
+
+static void atl_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+
+	wol->supported = WAKE_MAGIC;
+	wol->wolopts = nic->flags & ATL_FL_WOL ? WAKE_MAGIC : 0;
+}
+
+static int atl_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+	int ret;
+	struct atl_nic *nic = netdev_priv(ndev);
+
+	if (wol->wolopts & ~WAKE_MAGIC) {
+		atl_nic_err("%s: unsupported WoL mode %x\n", __func__,
+			wol->wolopts);
+		return -EINVAL;
+	}
+
+	if (wol->wolopts & WAKE_MAGIC)
+		nic->flags |= ATL_FL_WOL;
+	else
+		nic->flags &= ~ATL_FL_WOL;
+
+	ret = device_set_wakeup_enable(&nic->hw.pdev->dev,
+		!!(nic->flags & ATL_FL_WOL));
+
+	if (ret)
+		atl_nic_err("device_set_wakeup_enable failed: %d\n", -ret);
+
+	return ret;
+}
+
+const struct ethtool_ops atl_ethtool_ops = {
+	.get_link = atl_ethtool_get_link,
+#ifndef ATL_HAVE_ETHTOOL_KSETTINGS
+	.get_settings = atl_ethtool_get_settings,
+	.set_settings = atl_ethtool_set_settings,
+#else
+	.get_link_ksettings = atl_ethtool_get_ksettings,
+	.set_link_ksettings = atl_ethtool_set_ksettings,
+#endif
+
+	.get_rxfh_indir_size = atl_rss_tbl_size,
+	.get_rxfh_key_size = atl_rss_key_size,
+	.get_rxfh = atl_rss_get_rxfh,
+	.set_rxfh = atl_rss_set_rxfh,
+	.get_channels = atl_get_channels,
+	.set_channels = atl_set_channels,
+	.get_rxnfc = atl_get_rxnfc,
+	.set_rxnfc = atl_set_rxnfc,
+	.get_pauseparam = atl_get_pauseparam,
+	.set_pauseparam = atl_set_pauseparam,
+	.get_eee = atl_get_eee,
+	.set_eee = atl_set_eee,
+	.get_drvinfo = atl_get_drvinfo,
+	.nway_reset = atl_nway_reset,
+	.get_ringparam = atl_get_ringparam,
+	.set_ringparam = atl_set_ringparam,
+	.get_sset_count = atl_get_sset_count,
+	.get_strings = atl_get_strings,
+	.get_ethtool_stats = atl_get_ethtool_stats,
+	.get_priv_flags = atl_get_priv_flags,
+	.set_priv_flags = atl_set_priv_flags,
+	.get_coalesce = atl_get_coalesce,
+	.set_coalesce = atl_set_coalesce,
+	.get_wol = atl_get_wol,
+	.set_wol = atl_set_wol,
+};
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fw.c b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fw.c
new file mode 100644
index 0000000..86bc2eb
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fw.c
@@ -0,0 +1,470 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+#include "atl_common.h"
+#include "atl_hw.h"
+#include "atl_drviface.h"
+
+struct atl_link_type atl_link_types[] = {
+#define LINK_TYPE(_name, _speed, _ethtl_idx, _fw1_bit, _fw2_bit)	\
+	{								\
+		.name = _name,						\
+		.speed = _speed,					\
+		.ethtool_idx = _ethtl_idx,				\
+		.fw_bits = {						\
+		[0] = _fw1_bit,						\
+		[1] = _fw2_bit,						\
+		},							\
+	},
+
+	LINK_TYPE("100BaseTX-FD", 100, ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+		0x20, 1 << 5)
+	LINK_TYPE("1000BaseT-FD", 1000, ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+		0x10, 1 << 8)
+	LINK_TYPE("2.5GBaseT-FD", 2500, ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+		8, 1 << 9)
+	LINK_TYPE("5GBaseT-FD", 5000, ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+		2, 1 << 10)
+	LINK_TYPE("10GBaseT-FD", 10000, ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+		1, 1 << 11)
+};
+
+const int atl_num_rates = ARRAY_SIZE(atl_link_types);
+
+static inline void atl_lock_fw(struct atl_hw *hw)
+{
+	mutex_lock(&hw->mcp.lock);
+}
+
+static inline void atl_unlock_fw(struct atl_hw *hw)
+{
+	mutex_unlock(&hw->mcp.lock);
+}
+
+static int atl_fw1_wait_fw_init(struct atl_hw *hw)
+{
+	uint32_t hostData_addr;
+	uint32_t id, new_id;
+	int ret;
+
+	mdelay(10);
+
+	busy_wait(2000, mdelay(1), hostData_addr,
+		  atl_read(hw, ATL_MCP_SCRATCH(FW_STAT_STRUCT)),
+		  hostData_addr == 0);
+
+	atl_dev_dbg("got hostData address: 0x%x\n", hostData_addr);
+
+	ret = atl_read_mcp_mem(hw, hostData_addr + 4, &id, 4);
+	if (ret)
+		return ret;
+
+	busy_wait(10000, mdelay(1), ret,
+		  atl_read_mcp_mem(hw, hostData_addr + 4, &new_id, 4),
+		  !ret && new_id == id);
+	if (ret)
+		return ret;
+	if (new_id == id) {
+		atl_dev_err("timeout waiting for FW to start (initial transactionId 0x%x, hostData addr 0x%x)\n",
+			    id, hostData_addr);
+		return -EIO;
+	}
+
+	/* return fw1_wait_drviface(hw, NULL); */
+	return 0;
+}
+
+static int atl_fw2_wait_fw_init(struct atl_hw *hw)
+{
+	uint32_t reg;
+
+	busy_wait(1000, mdelay(1), reg, atl_read(hw, ATL_GLOBAL_FW_IMAGE_ID),
+		!reg);
+	if (!reg)
+		return -EIO;
+	return 0;
+}
+
+static struct atl_link_type *atl_parse_fw_bits(struct atl_hw *hw,
+	uint32_t low, uint32_t high, int fw_idx)
+{
+	struct atl_link_state *lstate = &hw->link_state;
+	unsigned int lp_adv = 0, adv = lstate->advertized;
+	struct atl_link_type *link;
+	bool eee = false;
+	int last = -1;
+	int i;
+
+	atl_for_each_rate(i, link) {
+		uint32_t link_bit = link->fw_bits[fw_idx];
+
+		if (!(low & link_bit))
+			continue;
+
+		if (high & link_bit)
+			lp_adv |= BIT(i + ATL_EEE_BIT_OFFT);
+
+		lp_adv |= BIT(i);
+		if (adv & BIT(i))
+			last = i;
+	}
+
+	lstate->lp_advertized = lp_adv;
+
+	link = NULL;
+	if (last >= 0) {
+		link = &atl_link_types[last];
+		if ((lp_adv & BIT(last + ATL_EEE_BIT_OFFT)) &&
+			(adv & BIT(last + ATL_EEE_BIT_OFFT)))
+			eee = true;
+	}
+
+	lstate->link = link;
+	lstate->eee = eee;
+	return link;
+}
+
+static struct atl_link_type *atl_fw1_check_link(struct atl_hw *hw)
+{
+	uint32_t reg;
+	struct atl_link_type *link;
+
+	atl_lock_fw(hw);
+	reg = atl_read(hw, ATL_MCP_SCRATCH(FW1_LINK_STS));
+
+	if ((reg & 0xf) != 2)
+		reg = 0;
+
+	reg = (reg >> 16) & 0xff;
+
+	link = atl_parse_fw_bits(hw, reg, 0, 0);
+
+	atl_unlock_fw(hw);
+	return link;
+}
+
+static struct atl_link_type *atl_fw2_check_link(struct atl_hw *hw)
+{
+	struct atl_link_type *link;
+	struct atl_link_state *lstate = &hw->link_state;
+	uint32_t low;
+	uint32_t high;
+	enum atl_fc_mode fc = atl_fc_none;
+
+	atl_lock_fw(hw);
+
+	low = atl_read(hw, ATL_MCP_SCRATCH(FW2_LINK_RES_LOW));
+	high = atl_read(hw, ATL_MCP_SCRATCH(FW2_LINK_RES_HIGH));
+
+	link = atl_parse_fw_bits(hw, low, high, 1);
+	if (!link)
+		goto unlock;
+
+	if (high & atl_fw2_pause)
+		fc |= atl_fc_rx;
+	if (high & atl_fw2_asym_pause)
+		fc |= atl_fc_tx;
+
+	lstate->fc.cur = fc;
+
+unlock:
+	atl_unlock_fw(hw);
+	return link;
+}
+
+static int atl_fw1_get_link_caps(struct atl_hw *hw)
+{
+	return 0;
+}
+
+static int atl_fw2_get_link_caps(struct atl_hw *hw)
+{
+	uint32_t fw_stat_addr = hw->mcp.fw_stat_addr;
+	unsigned int supported = 0;
+	uint32_t caps[2];
+	int i, ret;
+
+	atl_lock_fw(hw);
+
+	atl_dev_dbg("Host data struct addr: %#x\n", fw_stat_addr);
+	ret = atl_read_mcp_mem(hw, fw_stat_addr + atl_fw2_stat_lcaps,
+		caps, 8);
+	if (ret)
+		goto unlock;
+
+	for (i = 0; i < atl_num_rates; i++)
+		if (atl_link_types[i].fw_bits[1] & caps[0]) {
+			supported |= BIT(i);
+			if (atl_link_types[i].fw_bits[1] & caps[1])
+				supported |= BIT(i + ATL_EEE_BIT_OFFT);
+		}
+
+	hw->link_state.supported = supported;
+
+unlock:
+	atl_unlock_fw(hw);
+	return ret;
+}
+
+static inline unsigned int atl_link_adv(struct atl_link_state *lstate)
+{
+	return lstate->force_off ? 0 : lstate->advertized;
+}
+
+static inline bool atl_fw1_set_link_needed(struct atl_link_state *lstate)
+{
+	bool ret = false;
+
+	if (atl_link_adv(lstate) != lstate->prev_advertized) {
+		ret = true;
+		lstate->prev_advertized = atl_link_adv(lstate);
+	}
+
+	return ret;
+}
+
+static inline bool atl_fw2_set_link_needed(struct atl_link_state *lstate)
+{
+	struct atl_fc_state *fc = &lstate->fc;
+	bool ret = false;
+
+	if (fc->req != fc->prev_req) {
+		ret = true;
+		fc->prev_req = fc->req;
+	}
+
+	return atl_fw1_set_link_needed(lstate) || ret;
+}
+
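+/* Build the FW link-request word: the low 32 bits carry the
+ * advertised rates, the high 32 bits the matching EEE requests. */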
+static uint64_t atl_set_fw_bits(struct atl_hw *hw, int fw_idx)
+{
+	unsigned int adv = atl_link_adv(&hw->link_state);
+	struct atl_link_type *ltype;
+	uint64_t link = 0;
+	int i;
+
+	atl_for_each_rate(i, ltype) {
+		uint32_t bit = ltype->fw_bits[fw_idx];
+
+		if (adv & BIT(i)) {
+			link |= bit;
+			if (adv & BIT(i + ATL_EEE_BIT_OFFT))
+				link |= (uint64_t)bit << 32;
+		}
+	}
+
+	return link;
+}
+
+static void atl_fw1_set_link(struct atl_hw *hw, bool force)
+{
+	uint32_t bits;
+
+	if (!force && !atl_fw1_set_link_needed(&hw->link_state))
+		return;
+
+	atl_lock_fw(hw);
+
+	bits = (atl_set_fw_bits(hw, 0) << 16) | 2;
+	atl_write(hw, ATL_MCP_SCRATCH(FW1_LINK_REQ), bits);
+
+	atl_unlock_fw(hw);
+}
+
+static void atl_fw2_set_link(struct atl_hw *hw, bool force)
+{
+	struct atl_link_state *lstate = &hw->link_state;
+	uint32_t hi_bits = 0;
+	uint64_t bits;
+
+	if (!force && !atl_fw2_set_link_needed(lstate))
+		return;
+
+	atl_lock_fw(hw);
+
+	if (lstate->fc.req & atl_fc_rx)
+		hi_bits |= atl_fw2_pause | atl_fw2_asym_pause;
+
+	if (lstate->fc.req & atl_fc_tx)
+		hi_bits ^= atl_fw2_asym_pause;
+
+	bits = atl_set_fw_bits(hw, 1);
+
+	hi_bits |= bits >> 32;
+
+	atl_write(hw, ATL_MCP_SCRATCH(FW2_LINK_REQ_LOW), bits);
+	atl_write(hw, ATL_MCP_SCRATCH(FW2_LINK_REQ_HIGH), hi_bits);
+
+	atl_unlock_fw(hw);
+}
+
+static int atl_fw1_unsupported(struct atl_hw *hw)
+{
+	return -EOPNOTSUPP;
+}
+
+static int atl_fw2_restart_aneg(struct atl_hw *hw)
+{
+	atl_lock_fw(hw);
+	atl_set_bits(hw, ATL_MCP_SCRATCH(FW2_LINK_REQ_HIGH), BIT(31));
+	atl_unlock_fw(hw);
+	return 0;
+}
+
+static void atl_fw1_set_default_link(struct atl_hw *hw)
+{
+	struct atl_link_state *lstate = &hw->link_state;
+
+	lstate->autoneg = true;
+	lstate->advertized = hw->link_state.supported;
+}
+
+static void atl_fw2_set_default_link(struct atl_hw *hw)
+{
+	struct atl_link_state *lstate = &hw->link_state;
+
+	atl_fw1_set_default_link(hw);
+	lstate->fc.req = atl_fc_full;
+	lstate->eee_enabled = 1;
+}
+
+static int atl_fw2_enable_wol(struct atl_hw *hw)
+{
+	int ret;
+	struct offloadInfo *info;
+	struct drvIface *msg;
+	uint32_t val, wol_bits = atl_fw2_nic_proxy | atl_fw2_wol;
+
+	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	info = &msg->fw2xOffloads;
+	info->version = 0;
+	info->len = sizeof(*info);
+	memcpy(info->macAddr, hw->mac_addr, ETH_ALEN);
+
+	atl_lock_fw(hw);
+
+	ret = atl_write_mcp_mem(hw, 0, msg,
+		(info->len + offsetof(struct drvIface, fw2xOffloads) + 3) & ~3);
+	if (ret) {
+		atl_dev_err("Failed to upload sleep proxy info to FW\n");
+		goto free;
+	}
+
+	atl_write(hw, ATL_MCP_SCRATCH(FW2_LINK_REQ_LOW), 0);
+	atl_write(hw, ATL_MCP_SCRATCH(FW2_LINK_REQ_HIGH), wol_bits);
+	busy_wait(100, mdelay(1), val,
+		atl_read(hw, ATL_MCP_SCRATCH(FW2_LINK_RES_HIGH)),
+		(val & wol_bits) != wol_bits);
+
+	ret = (val & wol_bits) == wol_bits ? 0 : -EIO;
+	if (ret)
+		atl_dev_err("Timeout waiting for WoL enable\n");
+
+free:
+	atl_unlock_fw(hw);
+	kfree(msg);
+	return ret;
+}
+
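+/* Read one 32-bit word from the FW stats area. MCP memory reads are
+ * word-granular, so round the offset down for the read and shift
+ * the result so the byte at @offt lands in the low bits of *val. */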
+int atl_read_fwstat_word(struct atl_hw *hw, uint32_t offt, uint32_t *val)
+{
+	int ret;
+	uint32_t addr = hw->mcp.fw_stat_addr + (offt & ~3);
+
+	ret = atl_read_mcp_mem(hw, addr, val, 4);
+	if (ret)
+		return ret;
+
+	*val >>= 8 * (offt & 3);
+	return 0;
+}
+
+static int atl_fw2_get_phy_temperature(struct atl_hw *hw, int *temp)
+{
+	uint32_t req, res;
+	int ret = 0;
+
+	atl_lock_fw(hw);
+
+	req = atl_read(hw, ATL_MCP_SCRATCH(FW2_LINK_REQ_HIGH));
+	req ^= atl_fw2_phy_temp;
+	atl_write(hw, ATL_MCP_SCRATCH(FW2_LINK_REQ_HIGH), req);
+
+	busy_wait(1000, udelay(10), res,
+		atl_read(hw, ATL_MCP_SCRATCH(FW2_LINK_RES_HIGH)),
+		((res ^ req) & atl_fw2_phy_temp) != 0);
+	if (((res ^ req) & atl_fw2_phy_temp) != 0) {
+		atl_dev_err("Timeout waiting for PHY temperature\n");
+		ret = -EIO;
+		goto unlock;
+	}
+
+	ret = atl_read_fwstat_word(hw, atl_fw2_stat_temp, &res);
+	if (ret)
+		goto unlock;
+
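+	/* The FW reports temperature in 1/256 degree Celsius units;
+	 * convert to the millidegree convention used by hwmon. */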
+	*temp = (res & 0xffff) * 1000 / 256;
+
+unlock:
+	atl_unlock_fw(hw);
+	return ret;
+}
+
+static struct atl_fw_ops atl_fw_ops[2] = {
+	[0] = {
+		.wait_fw_init = atl_fw1_wait_fw_init,
+		.set_link = atl_fw1_set_link,
+		.check_link = atl_fw1_check_link,
+		.get_link_caps = atl_fw1_get_link_caps,
+		.restart_aneg = atl_fw1_unsupported,
+		.set_default_link = atl_fw1_set_default_link,
+		.enable_wol = atl_fw1_unsupported,
+		.get_phy_temperature = (void *)atl_fw1_unsupported,
+		.efuse_shadow_addr_reg = ATL_MCP_SCRATCH(FW1_EFUSE_SHADOW),
+	},
+	[1] = {
+		.wait_fw_init = atl_fw2_wait_fw_init,
+		.set_link = atl_fw2_set_link,
+		.check_link = atl_fw2_check_link,
+		.get_link_caps = atl_fw2_get_link_caps,
+		.restart_aneg = atl_fw2_restart_aneg,
+		.set_default_link = atl_fw2_set_default_link,
+		.enable_wol = atl_fw2_enable_wol,
+		.get_phy_temperature = atl_fw2_get_phy_temperature,
+		.efuse_shadow_addr_reg = ATL_MCP_SCRATCH(FW2_EFUSE_SHADOW),
+	},
+};
+
+int atl_fw_init(struct atl_hw *hw)
+{
+	uint32_t tries, reg, major;
+
+	tries = busy_wait(10000, mdelay(1), reg, atl_read(hw, 0x18), !reg);
+	if (!reg) {
+		atl_dev_err("Timeout waiting for FW version\n");
+		return -EIO;
+	}
+	atl_dev_dbg("FW startup took %d ms\n", tries);
+
+	major = (reg >> 24) & 0xff;
+	if (!major || major > 3) {
+		atl_dev_err("Unsupported FW major version: %u\n", major);
+		return -EINVAL;
+	}
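+	/* FW 3.x speaks the FW2 interface, so fold major version 3
+	 * onto 2 before indexing atl_fw_ops[]. */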
+	if (major > 2)
+		major--;
+	hw->mcp.ops = &atl_fw_ops[major - 1];
+	hw->mcp.poll_link = major == 1;
+	hw->mcp.fw_rev = reg;
+	hw->mcp.fw_stat_addr = atl_read(hw, ATL_MCP_SCRATCH(FW_STAT_STRUCT));
+
+	return hw->mcp.ops->wait_fw_init(hw);
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fw.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fw.h
new file mode 100644
index 0000000..a3712e2
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fw.h
@@ -0,0 +1,94 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef _ATL_FW_H_
+#define _ATL_FW_H_
+
+struct atl_hw;
+
+struct atl_link_type {
+	unsigned speed;
+	unsigned ethtool_idx;
+	uint32_t fw_bits[2];
+	const char *name;
+};
+
+extern struct atl_link_type atl_link_types[];
+extern const int atl_num_rates;
+
+#define atl_for_each_rate(idx, type)		\
+	for (idx = 0, type = atl_link_types;	\
+	     idx < atl_num_rates;		\
+	     idx++, type++)
+
+#define atl_define_bit(_name, _bit)		\
+	_name ## _shift = (_bit),		\
+	_name = BIT(_name ## _shift),
+
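+/*
+ * Example expansion (illustrative):
+ *   atl_define_bit(atl_fw2_pause, 3)
+ * produces the enumerators
+ *   atl_fw2_pause_shift = 3,
+ *   atl_fw2_pause = BIT(3),
+ */
+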
+enum atl_fw2_opts {
+	atl_define_bit(atl_fw2_pause, 3)
+	atl_define_bit(atl_fw2_asym_pause, 4)
+	atl_fw2_pause_mask = atl_fw2_pause | atl_fw2_asym_pause,
+	atl_define_bit(atl_fw2_phy_temp, 18)
+	atl_define_bit(atl_fw2_nic_proxy, 23)
+	atl_define_bit(atl_fw2_wol, 24)
+};
+
+enum atl_fw2_stat_offt {
+	atl_fw2_stat_temp = 0x50,
+	atl_fw2_stat_lcaps = 0x84,
+};
+
+enum atl_fc_mode {
+	atl_fc_none = 0,
+	atl_define_bit(atl_fc_rx, 0)
+	atl_define_bit(atl_fc_tx, 1)
+	atl_fc_full = atl_fc_rx | atl_fc_tx,
+};
+
+struct atl_fc_state {
+	enum atl_fc_mode req;
+	enum atl_fc_mode prev_req;
+	enum atl_fc_mode cur;
+};
+
+#define ATL_EEE_BIT_OFFT 16
+#define ATL_EEE_MASK ~(BIT(ATL_EEE_BIT_OFFT) - 1)
+
+struct atl_link_state {
+	/* The following three bitmaps use atl_link_types[] indices
+	 * as link bit positions. Conversion to/from ethtool bits is
+	 * done in atl_ethtool.c. */
+	unsigned supported;
+	unsigned advertized;
+	unsigned lp_advertized;
+	unsigned prev_advertized;
+	bool force_off;
+	bool autoneg;
+	bool eee;
+	bool eee_enabled;
+	struct atl_link_type *link;
+	struct atl_fc_state fc;
+};
+
+struct atl_fw_ops {
+	void (*set_link)(struct atl_hw *hw, bool force);
+	struct atl_link_type *(*check_link)(struct atl_hw *hw);
+	int (*wait_fw_init)(struct atl_hw *hw);
+	int (*get_link_caps)(struct atl_hw *hw);
+	int (*restart_aneg)(struct atl_hw *hw);
+	void (*set_default_link)(struct atl_hw *hw);
+	int (*enable_wol)(struct atl_hw *hw);
+	int (*get_phy_temperature)(struct atl_hw *hw, int *temp);
+	unsigned efuse_shadow_addr_reg;
+};
+
+int atl_read_fwstat_word(struct atl_hw *hw, uint32_t offt, uint32_t *val);
+
+#endif
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fwd.c b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fwd.c
new file mode 100644
index 0000000..d6cdfb3
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fwd.c
@@ -0,0 +1,598 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2018 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/etherdevice.h>
+#include "atl_common.h"
+#include "atl_desc.h"
+
+static const char *atl_fwd_dir_str(struct atl_fwd_ring *ring)
+{
+	return ring->flags & ATL_FWR_TX ? "Tx" : "Rx";
+}
+
+static int atl_fwd_ring_tx(struct atl_fwd_ring *ring)
+{
+	return !!(ring->flags & ATL_FWR_TX);
+}
+
+static int atl_fwd_get_page(struct atl_fwd_buf_page *bpg, struct device *dev,
+	int order)
+{
+	struct page *pg = dev_alloc_pages(order);
+	dma_addr_t daddr;
+
+	if (!pg)
+		return -ENOMEM;
+
+	daddr = dma_map_page(dev, pg, 0, PAGE_SIZE << order, DMA_FROM_DEVICE);
+
+	if (dma_mapping_error(dev, daddr)) {
+		__free_pages(pg, order);
+		return -ENOMEM;
+	}
+
+	bpg->daddr = daddr;
+	bpg->page = pg;
+
+	return 0;
+}
+
+static void atl_fwd_free_bufs(struct atl_fwd_ring *ring)
+{
+	struct atl_nic *nic = ring->nic;
+	struct device *dev = &nic->hw.pdev->dev;
+	struct atl_fwd_bufs *bufs = ring->bufs;
+	int ring_size = ring->hw.size;
+	int order;
+	int i;
+
+	if (!bufs)
+		return;
+
+	/* Only dereference bufs after the NULL check above */
+	order = bufs->order;
+
+	if (bufs->daddr_vec) {
+		dma_free_coherent(dev, ring_size * sizeof(dma_addr_t),
+			bufs->daddr_vec, bufs->daddr_vec_base);
+		kfree(bufs->vaddr_vec);
+	}
+
+	for (i = 0; i < bufs->num_pages; i++) {
+		struct atl_fwd_buf_page *bpg = &bufs->bpgs[i];
+
+		if (bpg->page) {
+			dma_unmap_page(dev, bpg->daddr,
+				PAGE_SIZE << order,
+				DMA_FROM_DEVICE);
+			__free_pages(bpg->page, order);
+		}
+	}
+
+	kfree(bufs);
+	ring->bufs = NULL;
+}
+
+static int atl_fwd_alloc_bufs(struct atl_fwd_ring *ring,
+	int order)
+{
+	struct atl_nic *nic = ring->nic;
+	int flags = ring->flags;
+	int ring_size = ring->hw.size;
+	int buf_size = ring->buf_size;
+	struct device *dev = &nic->hw.pdev->dev;
+	struct atl_fwd_buf_page *bpg;
+	struct atl_fwd_bufs *bufs;
+	int num_pages, i;
+	int ret;
+	unsigned int pg_off = 0;
+	bool want_vecs = !!(flags & ATL_FWR_WANT_BUF_VECS);
+
+	if (!(flags & ATL_FWR_ALLOC_BUFS))
+		return 0;
+
+	if (flags & ATL_FWR_CONTIG_BUFS) {
+		order = get_order(buf_size * ring_size);
+		num_pages = 1;
+	} else {
+		int bufs_per_page = (PAGE_SIZE << order) / buf_size;
+
+		num_pages = ring_size / bufs_per_page +
+			!!(ring_size % bufs_per_page);
+	}
+
+	bufs = kzalloc(sizeof(*bufs) +
+			sizeof(struct atl_fwd_buf_page) * num_pages,
+		GFP_KERNEL);
+	if (!bufs)
+		return -ENOMEM;
+
+	ring->bufs = bufs;
+	bufs->num_pages = num_pages;
+	bufs->order = order;
+
+	bpg = bufs->bpgs;
+	for (i = 0; i < num_pages; i++) {
+		ret = atl_fwd_get_page(&bpg[i], dev, order);
+		if (ret)
+			goto free;
+	}
+
+	if (want_vecs) {
+		ret = -ENOMEM;
+		bufs->daddr_vec = dma_alloc_coherent(dev,
+			ring_size * sizeof(dma_addr_t),
+			&bufs->daddr_vec_base, GFP_KERNEL);
+		if (!bufs->daddr_vec)
+			goto free;
+
+		bufs->vaddr_vec = kcalloc(ring_size, sizeof(void *),
+			GFP_KERNEL);
+		if (!bufs->vaddr_vec)
+			goto free;
+	} else {
+		bufs->daddr_vec_base = bpg[0].daddr;
+		bufs->vaddr_vec = page_to_virt(bpg[0].page);
+	}
+
+	bufs->paddr = page_to_phys(bpg[0].page);
+
+	bpg = bufs->bpgs;
+	for (i = 0; i < ring_size; i++) {
+		union atl_desc *desc = &ring->hw.descs[i];
+		dma_addr_t daddr = bpg->daddr + pg_off;
+
+		if (want_vecs) {
+			bufs->daddr_vec[i] = daddr;
+			bufs->vaddr_vec[i] = page_to_virt(bpg->page) + pg_off;
+		}
+
+		if (atl_fwd_ring_tx(ring))
+			desc->tx.daddr = daddr;
+		else
+			desc->rx.daddr = daddr;
+
+		pg_off += buf_size;
+		if (pg_off + buf_size <= (PAGE_SIZE << order))
+			continue;
+
+		bpg++;
+		pg_off = 0;
+	}
+
+	return 0;
+
+free:
+	atl_fwd_free_bufs(ring);
+	return ret;
+}
+
+static void atl_fwd_update_im(struct atl_fwd_ring *ring)
+{
+	struct atl_hw *hw = &ring->nic->hw;
+	int idx = ring->idx;
+	uint32_t addr;
+
+	addr = atl_fwd_ring_tx(ring) ? ATL_TX_INTR_MOD_CTRL(idx) :
+		ATL_RX_INTR_MOD_CTRL(idx);
+
+	atl_write(hw, addr, (ring->intr_mod_max / 2) << 0x10 |
+		(ring->intr_mod_min / 2) << 8 | 2);
+}
+
+static void atl_fwd_init_ring(struct atl_fwd_ring *fwd_ring)
+{
+	struct atl_hw *hw = &fwd_ring->nic->hw;
+	struct atl_hw_ring *ring = &fwd_ring->hw;
+	unsigned int flags = fwd_ring->flags;
+	int dir_tx = atl_fwd_ring_tx(fwd_ring);
+	int idx = fwd_ring->idx;
+	int lxo_bit = !!(flags & ATL_FWR_LXO);
+
+	atl_write(hw, ATL_RING_BASE_LSW(ring), ring->daddr);
+	atl_write(hw, ATL_RING_BASE_MSW(ring), ring->daddr >> 32);
+
+	if (dir_tx) {
+		atl_write(hw, ATL_TX_RING_THRESH(ring),
+			8 << 8 | 8 << 0x10 | 24 << 0x18);
+		atl_write(hw, ATL_TX_RING_CTL(ring), ring->size);
+
+		atl_write_bit(hw, ATL_TX_LSO_CTRL, idx, lxo_bit);
+	} else {
+		uint32_t ctrl = ring->size |
+			!!(flags & ATL_FWR_VLAN) << 29;
+
+		atl_write(hw, ATL_RX_RING_BUF_SIZE(ring),
+			fwd_ring->buf_size / 1024);
+		atl_write(hw, ATL_RX_RING_THRESH(ring),
+			8 << 0x10 | 24 << 0x18);
+		atl_write(hw, ATL_RX_RING_TAIL(ring), ring->size - 1);
+		atl_write(hw, ATL_RX_RING_CTL(ring), ctrl);
+
+		if (lxo_bit)
+			atl_write_bits(hw, ATL_RX_LRO_PKT_LIM(idx),
+				(idx & 7) * 4, 2, 3);
+
+		atl_write_bit(hw, ATL_RX_LRO_CTRL1, idx, lxo_bit);
+		atl_write_bit(hw, ATL_INTR_RSC_EN, idx, lxo_bit);
+	}
+
+	atl_fwd_update_im(fwd_ring);
+}
+
+void atl_fwd_release_ring(struct atl_fwd_ring *ring)
+{
+	struct atl_nic *nic = ring->nic;
+	int idx = ring->idx;
+	int dir_tx = atl_fwd_ring_tx(ring);
+	struct atl_fwd *fwd = &nic->fwd;
+	unsigned long *map = &fwd->ring_map[dir_tx];
+	struct atl_fwd_ring **rings = fwd->rings[dir_tx];
+
+	atl_fwd_disable_ring(ring);
+
+	if (ring->evt) {
+		atl_fwd_disable_event(ring->evt);
+		atl_fwd_release_event(ring->evt);
+	}
+
+	__clear_bit(idx, map);
+	rings[idx - ATL_FWD_RING_BASE] = NULL;
+	atl_fwd_free_bufs(ring);
+	atl_free_descs(nic, &ring->hw);
+	kfree(ring);
+}
+EXPORT_SYMBOL(atl_fwd_release_ring);
+
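+/* dma_alloc_coherent() may hand back a vmalloc-range or a lowmem
+ * address depending on the platform, so pick the right translation
+ * when exposing the descriptor ring's physical address. */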
+static phys_addr_t atl_dma_coherent_virt_to_phys(void *vaddr)
+{
+	if (is_vmalloc_addr(vaddr))
+		return page_to_phys(vmalloc_to_page(vaddr));
+	else
+		return virt_to_phys(vaddr);
+}
+
+static unsigned int atl_fwd_rx_mod_max = 25, atl_fwd_rx_mod_min = 15,
+	atl_fwd_tx_mod_max = 25, atl_fwd_tx_mod_min = 15;
+atl_module_param(fwd_rx_mod_max, uint, 0644);
+atl_module_param(fwd_rx_mod_min, uint, 0644);
+atl_module_param(fwd_tx_mod_max, uint, 0644);
+atl_module_param(fwd_tx_mod_min, uint, 0644);
+
+struct atl_fwd_ring *atl_fwd_request_ring(struct net_device *ndev,
+	int flags, int ring_size, int buf_size, int page_order)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_fwd *fwd = &nic->fwd;
+	int dir_tx = !!(flags & ATL_FWR_TX);
+	unsigned long *map = &fwd->ring_map[dir_tx];
+	struct atl_fwd_ring **rings = fwd->rings[dir_tx], *ring;
+	int ret = -ENOMEM;
+	int idx;
+
+	if (ring_size & 7 || ring_size > ATL_MAX_RING_SIZE) {
+		atl_nic_err("%s: bad ring size %d, must be no more than %d "
+			"and a multiple of 8\n", __func__, ring_size,
+			ATL_MAX_RING_SIZE);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (buf_size & 1023 || buf_size > 16 * 1024) {
+		atl_nic_err("%s: bad buffer size %d, must be no more than 16k "
+			"and a multiple of 1024\n",
+			__func__, buf_size);
+		return ERR_PTR(-EINVAL);
+	}
+
+	idx = find_next_zero_bit(map, ATL_FWD_RING_BASE + ATL_NUM_FWD_RINGS,
+		ATL_FWD_RING_BASE);
+	if (idx >= ATL_FWD_RING_BASE + ATL_NUM_FWD_RINGS)
+		return ERR_PTR(ret);
+
+	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+	if (!ring)
+		return ERR_PTR(ret);
+
+	ring->nic = nic;
+	ring->idx = idx;
+	ring->flags = flags;
+	ring->hw.size = ring_size;
+	ring->buf_size = buf_size;
+
+	ret = atl_alloc_descs(nic, &ring->hw);
+	if (ret)
+		goto free_ring;
+
+	ring->hw.reg_base = dir_tx ? ATL_TX_RING(idx) : ATL_RX_RING(idx);
+
+	ret = atl_fwd_alloc_bufs(ring, page_order);
+	if (ret)
+		goto free_descs;
+
+	__set_bit(idx, map);
+	rings[idx - ATL_FWD_RING_BASE] = ring;
+
+	if (dir_tx) {
+		ring->intr_mod_max = atl_fwd_tx_mod_max;
+		ring->intr_mod_min = atl_fwd_tx_mod_min;
+	} else {
+		ring->intr_mod_max = atl_fwd_rx_mod_max;
+		ring->intr_mod_min = atl_fwd_rx_mod_min;
+	}
+
+	ring->desc_paddr = atl_dma_coherent_virt_to_phys(ring->hw.descs);
+
+	atl_fwd_init_ring(ring);
+	return ring;
+
+free_descs:
+	atl_free_descs(nic, &ring->hw);
+
+free_ring:
+	kfree(ring);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(atl_fwd_request_ring);
+
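+/*
+ * Usage sketch (hypothetical consumer, not part of this patch):
+ *
+ *	struct atl_fwd_ring *ring;
+ *
+ *	ring = atl_fwd_request_ring(ndev, ATL_FWR_TX | ATL_FWR_ALLOC_BUFS,
+ *				    512, 2048, 0);
+ *	if (IS_ERR(ring))
+ *		return PTR_ERR(ring);
+ *	atl_fwd_enable_ring(ring);
+ */
+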
+int atl_fwd_set_ring_intr_mod(struct atl_fwd_ring *ring, int min, int max)
+{
+	if (atl_fwd_ring_tx(ring) && ring->evt &&
+		ring->evt->flags & ATL_FWD_EVT_TXWB) {
+		struct atl_nic *nic = ring->nic;
+
+		atl_nic_err("%s: Interrupt moderation not supported for head pointer writeback events\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (min >= 0)
+		ring->intr_mod_min = min;
+
+	if (max >= 0)
+		ring->intr_mod_max = max;
+
+	atl_fwd_update_im(ring);
+	return 0;
+}
+EXPORT_SYMBOL(atl_fwd_set_ring_intr_mod);
+
+void atl_fwd_release_rings(struct atl_nic *nic)
+{
+	struct atl_fwd_ring **rings = nic->fwd.rings[0];
+	int i;
+
+	for (i = 0; i < ATL_NUM_FWD_RINGS * 2; i++)
+		if (rings[i])
+			atl_fwd_release_ring(rings[i]);
+}
+
+static void atl_fwd_reset_ring(struct atl_fwd_ring *fwd_ring)
+{
+	struct atl_hw *hw = &fwd_ring->nic->hw;
+	struct atl_hw_ring *ring = &fwd_ring->hw;
+
+	atl_write(hw, ATL_RING_CTL(ring), BIT(19));
+	udelay(10);
+	atl_write(hw, ATL_RING_CTL(ring), 0);
+}
+
+int atl_fwd_enable_ring(struct atl_fwd_ring *ring)
+{
+	struct atl_hw *hw = &ring->nic->hw;
+
+	atl_set_bits(hw, ATL_RING_CTL(&ring->hw), BIT(31));
+	ring->state |= ATL_FWR_ST_ENABLED;
+
+	return 0;
+}
+EXPORT_SYMBOL(atl_fwd_enable_ring);
+
+void atl_fwd_disable_ring(struct atl_fwd_ring *ring)
+{
+	if (!(ring->state & ATL_FWR_ST_ENABLED))
+		return;
+
+	atl_fwd_reset_ring(ring);
+	atl_fwd_init_ring(ring);
+	ring->state &= ~ATL_FWR_ST_ENABLED;
+}
+EXPORT_SYMBOL(atl_fwd_disable_ring);
+
+static void __iomem *atl_msix_bar(struct atl_nic *nic)
+{
+	struct pci_dev *pdev = nic->hw.pdev;
+	struct msi_desc *msi;
+
+	if (!pdev->msix_enabled)
+		return NULL;
+
+	msi = list_first_entry(dev_to_msi_list(&pdev->dev),
+		struct msi_desc, list);
+	return msi->mask_base;
+}
+
+static int atl_fwd_set_msix_vec(struct atl_nic *nic, struct atl_fwd_event *evt)
+{
+	int idx = evt->idx;
+	uint64_t addr = evt->msi_addr;
+	uint32_t data = evt->msi_data;
+	uint32_t ctrl;
+	void __iomem *desc = atl_msix_bar(nic);
+
+	if (!desc)
+		return -EIO;
+
+	desc += idx * PCI_MSIX_ENTRY_SIZE;
+
+	/* MSI-X table updates must be atomic, so mask first */
+	ctrl = readl(desc + PCI_MSIX_ENTRY_VECTOR_CTRL);
+	writel(ctrl | PCI_MSIX_ENTRY_CTRL_MASKBIT,
+		desc + PCI_MSIX_ENTRY_VECTOR_CTRL);
+
+	/* Program the vector */
+	writel(addr & (BIT_ULL(32) - 1), desc + PCI_MSIX_ENTRY_LOWER_ADDR);
+	writel(addr >> 32, desc + PCI_MSIX_ENTRY_UPPER_ADDR);
+	writel(data, desc + PCI_MSIX_ENTRY_DATA);
+
+	/* Unmask */
+	writel(ctrl & ~PCI_MSIX_ENTRY_CTRL_MASKBIT,
+		desc + PCI_MSIX_ENTRY_VECTOR_CTRL);
+
+	return 0;
+}
+
+void atl_fwd_release_event(struct atl_fwd_event *evt)
+{
+	struct atl_fwd_ring *ring = evt->ring;
+	struct atl_nic *nic = ring->nic;
+	unsigned long *map = &nic->fwd.msi_map;
+	int idx = evt->idx;
+
+	if (ring->evt != evt) {
+		atl_nic_err("%s: attempt to release unset event\n", __func__);
+		return;
+	}
+
+	atl_fwd_disable_event(evt);
+
+	if (evt->flags & ATL_FWD_EVT_TXWB)
+		return;
+
+	__clear_bit(idx, map);
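+	/* A vector index >= ATL_NUM_MSI_VECS clears the ring's
+	 * interrupt mapping in atl_set_intr_bits() without programming
+	 * a new vector; -1 leaves the other direction untouched. */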
+	atl_set_intr_bits(&nic->hw, ring->idx,
+		atl_fwd_ring_tx(ring) ? -1 : ATL_NUM_MSI_VECS,
+		atl_fwd_ring_tx(ring) ? ATL_NUM_MSI_VECS : -1);
+}
+EXPORT_SYMBOL(atl_fwd_release_event);
+
+int atl_fwd_request_event(struct atl_fwd_event *evt)
+{
+	struct atl_fwd_ring *ring = evt->ring;
+	int dir_tx = atl_fwd_ring_tx(ring);
+	struct atl_nic *nic = ring->nic;
+	struct atl_hw *hw = &nic->hw;
+	unsigned long *map = &nic->fwd.msi_map;
+	bool tx_wb = !!(evt->flags & ATL_FWD_EVT_TXWB);
+	int idx;
+	int ret;
+
+	if (ring->evt) {
+		atl_nic_err("%s: event already set for %s ring %d\n",
+			__func__, atl_fwd_dir_str(ring), ring->idx);
+		return -EEXIST;
+	}
+
+	if (!tx_wb && !(nic->flags & ATL_FL_MULTIPLE_VECTORS)) {
+		atl_nic_err("%s: MSI-X interrupts are disabled\n", __func__);
+		return -EINVAL;
+	}
+
+	if (tx_wb && !atl_fwd_ring_tx(ring)) {
+		atl_nic_err("%s: head pointer writeback events supported "
+			"on Tx rings only\n", __func__);
+		return -EINVAL;
+	}
+
+	if ((evt->flags & (ATL_FWD_EVT_TXWB | ATL_FWD_EVT_AUTOMASK)) ==
+		(ATL_FWD_EVT_TXWB | ATL_FWD_EVT_AUTOMASK)) {
+		atl_nic_err("%s: event automasking supported "
+			"for MSI events only\n", __func__);
+		return -EINVAL;
+	}
+
+	ring->evt = evt;
+
+	if (tx_wb) {
+		struct atl_hw_ring *hwring = &ring->hw;
+
+		atl_write(hw, ATL_TX_RING_HEAD_WB_LSW(hwring),
+			evt->tx_head_wrb);
+		atl_write(hw, ATL_TX_RING_HEAD_WB_MSW(hwring),
+			evt->tx_head_wrb >> 32);
+		return 0;
+	}
+
+	idx = find_next_zero_bit(map, ATL_NUM_MSI_VECS, ATL_FWD_MSI_BASE);
+	if (idx >= ATL_NUM_MSI_VECS) {
+		atl_nic_err("%s: no MSI vectors left\n", __func__);
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	evt->idx = idx;
+
+	ret = atl_fwd_set_msix_vec(nic, evt);
+	if (ret)
+		goto fail;
+
+	__set_bit(idx, map);
+
+	atl_set_intr_bits(&nic->hw, ring->idx,
+		dir_tx ? -1 : idx,
+		dir_tx ? idx : -1);
+
+	atl_write_bit(hw, ATL_INTR_AUTO_CLEAR, idx, 1);
+	atl_write_bit(hw, ATL_INTR_AUTO_MASK, idx,
+		!!(evt->flags & ATL_FWD_EVT_AUTOMASK));
+
+	return 0;
+
+fail:
+	ring->evt = NULL;
+	return ret;
+}
+EXPORT_SYMBOL(atl_fwd_request_event);
+
+int atl_fwd_enable_event(struct atl_fwd_event *evt)
+{
+	struct atl_fwd_ring *ring = evt->ring;
+	struct atl_hw *hw = &ring->nic->hw;
+
+	if (evt->flags & ATL_FWD_EVT_TXWB) {
+		if (ring->state & ATL_FWR_ST_ENABLED)
+			return -EINVAL;
+
+		atl_write_bit(hw, ATL_TX_RING_CTL(&ring->hw), 28, 1);
+		return 0;
+	}
+
+	atl_intr_enable(hw, BIT(evt->idx));
+	return 0;
+}
+EXPORT_SYMBOL(atl_fwd_enable_event);
+
+int atl_fwd_disable_event(struct atl_fwd_event *evt)
+{
+	struct atl_fwd_ring *ring = evt->ring;
+	struct atl_hw *hw = &ring->nic->hw;
+
+	if (evt->flags & ATL_FWD_EVT_TXWB) {
+		if (ring->state & ATL_FWR_ST_ENABLED)
+			return -EINVAL;
+
+		atl_write_bit(hw, ATL_TX_RING_CTL(&ring->hw), 28, 0);
+		return 0;
+	}
+
+	atl_intr_disable(hw, BIT(evt->idx));
+	return 0;
+}
+EXPORT_SYMBOL(atl_fwd_disable_event);
+
+int atl_fwd_receive_skb(struct net_device *ndev, struct sk_buff *skb)
+{
+	skb->protocol = eth_type_trans(skb, ndev);
+	return netif_rx(skb);
+}
+EXPORT_SYMBOL(atl_fwd_receive_skb);
+
+int atl_fwd_transmit_skb(struct net_device *ndev, struct sk_buff *skb)
+{
+	skb->dev = ndev;
+	return dev_queue_xmit(skb);
+}
+EXPORT_SYMBOL(atl_fwd_transmit_skb);
+
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fwd.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fwd.h
new file mode 100644
index 0000000..a6b2658
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_fwd.h
@@ -0,0 +1,300 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2018 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef _ATL_FWD_H_
+#define _ATL_FWD_H_
+
+#include "atl_common.h"
+
+struct atl_fwd_event;
+
+struct atl_fwd_buf_page {
+	struct page *page;
+	dma_addr_t daddr;
+};
+
+/**
+ *	atl_fwd_bufs - offload engine ring's Rx buffers
+ *
+ *	Buffers are allocated by the driver when a ring is created.
+ *
+ *	The entire buffer space for the ring may optionally be
+ *	allocated as a single physically-contiguous block.
+ *
+ *	Descriptors are overwritten with the write-back descriptor
+ *	format on Rx and optionally on Tx. To simplify Rx descriptor
+ *	refill by the offload engine, vectors containing the virtual and
+ *	DMA addresses of each buffer are provided in @vaddr_vec and
+ *	@daddr_vec respectively if the @ATL_FWR_WANT_BUF_VECS flag is
+ *	set on atl_fwd_request_ring().
+ *
+ *	If @ATL_FWR_WANT_BUF_VECS is not set, @daddr_vec_base contains
+ *	the DMA address of the first buffer page and @vaddr_vec
+ *	contains its virtual address.
+ *
+ *	@daddr_vec_base:	DMA address of the base of the @daddr_vec
+ *	@daddr_vec:		A vector of buffers' DMA addresses
+ *	@vaddr_vec:		A vector of buffers' virtual addresses
+ *				or first buffer's virtual address
+ *				depending on ring flags
+ *	@paddr:			Physical address of the first (or
+ *				only) buffer page
+ */
+struct atl_fwd_bufs {
+	dma_addr_t daddr_vec_base;
+	dma_addr_t *daddr_vec;
+	void **vaddr_vec;
+	phys_addr_t paddr;
+
+	/* The following is not part of API and subject to change */
+	int num_pages;
+	int order;
+	struct atl_fwd_buf_page bpgs[0];
+};
+
+union atl_desc;
+
+/**
+ * 	atl_hw_ring - low-level descriptor ring structure
+ *
+ * 	@descs:		Pointer to the descriptor ring
+ * 	@size:		Number of descriptors in the ring
+ * 	@reg_base:	Offset of ring's register block from start of
+ * 			BAR 0
+ * 	@daddr:		DMA address of the ring
+ */
+/* atl_hw_ring defined in "atl_hw.h" */
+
+/**
+ *	atl_fwd_ring - Offload engine-controlled ring
+ *
+ *	Buffer space is allocated by the driver on ring creation.
+ *
+ *	@hw:    	Low-level ring information
+ *	@evt:		Ring's event, either an MSI-X vector (either
+ *			Tx or Rx) or head pointer writeback address
+ *			(Tx ring only). NULL on ring allocation, set
+ *			by atl_fwd_request_event()
+ *	@bufs:		Ring's buffers. Allocated only if
+ *			@ATL_FWR_ALLOC_BUFS flag is set on ring
+ *			request.
+ *	@nic:		struct atl_nic backreference
+ *	@idx:		Ring index
+ *	@desc_paddr:	Physical address of the descriptor ring
+ */
+struct atl_fwd_ring {
+	struct atl_hw_ring hw;
+	struct atl_fwd_event *evt;
+	struct atl_fwd_bufs *bufs;
+	struct atl_nic *nic;
+	int idx;
+	phys_addr_t desc_paddr;
+
+	/* The following is not part of API and subject to change */
+	unsigned int flags;
+	unsigned long state;
+	int buf_size;
+	unsigned intr_mod_min;
+	unsigned intr_mod_max;
+};
+
+enum atl_fwd_event_flags {
+	ATL_FWD_EVT_TXWB = BIT(0), /* Event type: 0 for MSI, 1 for Tx
+				    * head WB */
+	ATL_FWD_EVT_AUTOMASK = BIT(1), /* Disable event after
+					* raising, MSI only. */
+};
+
+/**
+ * 	atl_fwd_event - Ring's notification event
+ *
+ * 	@flags:		Event type and flags
+ * 	@ring:		Ring backreference
+ * 	@msi_addr:	MSI message address
+ * 	@msi_data:	MSI message data
+ * 	@idx:		MSI index (0 .. 31)
+ * 	@tx_head_wrb:	Tx head writeback location
+ */
+struct atl_fwd_event {
+	enum atl_fwd_event_flags flags;
+	struct atl_fwd_ring *ring;
+	union {
+		struct {
+			dma_addr_t msi_addr;
+			uint32_t msi_data;
+			int idx;
+		};
+		dma_addr_t tx_head_wrb;
+	};
+};
+
+enum atl_fwd_ring_flags {
+	ATL_FWR_TX = BIT(0),	/* Direction: 0 for Rx, 1 for Tx */
+	ATL_FWR_VLAN = BIT(1),	/* Enable VLAN tag stripping / insertion */
+	ATL_FWR_LXO = BIT(2),	/* Enable LRO / LSO */
+	ATL_FWR_ALLOC_BUFS = BIT(3), /* Allocate buffers */
+	ATL_FWR_CONTIG_BUFS = BIT(4), /* Alloc buffers as physically
+				       * contiguous. May fail if
+				       * total buffer space required
+				       * is larger than a max-order
+				       * compound page. */
+	ATL_FWR_WANT_BUF_VECS = BIT(5), /* Alloc and fill per-buffer
+					 * DMA and virt address
+					 * vectors. If unset, first
+					 * buffer's daddr and vaddr
+					 * are provided in ring's
+					 * @daddr_vec_base and @vaddr_vec */
+};
+
+/**
+ * atl_fwd_request_ring() - Create a ring for an offload engine
+ *
+ * 	@ndev:		network device
+ * 	@flags:		ring flags
+ * 	@ring_size:	number of descriptors
+ * 	@buf_size:	individual buffer's size
+ * 	@page_order:	page order to use when @ATL_FWR_CONTIG_BUFS is
+ * 			not set
+ *
+ * atl_fwd_request_ring() creates a ring for an offload engine,
+ * allocates buffer memory if @ATL_FWR_ALLOC_BUFS flag is set,
+ * initializes ring's registers and fills the address fields in
+ * descriptors. Ring is inactive until explicitly enabled via
+ * atl_fwd_enable_ring().
+ *
+ * Buffers can be allocated either as a single physically-contiguous
+ * compound page, or as a sequence of compound pages of @page_order
+ * order. In the latter case, depending on the requested buffer size,
+ * tweaking the page order allows buffers to be packed into buffer
+ * pages with less wasted space.
+ *
+ * Returns the ring pointer on success, ERR_PTR(error code) on failure
+ */
+struct atl_fwd_ring *atl_fwd_request_ring(struct net_device *ndev,
+	int flags, int ring_size, int buf_size, int page_order);
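+
+/* A minimal usage sketch (illustrative only: the ring and buffer
+ * sizes are hypothetical values that merely satisfy the constraints
+ * above, and error handling is reduced to the ERR_PTR() check):
+ *
+ *	struct atl_fwd_ring *ring;
+ *
+ *	ring = atl_fwd_request_ring(ndev, ATL_FWR_TX | ATL_FWR_ALLOC_BUFS,
+ *		512, 2048, 0);
+ *	if (IS_ERR(ring))
+ *		return PTR_ERR(ring);
+ */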
+
+/**
+ * atl_fwd_release_ring() - Free offload engine's ring
+ *
+ * 	@ring:	ring to be freed
+ *
+ * Stops the ring, frees buffers if they were allocated, disables and
+ * releases ring's event if non-NULL, and frees the ring.
+ */
+void atl_fwd_release_ring(struct atl_fwd_ring *ring);
+
+/**
+ * atl_fwd_set_ring_intr_mod() - Set ring's interrupt moderation
+ * delays
+ *
+ * 	@ring:	ring
+ * 	@min:	min delay
+ * 	@max:	max delay
+ *
+ * Each ring has two configurable interrupt moderation timers. When an
+ * interrupt condition occurs (write-back of the final descriptor of a
+ * packet on receive, or writeback on a transmit descriptor with WB
+ * bit set), the min timer is restarted unconditionally and max timer
+ * is started only if it's not running yet. When any of the timers
+ * expires, the interrupt is signalled.
+ *
+ * Thus a single isolated event is signalled after the min delay. If
+ * subsequent events keep occurring at intervals shorter than the min
+ * delay, the interrupt is signalled max delay after the initial
+ * event.
+ *
+ * When called with negative @min or @max, the corresponding setting
+ * is left unchanged.
+ *
+ * Interrupt moderation is only supported for MSI-X vectors, not head
+ * pointer writeback events.
+ *
+ * Returns 0 on success or -EINVAL on an attempt to set moderation
+ * delays for a ring with an attached Tx WB event.
+ */
+int atl_fwd_set_ring_intr_mod(struct atl_fwd_ring *ring, int min, int max);
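+
+/* Example (illustrative values, in the driver's moderation time
+ * units): with @min == 15 and @max == 25, a lone event is signalled
+ * after the min delay, while a burst of events spaced closer than the
+ * min delay is signalled max delay after the first one. A negative
+ * argument leaves that setting unchanged:
+ *
+ *	err = atl_fwd_set_ring_intr_mod(ring, 15, -1);
+ */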
+
+/**
+ * atl_fwd_enable_ring() - Enable offload engine's ring
+ *
+ * 	@ring: ring to be enabled
+ *
+ * Starts the ring. Returns 0 on success or negative error code.
+ */
+int atl_fwd_enable_ring(struct atl_fwd_ring *ring);
+
+/**
+ * atl_fwd_disable_ring() - Disable offload engine's ring
+ *
+ * 	@ring: ring to be disabled
+ *
+ * Stops and resets the ring. On the next ring enable, the head and
+ * tail pointers will be zero.
+ */
+void atl_fwd_disable_ring(struct atl_fwd_ring *ring);
+
+/**
+ * atl_fwd_request_event() - Creates and attaches a ring notification
+ * event
+ *
+ * 	@evt:		event structure
+ *
+ * Caller must allocate a struct atl_fwd_event and fill the @flags,
+ * @ring and either @tx_head_wrb or @msi_addr and @msi_data depending
+ * on the type bit in @flags. Event is created in disabled state.
+ *
+ * For an MSI event type, an MSI vector table slot is
+ * allocated and programmed, and its index is saved in @evt->idx.
+ *
+ * @evt is then attached to the ring.
+ *
+ * Returns 0 on success or negative error code.
+ */
+int atl_fwd_request_event(struct atl_fwd_event *evt);
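+
+/* A Tx head-pointer writeback sketch: only @flags, @ring and
+ * @tx_head_wrb matter for this event type. @wb_daddr is assumed to be
+ * a DMA address owned by the offload engine, the allocation failure
+ * check is elided, and the caller-owned structure must stay valid
+ * while the event is attached:
+ *
+ *	struct atl_fwd_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
+ *
+ *	evt->flags = ATL_FWD_EVT_TXWB;
+ *	evt->ring = ring;
+ *	evt->tx_head_wrb = wb_daddr;
+ *	err = atl_fwd_request_event(evt);
+ */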
+
+/**
+ * atl_fwd_release_event() - Release a ring notification event
+ *
+ * 	@evt:		event structure
+ *
+ * Disables the event if enabled, frees the MSI vector for an MSI-type
+ * event and detaches @evt from the ring. The @evt structure itself is
+ * not freed.
+ */
+void atl_fwd_release_event(struct atl_fwd_event *evt);
+
+/**
+ * atl_fwd_enable_event() - Enable a ring event
+ *
+ * 	@evt:		event structure
+ *
+ * Enables the event.
+ *
+ * Returns 0 on success or negative error code.
+ */
+int atl_fwd_enable_event(struct atl_fwd_event *evt);
+
+/**
+ * atl_fwd_disable_event() - Disable a ring event
+ *
+ * 	@evt:		event structure
+ *
+ * Disables the event.
+ *
+ * Returns 0 on success or negative error code.
+ */
+int atl_fwd_disable_event(struct atl_fwd_event *evt);
+
+int atl_fwd_receive_skb(struct net_device *ndev, struct sk_buff *skb);
+int atl_fwd_transmit_skb(struct net_device *ndev, struct sk_buff *skb);
+
+enum atl_fwd_ring_state {
+	ATL_FWR_ST_ENABLED = BIT(0),
+};
+
+#endif
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hw.c b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hw.c
new file mode 100644
index 0000000..1afd7b7
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hw.c
@@ -0,0 +1,1042 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+
+#include "atl_common.h"
+#include "atl_hw.h"
+#include "atl_ring.h"
+
+struct atl_board_info {
+	unsigned int link_mask;
+};
+
+static struct atl_board_info atl_boards[] = {
+	[ATL_UNKNOWN] = {
+		.link_mask = 0x1f,
+	},
+	[ATL_AQC107] = {
+		.link_mask = 0x1f,
+	},
+	[ATL_AQC108] = {
+		.link_mask = 0xf,
+	},
+	[ATL_AQC109] = {
+		.link_mask = 7,
+	},
+	[ATL_AQC100] = {
+		.link_mask = 0x1f,
+	},
+};
+
+static void atl_unplugged(struct atl_hw *hw)
+{
+	if (!hw->regs)
+		return;
+	hw->regs = NULL;
+	dev_err(&hw->pdev->dev, "Device removed\n");
+}
+
+void atl_check_unplug(struct atl_hw *hw, uint32_t addr)
+{
+	uint32_t val;
+
+	if (addr == ATL_GLOBAL_MIF_ID) {
+		atl_unplugged(hw);
+		return;
+	}
+
+	val = atl_read(hw, ATL_GLOBAL_MIF_ID);
+	if (val == 0xffffffff)
+		atl_unplugged(hw);
+}
+
+int atl_read_mcp_mem(struct atl_hw *hw, uint32_t mcp_addr, void *host_addr,
+		      unsigned int size)
+{
+	uint32_t *addr = (uint32_t *)host_addr;
+
+	size = (size + 3) & ~3u;
+	atl_write(hw, ATL_GLOBAL_MBOX_ADDR, mcp_addr);
+	while (size) {
+		uint32_t next;
+
+		atl_write(hw, ATL_GLOBAL_MBOX_CTRL, 0x8000);
+
+		busy_wait(100, udelay(10), next,
+			  atl_read(hw, ATL_GLOBAL_MBOX_ADDR), next == mcp_addr);
+		if (next == mcp_addr) {
+			atl_dev_err("mcp mem read timed out (%d remaining)\n",
+				    size);
+			return -EIO;
+		}
+		*addr = atl_read(hw, ATL_GLOBAL_MBOX_DATA);
+		mcp_addr += 4;
+		addr++;
+		size -= 4;
+	}
+	return 0;
+}
+
+
+static inline void atl_glb_soft_reset(struct atl_hw *hw)
+{
+	atl_write_bit(hw, ATL_GLOBAL_STD_CTRL, 14, 0);
+	atl_write_bit(hw, ATL_GLOBAL_STD_CTRL, 15, 1);
+}
+
+static inline void atl_glb_soft_reset_full(struct atl_hw *hw)
+{
+	atl_write_bit(hw, ATL_TX_CTRL1, 29, 0);
+	atl_write_bit(hw, ATL_RX_CTRL1, 29, 0);
+	atl_write_bit(hw, ATL_INTR_CTRL, 29, 0);
+	atl_write_bit(hw, ATL_MPI_CTRL1, 29, 0);
+	atl_glb_soft_reset(hw);
+}
+
+static int atl_hw_reset_nonrbl(struct atl_hw *hw)
+{
+	uint32_t tries;
+	uint32_t reg = atl_read(hw, ATL_GLOBAL_DAISY_CHAIN_STS1);
+
+	bool daisychain_running = (reg & 0x30) != 0x30;
+
+	if (daisychain_running)
+		atl_dev_dbg("AQDBG: daisychain running (0x18: %#x)\n",
+			    atl_read(hw, ATL_GLOBAL_FW_IMAGE_ID));
+
+	atl_write(hw, 0x404, 0x40e1);
+	mdelay(50);
+
+	atl_write(hw, 0x534, 0xa0);
+	atl_write(hw, 0x100, 0x9f);
+	atl_write(hw, 0x100, 0x809f);
+	mdelay(50);
+
+	atl_glb_soft_reset(hw);
+
+	atl_write(hw, 0x404, 0x80e0);
+	atl_write(hw, 0x32a8, 0);
+	atl_write(hw, 0x520, 1);
+	mdelay(50);
+	atl_write(hw, 0x404, 0x180e0);
+
+	tries = busy_wait(10000, mdelay(1), reg, atl_read(hw, 0x704),
+		!(reg & 0x10));
+	if (!(reg & 0x10)) {
+		atl_dev_err("FLB kickstart timed out: %#x\n", reg);
+		return -EIO;
+	}
+	atl_dev_dbg("FLB kickstart took %d ms\n", tries);
+
+	atl_write(hw, 0x404, 0x80e0);
+	mdelay(50);
+	atl_write(hw, 0x3a0, 1);
+
+	atl_glb_soft_reset_full(hw);
+
+	return atl_fw_init(hw);
+}
+
+int atl_hw_reset(struct atl_hw *hw)
+{
+	uint32_t reg = atl_read(hw, ATL_MCP_SCRATCH(RBL_STS));
+	uint32_t flb_stat = atl_read(hw, ATL_GLOBAL_DAISY_CHAIN_STS1);
+	int tries = 0;
+	/* bool host_load_done = false; */
+
+	while (!reg && flb_stat == 0x6000000 && tries++ < 1000) {
+		mdelay(1);
+		reg = atl_read(hw, ATL_MCP_SCRATCH(RBL_STS));
+		flb_stat = atl_read(hw, ATL_GLOBAL_DAISY_CHAIN_STS1);
+	}
+
+	atl_dev_dbg("0x388: %#x 0x704: %#x\n", reg, flb_stat);
+	if (tries >= 1000) {
+		atl_dev_err("Timeout waiting to choose RBL or FLB path\n");
+		return -EIO;
+	}
+
+	if (!reg)
+		return atl_hw_reset_nonrbl(hw);
+
+	atl_write(hw, 0x404, 0x40e1);
+	atl_write(hw, 0x3a0, 1);
+	atl_write(hw, 0x32a8, 0);
+
+	atl_write(hw, ATL_MCP_SCRATCH(RBL_STS), 0xdead);
+
+	atl_glb_soft_reset_full(hw);
+
+	atl_write(hw, ATL_GLOBAL_CTRL2, 0x40e0);
+
+	for (tries = 0; tries < 10000; mdelay(1)) {
+		tries++;
+		reg = atl_read(hw, ATL_MCP_SCRATCH(RBL_STS)) & 0xffff;
+
+		if (!reg || reg == 0xdead)
+			continue;
+
+		/* if (reg != 0xf1a7) */
+			break;
+
+		/* if (host_load_done) */
+		/* 	continue; */
+
+		/* ret = atl_load_mac_fw(hw); */
+		/* if (ret) { */
+		/* 	atl_dev_err("MAC FW host load failed\n"); */
+		/* 	return ret; */
+		/* } */
+		/* host_load_done = true; */
+	}
+
+	if (reg == 0xf1a7) {
+		atl_dev_err("MAC FW Host load not supported yet\n");
+		return -EIO;
+	}
+	if (!reg || reg == 0xdead) {
+		atl_dev_err("RBL restart timeout: %#x\n", reg);
+		return -EIO;
+	}
+	atl_dev_dbg("RBL restart took %d ms result %#x\n", tries, reg);
+
+	/* if (host_load_done) { */
+	/* 	// Wait for MAC FW to decide whether it wants to reload the PHY FW */
+	/* 	busy_wait(10, mdelay(1), reg, atl_read(hw, 0x340), !(reg & (1 << 9 | 1 << 1 | 1 << 0))); */
+
+	/* 	if (reg & 1 << 9) { */
+	/* 		ret = atl_load_phy_fw(hw); */
+	/* 		if (ret) { */
+	/* 			atl_dev_err("PHY FW host load failed\n"); */
+	/* 			return ret; */
+	/* 		} */
+	/* 	} */
+	/* } */
+
+	return atl_fw_init(hw);
+}
+
+static int atl_get_mac_addr(struct atl_hw *hw, uint8_t *buf)
+{
+	uint32_t efuse_shadow_addr =
+		atl_read(hw, hw->mcp.ops->efuse_shadow_addr_reg);
+	uint8_t tmp[8];
+	int ret;
+
+	if (!efuse_shadow_addr)
+		return -EIO;
+
+	ret = atl_read_mcp_mem(hw, efuse_shadow_addr + 40 * 4, tmp, 8);
+	*(uint32_t *)buf = htonl(*(uint32_t *)tmp);
+	*(uint16_t *)&buf[4] = (uint16_t)htonl(*(uint32_t *)&tmp[4]);
+
+	return ret;
+}
+
+int atl_hwinit(struct atl_nic *nic, enum atl_board brd_id)
+{
+	struct atl_hw *hw = &nic->hw;
+	struct atl_board_info *brd = &atl_boards[brd_id];
+	int ret;
+
+	/* Default supported speed set based on device id. */
+	hw->link_state.supported = brd->link_mask;
+
+	ret = atl_hw_reset(hw);
+
+	atl_dev_info("rev 0x%x chip 0x%x FW img 0x%x\n",
+		 atl_read(hw, ATL_GLOBAL_CHIP_REV) & 0xffff,
+		 atl_read(hw, ATL_GLOBAL_CHIP_ID) & 0xffff,
+		 atl_read(hw, ATL_GLOBAL_FW_IMAGE_ID));
+
+	if (ret)
+		return ret;
+
+	ret = atl_get_mac_addr(hw, hw->mac_addr);
+	if (ret) {
+		atl_dev_err("couldn't read MAC address\n");
+		return ret;
+	}
+
+	return hw->mcp.ops->get_link_caps(hw);
+}
+
+static void atl_rx_xoff_set(struct atl_nic *nic, bool fc)
+{
+	struct atl_hw *hw = &nic->hw;
+
+	atl_write_bit(hw, ATL_RX_PBUF_REG2(0), 31, fc);
+}
+
+void atl_refresh_link(struct atl_nic *nic)
+{
+	struct atl_hw *hw = &nic->hw;
+	struct atl_link_type *link, *prev_link = hw->link_state.link;
+
+	link = hw->mcp.ops->check_link(hw);
+
+	if (link) {
+		if (link != prev_link)
+			atl_nic_info("Link up: %s\n", link->name);
+		netif_carrier_on(nic->ndev);
+	} else {
+		if (link != prev_link)
+			atl_nic_info("Link down\n");
+		netif_carrier_off(nic->ndev);
+	}
+	atl_rx_xoff_set(nic, !!(hw->link_state.fc.cur & atl_fc_rx));
+}
+
+static irqreturn_t atl_link_irq(int irq, void *priv)
+{
+	struct atl_nic *nic = (struct atl_nic *)priv;
+
+	atl_schedule_work(nic);
+	atl_intr_enable(&nic->hw, BIT(0));
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t atl_legacy_irq(int irq, void *priv)
+{
+	struct atl_nic *nic = priv;
+	struct atl_hw *hw = &nic->hw;
+	uint32_t mask = hw->intr_mask | atl_qvec_intr(nic->qvecs);
+	uint32_t stat;
+
+
+	stat = atl_read(hw, ATL_INTR_STS);
+
+	/* Mask asserted intr sources */
+	atl_intr_disable(hw, stat);
+
+	if (!(stat & mask))
+		/* Interrupt from another device on a shared int
+		 * line. As no status bits were set, nothing was
+		 * masked above, so no need to unmask anything. */
+		return IRQ_NONE;
+
+	if (likely(stat & BIT(ATL_NUM_NON_RING_IRQS)))
+		/* Only one qvec when using legacy interrupts */
+		atl_ring_irq(irq, &nic->qvecs[0].napi);
+
+	if (unlikely(stat & BIT(0)))
+		atl_link_irq(irq, nic);
+	return IRQ_HANDLED;
+}
+
+int atl_alloc_link_intr(struct atl_nic *nic)
+{
+	struct pci_dev *pdev = nic->hw.pdev;
+	int ret;
+
+	if (nic->flags & ATL_FL_MULTIPLE_VECTORS) {
+		ret = request_irq(pci_irq_vector(pdev, 0), atl_link_irq, 0,
+		nic->ndev->name, nic);
+		if (ret)
+			atl_nic_err("request MSI link vector failed: %d\n",
+				-ret);
+		return ret;
+	}
+
+	ret = request_irq(pci_irq_vector(pdev, 0), atl_legacy_irq, IRQF_SHARED,
+		nic->ndev->name, nic);
+	if (ret)
+		atl_nic_err("request legacy irq failed: %d\n", -ret);
+
+	return ret;
+}
+
+void atl_free_link_intr(struct atl_nic *nic)
+{
+	free_irq(pci_irq_vector(nic->hw.pdev, 0), nic);
+}
+
+void atl_set_uc_flt(struct atl_hw *hw, int idx, uint8_t mac_addr[ETH_ALEN])
+{
+	atl_write(hw, ATL_RX_UC_FLT_REG1(idx),
+		be32_to_cpu(*(uint32_t *)&mac_addr[2]));
+	atl_write(hw, ATL_RX_UC_FLT_REG2(idx),
+		(uint32_t)be16_to_cpu(*(uint16_t *)mac_addr) |
+		1 << 16 | 1 << 31);
+}
+
+static void atl_disable_uc_flt(struct atl_hw *hw, int idx)
+{
+	atl_write(hw, ATL_RX_UC_FLT_REG2(idx), 0);
+}
+
+void atl_set_rss_key(struct atl_hw *hw)
+{
+	int i;
+	uint32_t val;
+
+	for (i = 0; i < ATL_RSS_KEY_SIZE / 4; i++) {
+		val = swab32(((uint32_t *)hw->rss_key)[i]);
+		atl_write(hw, ATL_RX_RSS_KEY_WR_DATA, val);
+		atl_write(hw, ATL_RX_RSS_KEY_ADDR, i | BIT(5));
+		busy_wait(100, udelay(1), val,
+			atl_read(hw, ATL_RX_RSS_KEY_ADDR),
+			val & BIT(5));
+		if (val & BIT(5)) {
+			atl_dev_err("Timeout writing RSS key[%d]: %#x\n",
+				i, val);
+			return;
+		}
+	}
+}
+
+void atl_set_rss_tbl(struct atl_hw *hw)
+{
+	int i, shift = 0, addr = 0;
+	uint32_t val = 0, stat;
+
+	for (i = 0; i < ATL_RSS_TBL_SIZE; i++) {
+		val |= (uint32_t)(hw->rss_tbl[i]) << shift;
+		shift += 3;
+
+		if (shift < 16)
+			continue;
+
+		atl_write(hw, ATL_RX_RSS_TBL_WR_DATA, val & 0xffff);
+		atl_write(hw, ATL_RX_RSS_TBL_ADDR, addr | BIT(4));
+
+		busy_wait(100, udelay(1), stat,
+			atl_read(hw, ATL_RX_RSS_TBL_ADDR), stat & BIT(4));
+		if (stat & BIT(4)) {
+			atl_dev_err("Timeout writing RSS redir table[%d] (addr %d): %#x\n",
+				    i, addr, stat);
+			return;
+		}
+
+		shift -= 16;
+		val >>= 16;
+		addr++;
+	}
+}
+
+unsigned int atl_fwd_rx_buf_reserve = 0, atl_fwd_tx_buf_reserve = 0;
+module_param_named(fwd_tx_buf_reserve, atl_fwd_tx_buf_reserve, uint, 0444);
+module_param_named(fwd_rx_buf_reserve, atl_fwd_rx_buf_reserve, uint, 0444);
+
+void atl_start_hw_global(struct atl_nic *nic)
+{
+	struct atl_hw *hw = &nic->hw;
+
+	/* Enable TPO2 */
+	atl_write(hw, 0x7040, 0x10000);
+	/* Enable RPF2, filter logic 3 */
+	atl_write(hw, 0x5040, BIT(16) | (3 << 17));
+
+	/* Alloc TPB */
+	/* TC1: space for offload engine iface */
+	atl_write(hw, ATL_TX_PBUF_REG1(1), atl_fwd_tx_buf_reserve);
+	/* TC0: 160k minus TC1 size */
+	atl_write(hw, ATL_TX_PBUF_REG1(0), 160 - atl_fwd_tx_buf_reserve);
+	/* 4-TC | Enable TPB */
+	atl_set_bits(hw, ATL_TX_PBUF_CTRL1, BIT(8) | BIT(0));
+
+	/* Alloc RPB */
+	/* TC1: space for offload engine iface */
+	atl_write(hw, ATL_RX_PBUF_REG1(1), atl_fwd_rx_buf_reserve);
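+	/* Assuming the REG2 threshold fields use the same units as the
+	 * rest of the RPB setup, this programs the xoff/xon
+	 * flow-control watermarks at roughly 66% / 50% of the TC's
+	 * buffer space, with bit 31 enabling flow control for the TC. */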
+	atl_write(hw, ATL_RX_PBUF_REG2(1), BIT(31) |
+		(atl_fwd_rx_buf_reserve * 32 * 66 / 100) << 16 |
+		(atl_fwd_rx_buf_reserve * 32 * 50 / 100));
+	/* TC0: 320k minus TC1 size */
+	atl_write(hw, ATL_RX_PBUF_REG1(0), 320 - atl_fwd_rx_buf_reserve);
+	atl_write(hw, ATL_RX_PBUF_REG2(0), BIT(31) |
+		((320 - atl_fwd_rx_buf_reserve) * 32 * 66 / 100) << 16 |
+		((320 - atl_fwd_rx_buf_reserve) * 32 * 50 / 100));
+	/* 4-TC | Enable RPB */
+	atl_set_bits(hw, ATL_RX_PBUF_CTRL1, BIT(8) | BIT(4) | BIT(0));
+
+	/* TPO */
+	/* Enable L3 | L4 chksum */
+	atl_set_bits(hw, ATL_TX_PO_CTRL1, 3);
+	/* TSO TCP flags bitmask first / middle */
+	atl_write(hw, ATL_TX_LSO_TCP_CTRL1, 0x0ff60ff6);
+	/* TSO TCP flags bitmask last */
+	atl_write(hw, ATL_TX_LSO_TCP_CTRL2, 0xf7f);
+
+	/* RPO */
+	/* Enable  L3 | L4 chksum */
+	atl_set_bits(hw, ATL_RX_PO_CTRL1, 3);
+	atl_write_bits(hw, ATL_RX_LRO_CTRL2, 12, 2, 0);
+	atl_write_bits(hw, ATL_RX_LRO_CTRL2, 5, 2, 0);
+	/* 10 us base, 20 us inactive timeout, 60 us max coalescing
+	 * interval
+	 */
+	atl_write(hw, ATL_RX_LRO_TMRS, 0xc35 << 20 | 2 << 10 | 6);
+	atl_write(hw, ATL_INTR_RSC_DELAY, (atl_min_intr_delay / 2) - 1);
+
+	/* RPF */
+	/* Default RPF2 parser options */
+	atl_write(hw, ATL_RX_FLT_CTRL2, 0x0);
+	atl_set_uc_flt(hw, 0, hw->mac_addr);
+	/* BC action host */
+	atl_write_bits(hw, ATL_RX_FLT_CTRL1, 12, 3, 1);
+	/* Enable BC */
+	atl_write_bit(hw, ATL_RX_FLT_CTRL1, 0, 1);
+	/* BC thresh */
+	atl_write_bits(hw, ATL_RX_FLT_CTRL1, 16, 16, 0x1000);
+
+	/* Enable untagged packets */
+	atl_write(hw, ATL_RX_VLAN_FLT_CTRL1, 1 << 2 | 1 << 3);
+
+	/* Reprogram ethtool Rx filters */
+	atl_refresh_rxfs(nic);
+
+	atl_set_rss_key(hw);
+	/* Enable RSS | 8 queues per TC */
+	atl_write(hw, ATL_RX_RSS_CTRL, BIT(31) | 3);
+
+	/* Global interrupt block init */
+	if (nic->flags & ATL_FL_MULTIPLE_VECTORS) {
+		/* MSI or MSI-X interrupt mode */
+		uint32_t ctrl = hw->pdev->msix_enabled ? 2 : 1;
+
+		/* Enable multi-vector mode and mask autoclear
+		 * register */
+		ctrl |= BIT(2) | BIT(5);
+
+		atl_write(hw, ATL_INTR_CTRL, ctrl);
+
+		/* Enable auto-masking of link interrupt on intr generation */
+		atl_set_bits(hw, ATL_INTR_AUTO_MASK, BIT(0));
+		/* Enable status auto-clear on link intr generation */
+		atl_set_bits(hw, ATL_INTR_AUTO_CLEAR, BIT(0));
+	} else
+		/* Enable legacy INTx mode and status clear-on-read */
+		atl_write(hw, ATL_INTR_CTRL, BIT(7));
+
+	/* Map link interrupt to cause 0 */
+	atl_write(hw, ATL_INTR_GEN_INTR_MAP4, BIT(7) | (0 << 0));
+
+	atl_write(hw, ATL_TX_INTR_CTRL, BIT(4));
+	atl_write(hw, ATL_RX_INTR_CTRL, 2 << 4 | BIT(3));
+
+	/* Reset Rx/Tx on unexpected PERST# */
+	atl_write_bit(hw, 0x1000, 29, 0);
+	atl_write(hw, 0x448, 3);
+
+	/* Enable non-ring interrupts */
+	atl_intr_enable(hw, hw->intr_mask | (uint32_t)(nic->fwd.msi_map));
+}
+
+#define atl_vlan_flt_val(vid) ((uint32_t)(vid) | 1 << 16 | 1 << 31)
+
+static void atl_set_all_multi(struct atl_hw *hw, bool all_multi)
+{
+	atl_write_bit(hw, ATL_RX_MC_FLT_MSK, 14, all_multi);
+	atl_write(hw, ATL_RX_MC_FLT(0), all_multi ? 0x80010000 : 0);
+}
+
+void atl_set_rx_mode(struct net_device *ndev)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_hw *hw = &nic->hw;
+	int uc_count = netdev_uc_count(ndev), mc_count = netdev_mc_count(ndev);
+	int promisc_needed = !!(ndev->flags & IFF_PROMISC);
+	int all_multi_needed = !!(ndev->flags & IFF_ALLMULTI);
+	int i = 1; /* UC filter 0 reserved for MAC address */
+	struct netdev_hw_addr *hwaddr;
+
+	if (uc_count > ATL_UC_FLT_NUM - 1)
+		promisc_needed |= 1;
+	else if (uc_count + mc_count > ATL_UC_FLT_NUM - 1)
+		all_multi_needed |= 1;
+
+
+	/* Enable promisc VLAN mode iff IFF_PROMISC explicitly
+	 * requested or too many VIDs registered
+	 */
+	atl_set_vlan_promisc(hw,
+		ndev->flags & IFF_PROMISC || nic->rxf_vlan.promisc_count);
+
+	atl_write_bit(hw, ATL_RX_FLT_CTRL1, 3, promisc_needed);
+	if (promisc_needed)
+		return;
+
+	netdev_for_each_uc_addr(hwaddr, ndev)
+		atl_set_uc_flt(hw, i++, hwaddr->addr);
+
+	atl_set_all_multi(hw, all_multi_needed);
+
+	if (!all_multi_needed)
+		netdev_for_each_mc_addr(hwaddr, ndev)
+			atl_set_uc_flt(hw, i++, hwaddr->addr);
+
+	while (i < ATL_UC_FLT_NUM)
+		atl_disable_uc_flt(hw, i++);
+}
+
+int atl_alloc_descs(struct atl_nic *nic, struct atl_hw_ring *ring)
+{
+	struct device *dev = &nic->hw.pdev->dev;
+
+	ring->descs = dma_alloc_coherent(dev, ring->size * sizeof(*ring->descs),
+					 &ring->daddr, GFP_KERNEL);
+
+	if (!ring->descs)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void atl_free_descs(struct atl_nic *nic, struct atl_hw_ring *ring)
+{
+	struct device *dev = &nic->hw.pdev->dev;
+
+	if (!ring->descs)
+		return;
+
+	dma_free_coherent(dev, ring->size * sizeof(*ring->descs),
+		ring->descs, ring->daddr);
+	ring->descs = NULL;
+}
+
+void atl_set_intr_bits(struct atl_hw *hw, int idx, int rxbit, int txbit)
+{
+	int shift = idx & 1 ? 0 : 8;
+	uint32_t clear_mask = 0;
+	uint32_t set_mask = 0;
+	uint32_t val;
+
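+	/* Each ATL_INTR_RING_INTR_MAP register maps a pair of rings:
+	 * the even-numbered ring uses the high byte of each halfword
+	 * (shift 8), the odd one the low byte. Within a byte, bits 4:0
+	 * select the MSI vector and bit 7 enables the mapping; the Rx
+	 * map occupies the low halfword and the Tx map the high one. */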
+	if (rxbit >= 0) {
+		clear_mask |= BIT(7) | (BIT(5) - 1);
+		if (rxbit < ATL_NUM_MSI_VECS)
+			set_mask |= BIT(7) | rxbit;
+	}
+	if (txbit >= 0) {
+		clear_mask |= (BIT(7) | (BIT(5) - 1)) << 0x10;
+		if (txbit < ATL_NUM_MSI_VECS)
+			set_mask |= (BIT(7) | txbit) << 0x10;
+	}
+
+	val = atl_read(hw, ATL_INTR_RING_INTR_MAP(idx));
+	val &= ~(clear_mask << shift);
+	val |= set_mask << shift;
+	atl_write(hw, ATL_INTR_RING_INTR_MAP(idx), val);
+}
+
+void atl_set_loopback(struct atl_nic *nic, int idx, bool on)
+{
+	struct atl_hw *hw = &nic->hw;
+
+	switch (idx) {
+	case ATL_PF_LPB_SYS_DMA:
+		atl_write_bit(hw, ATL_TX_CTRL1, 6, on);
+		atl_write_bit(hw, ATL_RX_CTRL1, 6, on);
+		break;
+	case ATL_PF_LPB_SYS_PB:
+		atl_write_bit(hw, ATL_TX_CTRL1, 7, on);
+		atl_write_bit(hw, ATL_RX_CTRL1, 8, on);
+		break;
+	/* case ATL_PF_LPB_NET_DMA: */
+	/* 	atl_write_bit(hw, ATL_TX_CTRL1, 4, on); */
+	/* 	atl_write_bit(hw, ATL_RX_CTRL1, 4, on); */
+	/* 	break; */
+	}
+}
+
+void atl_update_ntuple_flt(struct atl_nic *nic, int idx)
+{
+	struct atl_hw *hw = &nic->hw;
+	struct atl_rxf_ntuple *ntuple = &nic->rxf_ntuple;
+	uint32_t cmd = ntuple->cmd[idx];
+	int i, len = 1;
+
+	if (!(cmd & ATL_NTC_EN)) {
+		atl_write(hw, ATL_NTUPLE_CTRL(idx), cmd);
+		return;
+	}
+
+	if (cmd & ATL_NTC_V6)
+		len = 4;
+
+	for (i = idx; i < idx + len; i++) {
+		if (cmd & ATL_NTC_SA)
+			atl_write(hw, ATL_NTUPLE_SADDR(i),
+				swab32(ntuple->src_ip4[i]));
+
+		if (cmd & ATL_NTC_DA)
+			atl_write(hw, ATL_NTUPLE_DADDR(i),
+				swab32(ntuple->dst_ip4[i]));
+	}
+
+	if (cmd & ATL_NTC_SP)
+		atl_write(hw, ATL_NTUPLE_SPORT(idx),
+			swab16(ntuple->src_port[idx]));
+
+	if (cmd & ATL_NTC_DP)
+		atl_write(hw, ATL_NTUPLE_DPORT(idx),
+			swab16(ntuple->dst_port[idx]));
+
+	if (cmd & ATL_NTC_RXQ)
+		cmd |= 1 << ATL_NTC_ACT_SHIFT;
+
+	atl_write(hw, ATL_NTUPLE_CTRL(idx), cmd);
+}
+
+int atl_hwsem_get(struct atl_hw *hw, int idx)
+{
+	uint32_t val;
+
+	busy_wait(10000, udelay(1), val, atl_read(hw, ATL_MCP_SEM(idx)), !val);
+
+	if (!val)
+		return -ETIME;
+
+	return 0;
+}
+
+void atl_hwsem_put(struct atl_hw *hw, int idx)
+{
+	atl_write(hw, ATL_MCP_SEM(idx), 1);
+}
+
+static int atl_msm_wait(struct atl_hw *hw)
+{
+	uint32_t val;
+
+	busy_wait(10, udelay(1), val, atl_read(hw, ATL_MPI_MSM_ADDR),
+		val & BIT(12));
+	if (val & BIT(12))
+		return -ETIME;
+
+	return 0;
+}
+
+int __atl_msm_read(struct atl_hw *hw, uint32_t addr, uint32_t *val)
+{
+	int ret;
+
+	ret = atl_msm_wait(hw);
+	if (ret)
+		return ret;
+
+	atl_write(hw, ATL_MPI_MSM_ADDR, (addr >> 2) | BIT(9));
+	ret = atl_msm_wait(hw);
+	if (ret)
+		return ret;
+
+	*val = atl_read(hw, ATL_MPI_MSM_RD);
+	return 0;
+}
+
+int atl_msm_read(struct atl_hw *hw, uint32_t addr, uint32_t *val)
+{
+	int ret;
+
+	ret = atl_hwsem_get(hw, ATL_MCP_SEM_MSM);
+	if (ret)
+		return ret;
+
+	ret = __atl_msm_read(hw, addr, val);
+	atl_hwsem_put(hw, ATL_MCP_SEM_MSM);
+
+	return ret;
+}
+
+int __atl_msm_write(struct atl_hw *hw, uint32_t addr, uint32_t val)
+{
+	int ret;
+
+	ret = atl_msm_wait(hw);
+	if (ret)
+		return ret;
+
+	atl_write(hw, ATL_MPI_MSM_WR, val);
+	atl_write(hw, ATL_MPI_MSM_ADDR, addr | BIT(8));
+	ret = atl_msm_wait(hw);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+int atl_msm_write(struct atl_hw *hw, uint32_t addr, uint32_t val)
+{
+	int ret;
+
+	ret = atl_hwsem_get(hw, ATL_MCP_SEM_MSM);
+	if (ret)
+		return ret;
+
+	ret = __atl_msm_write(hw, addr, val);
+	atl_hwsem_put(hw, ATL_MCP_SEM_MSM);
+
+	return ret;
+}
+
+static int atl_mdio_wait(struct atl_hw *hw)
+{
+	uint32_t val;
+
+	busy_wait(20, udelay(1), val, atl_read(hw, ATL_GLOBAL_MDIO_CMD),
+		val & BIT(31));
+	if (val & BIT(31))
+		return -ETIME;
+
+	return 0;
+}
+
+int atl_mdio_hwsem_get(struct atl_hw *hw)
+{
+	int ret;
+
+	ret = atl_hwsem_get(hw, ATL_MCP_SEM_MDIO);
+	if (ret)
+		return ret;
+
+	/* Enable the MDIO clock (active low) in case the MBU has
+	 * disabled it. */
+	atl_write_bit(hw, ATL_GLOBAL_MDIO_CTL, 14, 0);
+	return 0;
+}
+
+void atl_mdio_hwsem_put(struct atl_hw *hw)
+{
+	/* It's OK to leave the MDIO clock running; according to the
+	 * FW team, that's what the FW itself does. */
+	atl_hwsem_put(hw, ATL_MCP_SEM_MDIO);
+}
+
+static void atl_mdio_set_addr(struct atl_hw *hw, uint8_t prtad, uint8_t mmd,
+	uint16_t addr)
+{
+	/* Set address */
+	atl_write(hw, ATL_GLOBAL_MDIO_ADDR, addr & (BIT(16) - 1));
+	/* Address operation | execute | prtad + mmd */
+	atl_write(hw, ATL_GLOBAL_MDIO_CMD, BIT(15) | 3 << 12 |
+		prtad << 5 | mmd);
+}
+
+int __atl_mdio_read(struct atl_hw *hw, uint8_t prtad, uint8_t mmd,
+	uint16_t addr, uint16_t *val)
+{
+	int ret;
+
+	ret = atl_mdio_wait(hw);
+	if (ret)
+		return ret;
+
+	atl_mdio_set_addr(hw, prtad, mmd, addr);
+	ret = atl_mdio_wait(hw);
+	if (ret)
+		return ret;
+
+	/* Read operation | execute | prtad + mmd */
+	atl_write(hw, ATL_GLOBAL_MDIO_CMD, BIT(15) | 1 << 12 |
+		prtad << 5 | mmd);
+
+	ret = atl_mdio_wait(hw);
+	if (ret)
+		return ret;
+
+	*val = atl_read(hw, ATL_GLOBAL_MDIO_RDATA);
+	return 0;
+}
+
+int atl_mdio_read(struct atl_hw *hw, uint8_t prtad, uint8_t mmd,
+	uint16_t addr, uint16_t *val)
+{
+	int ret;
+
+	ret = atl_mdio_hwsem_get(hw);
+	if (ret)
+		return ret;
+
+	ret = __atl_mdio_read(hw, prtad, mmd, addr, val);
+	atl_mdio_hwsem_put(hw);
+
+	return ret;
+}
+
+int __atl_mdio_write(struct atl_hw *hw, uint8_t prtad, uint8_t mmd,
+	uint16_t addr, uint16_t val)
+{
+	int ret;
+
+	ret = atl_mdio_wait(hw);
+	if (ret)
+		return ret;
+
+	atl_mdio_set_addr(hw, prtad, mmd, addr);
+	ret = atl_mdio_wait(hw);
+	if (ret)
+		return ret;
+
+	atl_write(hw, ATL_GLOBAL_MDIO_WDATA, val);
+	/* Write operation | execute | prtad + mmd */
+	atl_write(hw, ATL_GLOBAL_MDIO_CMD, BIT(15) | 2 << 12 |
+		prtad << 5 | mmd);
+	ret = atl_mdio_wait(hw);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+int atl_mdio_write(struct atl_hw *hw, uint8_t prtad, uint8_t mmd,
+	uint16_t addr, uint16_t val)
+{
+	int ret;
+
+	ret = atl_mdio_hwsem_get(hw);
+	if (ret)
+		return ret;
+
+	ret = __atl_mdio_write(hw, prtad, mmd, addr, val);
+	atl_mdio_hwsem_put(hw);
+
+	return ret;
+}
+
+#define __READ_MSM_OR_GOTO(RET, HW, REGISTER, PVARIABLE, label)	\
+	do {								\
+		RET = __atl_msm_read(HW, REGISTER, PVARIABLE);		\
+		if (RET)						\
+			goto label;					\
+	} while (0)
+
+void atl_adjust_eth_stats(struct atl_ether_stats *stats,
+	struct atl_ether_stats *base, bool add)
+{
+	int i;
+	uint64_t *_stats = (uint64_t *)stats;
+	uint64_t *_base = (uint64_t *)base;
+
+	for (i = 0; i < sizeof(*stats) / sizeof(uint64_t); i++)
+		_stats[i] += add ? _base[i] : -_base[i];
+}
+
+int atl_update_eth_stats(struct atl_nic *nic)
+{
+	struct atl_hw *hw = &nic->hw;
+	struct atl_ether_stats stats = {0};
+	uint32_t reg = 0, reg2 = 0;
+	int ret;
+
+	ret = atl_hwsem_get(hw, ATL_MCP_SEM_MSM);
+	if (ret)
+		return ret;
+
+	__READ_MSM_OR_GOTO(ret, hw, ATL_MSM_CTR_TX_PAUSE, &reg, hwsem_put);
+	stats.tx_pause = reg;
+
+	__READ_MSM_OR_GOTO(ret, hw, ATL_MSM_CTR_RX_PAUSE, &reg, hwsem_put);
+	stats.rx_pause = reg;
+
+	__READ_MSM_OR_GOTO(ret, hw, ATL_MSM_CTR_RX_OCTETS_LO, &reg, hwsem_put);
+	__READ_MSM_OR_GOTO(ret, hw, ATL_MSM_CTR_RX_OCTETS_HI, &reg2, hwsem_put);
+	stats.rx_ether_octets = ((uint64_t)reg2 << 32) | reg;
+
+	__READ_MSM_OR_GOTO(ret, hw, ATL_MSM_CTR_RX_PKTS_GOOD, &reg, hwsem_put);
+	__READ_MSM_OR_GOTO(ret, hw, ATL_MSM_CTR_RX_ERRS, &reg2, hwsem_put);
+	stats.rx_ether_pkts = reg + reg2;
+
+	__READ_MSM_OR_GOTO(ret, hw, ATL_MSM_CTR_RX_BROADCAST, &reg, hwsem_put);
+	stats.rx_ether_broacasts = reg;
+
+	__READ_MSM_OR_GOTO(ret, hw, ATL_MSM_CTR_RX_MULTICAST, &reg, hwsem_put);
+	stats.rx_ether_multicasts = reg;
+
+	__READ_MSM_OR_GOTO(ret, hw, ATL_MSM_CTR_RX_FCS_ERRS, &reg, hwsem_put);
+	__READ_MSM_OR_GOTO(ret, hw, ATL_MSM_CTR_RX_ALIGN_ERRS, &reg2, hwsem_put);
+	stats.rx_ether_crc_align_errs = reg + reg2;
+
+	stats.rx_ether_drops = atl_read(hw, ATL_RX_DMA_STATS_CNT7);
+
+	/* Capture debug counters */
+	atl_write_bit(hw, ATL_RX_RPF_DBG_CNT_CTRL, 0x1f, 1);
+
+	reg = atl_read(hw, ATL_RX_RPF_HOST_CNT_LO);
+	reg2 = atl_read(hw, ATL_RX_RPF_HOST_CNT_HI);
+	stats.rx_filter_host = ((uint64_t)reg2 << 32) | reg;
+
+	reg = atl_read(hw, ATL_RX_RPF_LOST_CNT_LO);
+	reg2 = atl_read(hw, ATL_RX_RPF_LOST_CNT_HI);
+	stats.rx_filter_lost = ((uint64_t)reg2 << 32) | reg;
+
+	spin_lock(&nic->stats_lock);
+
+	atl_adjust_eth_stats(&stats, &nic->stats.eth_base, false);
+	nic->stats.eth = stats;
+
+	spin_unlock(&nic->stats_lock);
+
+	ret = 0;
+
+hwsem_put:
+	atl_hwsem_put(hw, ATL_MCP_SEM_MSM);
+	return ret;
+}
+#undef __READ_MSM_OR_GOTO
+
+int atl_get_lpi_timer(struct atl_nic *nic, uint32_t *lpi_delay)
+{
+	struct atl_hw *hw = &nic->hw;
+	uint32_t lpi;
+	int ret = 0;
+
+
+	ret = atl_msm_read(hw, ATL_MSM_TX_LPI_DELAY, &lpi);
+	if (ret)
+		return ret;
+	*lpi_delay = ATL_HW_CLOCK_TO_US(lpi);
+
+	return ret;
+}
+
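+/* Poll the command status nibble (bits 31:28) of the FW2 mailbox
+ * command scratchpad. As used by atl_write_mcp_mem() below, 8 means
+ * the FW is still busy and 4 means the word was accepted; anything
+ * else is treated as an error.
+ */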
+static uint32_t atl_mcp_mbox_wait(struct atl_hw *hw, int loops)
+{
+	uint32_t stat;
+
+	busy_wait(loops, cpu_relax(), stat,
+		(atl_read(hw, ATL_MCP_SCRATCH(FW2_MBOX_CMD)) >> 28) & 0xf,
+		stat == 8);
+
+	return stat;
+}
+
+int atl_write_mcp_mem(struct atl_hw *hw, uint32_t offt, void *host_addr,
+	size_t size)
+{
+	uint32_t *addr = (uint32_t *)host_addr;
+
+	while (size) {
+		uint32_t stat;
+
+		atl_write(hw, ATL_MCP_SCRATCH(FW2_MBOX_DATA), *addr++);
+		atl_write(hw, ATL_MCP_SCRATCH(FW2_MBOX_CMD), BIT(31) | offt);
+		ndelay(750);
+		stat = atl_mcp_mbox_wait(hw, 5);
+
+		if (stat == 8) {
+			/* Send MCP mbox interrupt */
+			atl_set_bits(hw, ATL_GLOBAL_CTRL2, BIT(1));
+			ndelay(1200);
+			stat = atl_mcp_mbox_wait(hw, 10000);
+		}
+
+		if (stat == 8) {
+			atl_dev_err("FW mbox timeout offt %x, remaining %zx\n",
+				offt, size);
+			return -ETIME;
+		} else if (stat != 4) {
+			atl_dev_err("FW mbox error status %x, offt %x, remaining %zx\n",
+				stat, offt, size);
+			return -EIO;
+		}
+
+		offt += 4;
+		size -= 4;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hw.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hw.h
new file mode 100644
index 0000000..5acc58a
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hw.h
@@ -0,0 +1,185 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef _ATL_HW_H_
+#define _ATL_HW_H_
+#include <linux/pci.h>
+#include <linux/if_ether.h>
+
+#include "atl_regs.h"
+#include "atl_fw.h"
+
+#define PCI_VENDOR_ID_AQUANTIA 0x1d6a
+
+/* HW clock tick is 3.2 ns, so us == ticks * 32 / 10000 */
+#define ATL_HW_CLOCK_TO_US(clk)  ((clk) * 32 / 10000)
+
+#define busy_wait(tries, wait, lvalue, fetch, cond)	\
+({							\
+	uint32_t _dummy = 0;				\
+	int i = (tries);				\
+	int orig = i;					\
+	(void)_dummy;					\
+	do {						\
+		wait;					\
+		(lvalue) = (fetch);			\
+	} while ((cond) && --i);			\
+	(orig - i);					\
+})
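+
+/* Usage sketch: poll a (hypothetical) register for up to 100 tries,
+ * 1 us apart. The macro evaluates to the number of polls consumed,
+ * and @lvalue holds the last value fetched, so a timeout is detected
+ * by re-testing the wait condition afterwards:
+ *
+ *	uint32_t val;
+ *	int tries;
+ *
+ *	tries = busy_wait(100, udelay(1), val,
+ *		atl_read(hw, ATL_SOME_REG), val & BIT(0));
+ *	if (val & BIT(0))
+ *		return -ETIME;
+ */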
+
+enum atl_board {
+	ATL_UNKNOWN,
+	ATL_AQC107,
+	ATL_AQC108,
+	ATL_AQC109,
+	ATL_AQC100,
+};
+
+struct atl_hw {
+	uint8_t __iomem *regs;
+	struct pci_dev *pdev;
+	struct atl_link_state link_state;
+	struct {
+		uint32_t fw_rev;
+		bool poll_link;
+		struct atl_fw_ops *ops;
+		uint32_t fw_stat_addr;
+		struct mutex lock;
+	} mcp;
+	uint32_t intr_mask;
+	uint8_t mac_addr[ETH_ALEN];
+#define ATL_RSS_KEY_SIZE 40
+	uint8_t rss_key[ATL_RSS_KEY_SIZE];
+#define ATL_RSS_TBL_SIZE (1 << 6)
+	uint8_t rss_tbl[ATL_RSS_TBL_SIZE];
+};
+
+union atl_desc;
+struct atl_hw_ring {
+	union atl_desc *descs;
+	uint32_t size;
+	uint32_t reg_base;
+	dma_addr_t daddr;
+};
+
+#define offset_ptr(ptr, ring, amount)					\
+	({								\
+		uint32_t size = ((struct atl_hw_ring *)(ring))->size;	\
+									\
+		uint32_t res = (ptr) + (amount);			\
+		if ((int32_t)res < 0)					\
+			res += size;					\
+		else if (res >= size)					\
+			res -= size;					\
+		res;							\
+	})
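+
+/* E.g. with a 512-entry ring, offset_ptr(510, ring, 4) wraps forward
+ * to 2 and offset_ptr(1, ring, -3) wraps back to 510, so ring indices
+ * can be advanced in either direction without an explicit modulo. */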
+
+void atl_check_unplug(struct atl_hw *hw, uint32_t addr);
+
+static inline uint32_t atl_read(struct atl_hw *hw, uint32_t addr)
+{
+	uint8_t __iomem *base = READ_ONCE(hw->regs);
+	uint32_t val = 0xffffffff;
+
+	if (unlikely(!base))
+		return val;
+
+	val = readl(base + addr);
+	if (unlikely(val == 0xffffffff))
+		atl_check_unplug(hw, addr);
+	return val;
+}
+
+static inline void atl_write(struct atl_hw *hw, uint32_t addr, uint32_t val)
+{
+	uint8_t __iomem *base = READ_ONCE(hw->regs);
+
+	if (unlikely(!base))
+		return;
+
+	writel(val, base + addr);
+}
+
+static inline void atl_write_bits(struct atl_hw *hw, uint32_t addr,
+			     uint32_t shift, uint32_t width, uint32_t val)
+{
+	uint32_t mask = ((1u << width) - 1) << shift;
+
+	atl_write(hw, addr,
+		  (atl_read(hw, addr) & ~mask) | ((val << shift) & mask));
+}
+
+static inline void atl_write_bit(struct atl_hw *hw, uint32_t addr,
+			    uint32_t shift, uint32_t val)
+{
+	atl_write_bits(hw, addr, shift, 1, val);
+}
+
+static inline void atl_set_bits(struct atl_hw *hw, uint32_t addr,
+	uint32_t bits)
+{
+	atl_write(hw, addr, atl_read(hw, addr) | bits);
+}
+
+static inline void atl_clear_bits(struct atl_hw *hw, uint32_t addr,
+	uint32_t bits)
+{
+	atl_write(hw, addr, atl_read(hw, addr) & ~bits);
+}
+
+static inline void atl_intr_enable(struct atl_hw *hw, uint32_t mask)
+{
+	atl_write(hw, ATL_INTR_MSK_SET, mask);
+}
+
+static inline void atl_intr_disable(struct atl_hw *hw, uint32_t mask)
+{
+	atl_write(hw, ATL_INTR_MSK_CLEAR, mask);
+}
+
+static inline void atl_intr_disable_all(struct atl_hw *hw)
+{
+	atl_intr_disable(hw, 0xffffffff);
+}
+
+static inline unsigned atl_fw_major(struct atl_hw *hw)
+{
+	return (hw->mcp.fw_rev >> 24) & 0xff;
+}
+
+static inline void atl_init_rss_table(struct atl_hw *hw, int nvecs)
+{
+	int i;
+
+	for (i = 0; i < ATL_RSS_TBL_SIZE; i++)
+		hw->rss_tbl[i] = i % nvecs;
+}
+
+static inline void atl_set_vlan_promisc(struct atl_hw *hw, int promisc)
+{
+	atl_write_bit(hw, ATL_RX_VLAN_FLT_CTRL1, 1, !!promisc);
+}
+
+int atl_read_mcp_mem(struct atl_hw *hw, uint32_t mcp_addr, void *host_addr,
+	unsigned size);
+int atl_hwinit(struct atl_nic *nic, enum atl_board brd_id);
+void atl_refresh_link(struct atl_nic *nic);
+void atl_set_rss_key(struct atl_hw *hw);
+void atl_set_rss_tbl(struct atl_hw *hw);
+void atl_set_uc_flt(struct atl_hw *hw, int idx, uint8_t mac_addr[ETH_ALEN]);
+
+int atl_alloc_descs(struct atl_nic *nic, struct atl_hw_ring *ring);
+void atl_free_descs(struct atl_nic *nic, struct atl_hw_ring *ring);
+void atl_set_intr_bits(struct atl_hw *hw, int idx, int rxbit, int txbit);
+int atl_alloc_link_intr(struct atl_nic *nic);
+void atl_free_link_intr(struct atl_nic *nic);
+int atl_write_mcp_mem(struct atl_hw *hw, uint32_t offt, void *addr,
+	size_t size);
+
+#endif
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hwmon.c b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hwmon.c
new file mode 100644
index 0000000..21e3ea0
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_hwmon.c
@@ -0,0 +1,84 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2018 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include "atl_common.h"
+#include <linux/hwmon.h>
+
+static char *atl_hwmon_labels[] = {
+	"PHY Temperature",
+};
+
+static const uint32_t atl_hwmon_temp_config[] = {
+	HWMON_T_INPUT | HWMON_T_LABEL,
+	0,
+};
+
+static const struct hwmon_channel_info atl_hwmon_temp = {
+	.type = hwmon_temp,
+	.config = atl_hwmon_temp_config,
+};
+
+static const struct hwmon_channel_info *atl_hwmon_info[] = {
+	&atl_hwmon_temp,
+	NULL,
+};
+
+static umode_t atl_hwmon_is_visible(const void *p,
+	enum hwmon_sensor_types type, uint32_t attr, int channel)
+{
+	return type == hwmon_temp ? S_IRUGO : 0;
+}
+
+static int atl_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+	uint32_t attr, int channel, long *val)
+{
+	struct atl_hw *hw = dev_get_drvdata(dev);
+	int temp, ret;
+
+	if (type != hwmon_temp || attr != hwmon_temp_input)
+		return -EINVAL;
+
+	ret = hw->mcp.ops->get_phy_temperature(hw, &temp);
+	if (ret)
+		return ret;
+
+	*val = temp;
+	return 0;
+}
+
+static int atl_hwmon_read_string(struct device *dev,
+	enum hwmon_sensor_types type, u32 attr, int channel, const char **str)
+{
+	if (type != hwmon_temp || attr != hwmon_temp_label)
+		return -EINVAL;
+
+	*str = atl_hwmon_labels[channel];
+	return 0;
+}
+
+static const struct hwmon_ops atl_hwmon_ops = {
+	.is_visible = atl_hwmon_is_visible,
+	.read = atl_hwmon_read,
+	.read_string = atl_hwmon_read_string,
+};
+
+static const struct hwmon_chip_info atl_hwmon = {
+	.ops = &atl_hwmon_ops,
+	.info = atl_hwmon_info,
+};
+
+int atl_hwmon_init(struct atl_nic *nic)
+{
+	struct device *hwmon_dev;
+
+	hwmon_dev = devm_hwmon_device_register_with_info(&nic->hw.pdev->dev,
+		nic->ndev->name, &nic->hw, &atl_hwmon, NULL);
+
+	return PTR_ERR_OR_ZERO(hwmon_dev);
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_main.c b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_main.c
new file mode 100644
index 0000000..4f76d9b
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_main.c
@@ -0,0 +1,612 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include "atl_common.h"
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+
+const char atl_driver_name[] = "atlantic-fwd";
+
+int atl_max_queues = ATL_MAX_QUEUES;
+module_param_named(max_queues, atl_max_queues, uint, 0444);
+
+static unsigned int atl_rx_mod = 15, atl_tx_mod = 15;
+module_param_named(rx_mod, atl_rx_mod, uint, 0444);
+module_param_named(tx_mod, atl_tx_mod, uint, 0444);
+
+static unsigned int atl_keep_link = 0;
+module_param_named(keep_link, atl_keep_link, uint, 0644);
+
+static void atl_link_up(struct atl_nic *nic)
+{
+	struct atl_hw *hw = &nic->hw;
+
+	if (hw->mcp.poll_link)
+		mod_timer(&nic->link_timer, jiffies + HZ);
+
+	hw->link_state.force_off = 0;
+	hw->mcp.ops->set_link(hw, true);
+}
+
+static int atl_do_open(struct atl_nic *nic)
+{
+	int ret;
+
+	ret = atl_start_rings(nic);
+	if (ret)
+		return ret;
+
+	if (!atl_keep_link)
+		atl_link_up(nic);
+
+	return 0;
+}
+
+static int atl_open(struct net_device *ndev)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	int ret;
+
+	if (!test_bit(ATL_ST_CONFIGURED, &nic->state)) {
+		/* A previous atl_reconfigure() had failed. Try once more. */
+		ret = atl_setup_datapath(nic);
+		if (ret)
+			return ret;
+	}
+
+	ret = atl_alloc_rings(nic);
+	if (ret)
+		return ret;
+
+	ret = netif_set_real_num_tx_queues(ndev, nic->nvecs);
+	if (ret)
+		goto free_rings;
+	ret = netif_set_real_num_rx_queues(ndev, nic->nvecs);
+	if (ret)
+		goto free_rings;
+
+	ret = atl_do_open(nic);
+	if (ret)
+		goto free_rings;
+
+	netif_tx_start_all_queues(ndev);
+
+	set_bit(ATL_ST_UP, &nic->state);
+	return 0;
+
+free_rings:
+	atl_free_rings(nic);
+	return ret;
+}
+
+static void atl_link_down(struct atl_nic *nic)
+{
+	struct atl_hw *hw = &nic->hw;
+
+	del_timer_sync(&nic->link_timer);
+	hw->link_state.force_off = 1;
+	hw->mcp.ops->set_link(hw, true);
+	hw->link_state.link = 0;
+	netif_carrier_off(nic->ndev);
+}
+
+static void atl_do_close(struct atl_nic *nic)
+{
+	if (!atl_keep_link)
+		atl_link_down(nic);
+
+	atl_stop_rings(nic);
+}
+
+static int atl_close(struct net_device *ndev)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+
+	/* atl_close() can be called a second time if
+	 * atl_reconfigure() fails. Just return
+	 */
+	if (!test_and_clear_bit(ATL_ST_UP, &nic->state))
+		return 0;
+
+	netif_tx_stop_all_queues(ndev);
+
+	atl_do_close(nic);
+	atl_free_rings(nic);
+
+	return 0;
+}
+
+#ifndef ATL_HAVE_MINMAX_MTU
+
+static int atl_change_mtu(struct net_device *ndev, int mtu)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+
+	if (mtu < 64 || mtu > nic->max_mtu)
+		return -EINVAL;
+
+	ndev->mtu = mtu;
+	return 0;
+}
+
+#endif
+
+static int atl_set_mac_address(struct net_device *ndev, void *priv)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_hw *hw = &nic->hw;
+	struct sockaddr *addr = priv;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	ether_addr_copy(hw->mac_addr, addr->sa_data);
+	ether_addr_copy(ndev->dev_addr, addr->sa_data);
+
+	if (netif_running(ndev))
+		atl_set_uc_flt(hw, 0, hw->mac_addr);
+
+	return 0;
+}
+
+static const struct net_device_ops atl_ndev_ops = {
+	.ndo_open = atl_open,
+	.ndo_stop = atl_close,
+	.ndo_start_xmit = atl_start_xmit,
+	.ndo_vlan_rx_add_vid = atl_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid = atl_vlan_rx_kill_vid,
+	.ndo_set_rx_mode = atl_set_rx_mode,
+#ifndef ATL_HAVE_MINMAX_MTU
+	.ndo_change_mtu = atl_change_mtu,
+#endif
+	.ndo_set_features = atl_set_features,
+	.ndo_set_mac_address = atl_set_mac_address,
+#ifdef ATL_COMPAT_CAST_NDO_GET_STATS64
+	.ndo_get_stats64 = (void *)atl_get_stats64,
+#else
+	.ndo_get_stats64 = atl_get_stats64,
+#endif
+};
+
+/* RTNL lock must be held */
+int atl_reconfigure(struct atl_nic *nic)
+{
+	struct net_device *ndev = nic->ndev;
+	int was_up = netif_running(ndev);
+	int ret = 0;
+
+	if (was_up)
+		atl_close(ndev);
+
+	atl_clear_datapath(nic);
+
+	ret = atl_setup_datapath(nic);
+	if (ret)
+		goto err;
+
+	/* Number of rings might have changed, re-init RSS
+	 * redirection table.
+	 */
+	atl_init_rss_table(&nic->hw, nic->nvecs);
+
+	if (was_up) {
+		ret = atl_open(ndev);
+		if (ret)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	if (was_up)
+		dev_close(ndev);
+	return ret;
+}
+
+static struct workqueue_struct *atl_wq;
+
+void atl_schedule_work(struct atl_nic *nic)
+{
+	if (!test_and_set_bit(ATL_ST_WORK_SCHED, &nic->state))
+		queue_work(atl_wq, &nic->work);
+}
+
+static void atl_work(struct work_struct *work)
+{
+	struct atl_nic *nic = container_of(work, struct atl_nic, work);
+
+	atl_refresh_link(nic);
+	clear_bit(ATL_ST_WORK_SCHED, &nic->state);
+}
+
+static void atl_link_timer(struct timer_list *timer)
+{
+	struct atl_nic *nic =
+		container_of(timer, struct atl_nic, link_timer);
+
+	atl_schedule_work(nic);
+	mod_timer(&nic->link_timer, jiffies + HZ);
+}
+
+static const struct pci_device_id atl_pci_tbl[] = {
+	{ PCI_VDEVICE(AQUANTIA, 0x0001), ATL_UNKNOWN},
+	{ PCI_VDEVICE(AQUANTIA, 0xd107), ATL_AQC107},
+	{ PCI_VDEVICE(AQUANTIA, 0x07b1), ATL_AQC107},
+	{ PCI_VDEVICE(AQUANTIA, 0x87b1), ATL_AQC107},
+	{ PCI_VDEVICE(AQUANTIA, 0xd108), ATL_AQC108},
+	{ PCI_VDEVICE(AQUANTIA, 0x08b1), ATL_AQC108},
+	{ PCI_VDEVICE(AQUANTIA, 0x88b1), ATL_AQC108},
+	{ PCI_VDEVICE(AQUANTIA, 0xd109), ATL_AQC109},
+	{ PCI_VDEVICE(AQUANTIA, 0x09b1), ATL_AQC109},
+	{ PCI_VDEVICE(AQUANTIA, 0x89b1), ATL_AQC109},
+	{ PCI_VDEVICE(AQUANTIA, 0xd100), ATL_AQC100},
+	{ PCI_VDEVICE(AQUANTIA, 0x00b1), ATL_AQC107},
+	{ PCI_VDEVICE(AQUANTIA, 0x80b1), ATL_AQC107},
+	{ PCI_VDEVICE(AQUANTIA, 0x11b1), ATL_AQC108},
+	{ PCI_VDEVICE(AQUANTIA, 0x91b1), ATL_AQC108},
+	{ PCI_VDEVICE(AQUANTIA, 0x51b1), ATL_AQC108},
+	{ PCI_VDEVICE(AQUANTIA, 0x12b1), ATL_AQC109},
+	{ PCI_VDEVICE(AQUANTIA, 0x92b1), ATL_AQC109},
+	{ PCI_VDEVICE(AQUANTIA, 0x52b1), ATL_AQC109},
+	{}
+};
+
+static uint8_t atl_def_rss_key[ATL_RSS_KEY_SIZE] = {
+	0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
+	0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
+	0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
+	0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
+	0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
+};
+
+static void atl_setup_rss(struct atl_nic *nic)
+{
+	struct atl_hw *hw = &nic->hw;
+
+	memcpy(hw->rss_key, atl_def_rss_key, sizeof(hw->rss_key));
+
+	atl_init_rss_table(hw, nic->nvecs);
+}
+
+static int atl_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	int ret, pci_64 = 0;
+	struct net_device *ndev;
+	struct atl_nic *nic = NULL;
+	struct atl_hw *hw;
+	int disable_needed;
+
+	if (atl_max_queues < 1 || atl_max_queues > ATL_MAX_QUEUES) {
+		dev_err(&pdev->dev, "Bad atl_max_queues value %d, must be between 1 and %d inclusive\n",
+			 atl_max_queues, ATL_MAX_QUEUES);
+		return -EINVAL;
+	}
+
+	ret = pci_enable_device_mem(pdev);
+	if (ret)
+		return ret;
+
+	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
+		pci_64 = 1;
+	else {
+		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+		if (ret) {
+			dev_err(&pdev->dev, "Set DMA mask failed: %d\n", ret);
+			goto err_dma;
+		}
+	}
+
+	ret = pci_request_mem_regions(pdev, atl_driver_name);
+	if (ret) {
+		dev_err(&pdev->dev, "Request PCI regions failed: %d\n", ret);
+		goto err_pci_reg;
+	}
+
+	pci_set_master(pdev);
+
+	ndev = alloc_etherdev_mq(sizeof(struct atl_nic), atl_max_queues);
+	if (!ndev) {
+		ret = -ENOMEM;
+		goto err_alloc_ndev;
+	}
+
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+	nic = netdev_priv(ndev);
+	nic->ndev = ndev;
+	nic->hw.pdev = pdev;
+	spin_lock_init(&nic->stats_lock);
+	INIT_WORK(&nic->work, atl_work);
+	mutex_init(&nic->hw.mcp.lock);
+	__set_bit(ATL_ST_ENABLED, &nic->state);
+
+	hw = &nic->hw;
+	hw->regs = ioremap(pci_resource_start(pdev, 0),
+				pci_resource_len(pdev, 0));
+	if (!hw->regs) {
+		ret = -EIO;
+		goto err_ioremap;
+	}
+
+	ret = atl_hwinit(nic, id->driver_data);
+	if (ret)
+		goto err_hwinit;
+
+	eth_platform_get_mac_address(&hw->pdev->dev, hw->mac_addr);
+	if (!is_valid_ether_addr(hw->mac_addr)) {
+		atl_dev_err("invalid MAC address: %*phC\n", ETH_ALEN,
+			    hw->mac_addr);
+		/* XXX Workaround for bad MAC addr in efuse. Maybe
+		 * switch to some predefined one later.
+		 */
+		eth_random_addr(hw->mac_addr);
+		/* ret = -EIO; */
+		/* goto err_hwinit; */
+	}
+
+	ether_addr_copy(ndev->dev_addr, hw->mac_addr);
+	atl_dev_dbg("got MAC address: %pM\n", hw->mac_addr);
+
+	nic->requested_nvecs = atl_max_queues;
+	nic->requested_tx_size = ATL_RING_SIZE;
+	nic->requested_rx_size = ATL_RING_SIZE;
+	nic->rx_intr_delay = atl_rx_mod;
+	nic->tx_intr_delay = atl_tx_mod;
+
+	ret = atl_setup_datapath(nic);
+	if (ret)
+		goto err_datapath;
+
+	atl_setup_rss(nic);
+
+	ndev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+		NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+		NETIF_F_RXHASH | NETIF_F_LRO;
+
+	ndev->vlan_features |= ndev->features;
+	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
+		NETIF_F_HW_VLAN_CTAG_FILTER;
+
+	ndev->hw_features |= ndev->features | NETIF_F_RXALL;
+
+	if (pci_64)
+		ndev->features |= NETIF_F_HIGHDMA;
+
+	ndev->features |= NETIF_F_NTUPLE;
+
+	ndev->priv_flags |= IFF_UNICAST_FLT;
+
+	timer_setup(&nic->link_timer, &atl_link_timer, 0);
+
+	hw->mcp.ops->set_default_link(hw);
+	hw->link_state.force_off = 1;
+	hw->intr_mask = BIT(ATL_NUM_NON_RING_IRQS) - 1;
+	ndev->netdev_ops = &atl_ndev_ops;
+	ndev->mtu = 1500;
+#ifdef ATL_HAVE_MINMAX_MTU
+	ndev->max_mtu = nic->max_mtu;
+#endif
+	ndev->ethtool_ops = &atl_ethtool_ops;
+	ret = register_netdev(ndev);
+	if (ret)
+		goto err_register;
+
+	pci_set_drvdata(pdev, nic);
+	netif_carrier_off(ndev);
+
+	ret = atl_alloc_link_intr(nic);
+	if (ret)
+		goto err_link_intr;
+
+	ret = atl_hwmon_init(nic);
+	if (ret)
+		goto err_hwmon_init;
+
+	atl_start_hw_global(nic);
+	if (atl_keep_link)
+		atl_link_up(nic);
+
+	return 0;
+
+err_hwmon_init:
+	atl_free_link_intr(nic);
+err_link_intr:
+	unregister_netdev(nic->ndev);
+err_register:
+	atl_clear_datapath(nic);
+err_datapath:
+err_hwinit:
+	iounmap(hw->regs);
+err_ioremap:
+	disable_needed = test_and_clear_bit(ATL_ST_ENABLED, &nic->state);
+	free_netdev(ndev);
+err_alloc_ndev:
+	pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+	if (!nic || disable_needed)
+		pci_disable_device(pdev);
+	return ret;
+}
+
+static void atl_remove(struct pci_dev *pdev)
+{
+	int disable_needed;
+	struct atl_nic *nic = pci_get_drvdata(pdev);
+
+	if (!nic)
+		return;
+
+	netif_carrier_off(nic->ndev);
+	atl_intr_disable_all(&nic->hw);
+	/* atl_hw_reset(&nic->hw); */
+	atl_free_link_intr(nic);
+	unregister_netdev(nic->ndev);
+	atl_fwd_release_rings(nic);
+	atl_clear_datapath(nic);
+	iounmap(nic->hw.regs);
+	disable_needed = test_and_clear_bit(ATL_ST_ENABLED, &nic->state);
+	cancel_work_sync(&nic->work);
+	free_netdev(nic->ndev);
+	pci_release_regions(pdev);
+	if (disable_needed)
+		pci_disable_device(pdev);
+}
+
+static int atl_suspend_common(struct device *dev, bool deep)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct atl_nic *nic = pci_get_drvdata(pdev);
+	struct atl_hw *hw = &nic->hw;
+	int ret;
+
+	rtnl_lock();
+	netif_device_detach(nic->ndev);
+
+	if (netif_running(nic->ndev))
+		atl_do_close(nic);
+
+	if (deep && atl_keep_link)
+		atl_link_down(nic);
+
+	if (deep && nic->flags & ATL_FL_WOL) {
+		ret = hw->mcp.ops->enable_wol(hw);
+		if (ret)
+			atl_dev_err("Enable WoL failed: %d\n", -ret);
+	}
+
+	pci_disable_device(pdev);
+	__clear_bit(ATL_ST_ENABLED, &nic->state);
+
+	rtnl_unlock();
+
+	return 0;
+}
+
+static int atl_suspend_poweroff(struct device *dev)
+{
+	return atl_suspend_common(dev, true);
+}
+
+static int atl_freeze(struct device *dev)
+{
+	return atl_suspend_common(dev, false);
+}
+
+static int atl_resume_common(struct device *dev, bool deep)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct atl_nic *nic = pci_get_drvdata(pdev);
+	int ret;
+
+	rtnl_lock();
+
+	ret = pci_enable_device_mem(pdev);
+	if (ret)
+		goto exit;
+
+	pci_set_master(pdev);
+	__set_bit(ATL_ST_ENABLED, &nic->state);
+
+	if (deep) {
+		ret = atl_hw_reset(&nic->hw);
+		if (ret)
+			goto exit;
+
+		atl_start_hw_global(nic);
+	}
+
+	if (netif_running(nic->ndev))
+		ret = atl_do_open(nic);
+
+	if (deep && atl_keep_link)
+		atl_link_up(nic);
+
+	if (ret)
+		goto exit;
+
+	netif_device_attach(nic->ndev);
+
+exit:
+	rtnl_unlock();
+
+	return ret;
+}
+
+static int atl_resume_restore(struct device *dev)
+{
+	return atl_resume_common(dev, true);
+}
+
+static int atl_thaw(struct device *dev)
+{
+	return atl_resume_common(dev, false);
+}
+
+static void atl_shutdown(struct pci_dev *pdev)
+{
+	atl_suspend_common(&pdev->dev, true);
+}
+
+const struct dev_pm_ops atl_pm_ops = {
+	.suspend = atl_suspend_poweroff,
+	.poweroff = atl_suspend_poweroff,
+	.freeze = atl_freeze,
+	.resume = atl_resume_restore,
+	.restore = atl_resume_restore,
+	.thaw = atl_thaw,
+};
+
+static struct pci_driver atl_pci_ops = {
+	.name = atl_driver_name,
+	.id_table = atl_pci_tbl,
+	.probe = atl_probe,
+	.remove = atl_remove,
+	.shutdown = atl_shutdown,
+#ifdef CONFIG_PM
+	.driver.pm = &atl_pm_ops,
+#endif
+};
+
+static int __init atl_module_init(void)
+{
+	int ret;
+
+	atl_wq = create_singlethread_workqueue(atl_driver_name);
+	if (!atl_wq) {
+		pr_err("%s: Couldn't create workqueue\n", atl_driver_name);
+		return -ENOMEM;
+	}
+
+	ret = pci_register_driver(&atl_pci_ops);
+	if (ret) {
+		destroy_workqueue(atl_wq);
+		return ret;
+	}
+
+	return 0;
+}
+module_init(atl_module_init);
+
+static void __exit atl_module_exit(void)
+{
+	pci_unregister_driver(&atl_pci_ops);
+
+	if (atl_wq) {
+		destroy_workqueue(atl_wq);
+		atl_wq = NULL;
+	}
+}
+module_exit(atl_module_exit);
+
+MODULE_DEVICE_TABLE(pci, atl_pci_tbl);
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(ATL_VERSION);
+MODULE_AUTHOR("Aquantia Corp.");
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_regs.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_regs.h
new file mode 100644
index 0000000..aefb5d5
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_regs.h
@@ -0,0 +1,182 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef _ATL_REGS_H_
+#define _ATL_REGS_H_
+
+#define ATL_REG_STRIDE(base, stride, idx) ((base) + (stride) * (idx))
+
+/* Ring registers common for Rx and Tx */
+#define ATL_RING_OFFT(ring, offt)				\
+	(((struct atl_hw_ring *)(ring))->reg_base + (offt))
+#define ATL_RING_BASE_LSW(ring) ATL_RING_OFFT(ring, 0)
+#define ATL_RING_BASE_MSW(ring) ATL_RING_OFFT(ring, 4)
+#define ATL_RING_CTL(ring) ATL_RING_OFFT(ring, 8)
+#define ATL_RING_HEAD(ring) ATL_RING_OFFT(ring, 0xc)
+#define ATL_RING_TAIL(ring) ATL_RING_OFFT(ring, 0x10)
+#define ATL_RING_STS(ring) ATL_RING_OFFT(ring, 0x14)
+
+/* MIF @ 0x0000*/
+#define ATL_GLOBAL_STD_CTRL 0
+#define ATL_GLOBAL_FW_ID 0xc
+#define ATL_GLOBAL_CHIP_ID 0x10
+#define ATL_GLOBAL_CHIP_REV 0x14
+#define ATL_GLOBAL_FW_IMAGE_ID 0x18
+#define ATL_GLOBAL_MIF_ID 0x1c
+#define ATL_GLOBAL_MBOX_CTRL 0x200
+#define ATL_GLOBAL_MBOX_CRC 0x204
+#define ATL_GLOBAL_MBOX_ADDR 0x208
+#define ATL_GLOBAL_MBOX_DATA 0x20c
+#define ATL_GLOBAL_MDIO_CTL 0x280
+#define ATL_GLOBAL_MDIO_CMD 0x284
+#define ATL_GLOBAL_MDIO_WDATA 0x288
+#define ATL_GLOBAL_MDIO_ADDR 0x28c
+#define ATL_GLOBAL_MDIO_RDATA 0x290
+/* Scratch pads numbered starting from 1 */
+#define ATL_MCP_SCRATCH(idx) ATL_REG_STRIDE(0x300 - 0x4, 0x4, idx)
+#define ATL_MCP_SEM(idx) ATL_REG_STRIDE(0x3a0, 0x4, idx)
+#define ATL_MCP_SEM_MDIO 0
+#define ATL_MCP_SEM_MSM 1
+#define ATL_GLOBAL_CTRL2 0x404
+#define ATL_GLOBAL_DAISY_CHAIN_STS1 0x704
+
+enum mcp_scratchpad {
+	FW2_MBOX_DATA = 11,	/* 0x328 */
+	FW2_MBOX_CMD = 12,	/* 0x32c */
+	FW_STAT_STRUCT = 25,	/* 0x360 */
+	FW2_EFUSE_SHADOW = 26,	/* 0x364 */
+	FW1_LINK_REQ = 27,
+	FW2_LINK_REQ_LOW = 27,	/* 0x368 */
+	FW1_LINK_STS = 28,
+	FW2_LINK_REQ_HIGH = 28,	/* 0x36c */
+	FW2_LINK_RES_LOW = 29,	/* 0x370 */
+	FW1_EFUSE_SHADOW = 30,
+	FW2_LINK_RES_HIGH = 30,	/* 0x374 */
+	RBL_STS = 35,		/* 0x388 */
+};
+
+/* INTR @ 0x2000 */
+#define ATL_INTR_STS 0x2000
+#define ATL_INTR_MSK 0x2010
+#define ATL_INTR_MSK_SET 0x2060
+#define ATL_INTR_MSK_CLEAR 0x2070
+#define ATL_INTR_AUTO_CLEAR 0x2080
+#define ATL_INTR_AUTO_MASK 0x2090
+#define ATL_INTR_RING_INTR_MAP(idx) ATL_REG_STRIDE(0x2100, 0x4, (idx) >> 1)
+#define ATL_INTR_GEN_INTR_MAP4 0x218c
+#define ATL_INTR_RSC_EN 0x2200
+#define ATL_INTR_RSC_DELAY 0x2204
+#define ATL_INTR_CTRL 0x2300
+#define ATL_INTR_THRTL(idx) ATL_REG_STRIDE(0x2800, 4, idx)
+
+/* MPI @ 0x4000 */
+#define ATL_MPI_CTRL1 0x4000
+#define ATL_MPI_MSM_ADDR 0x4400
+#define ATL_MPI_MSM_WR 0x4404
+#define ATL_MPI_MSM_RD 0x4408
+
+/* RX @ 0x5000 */
+#define ATL_RX_CTRL1 0x5000
+#define ATL_RX_FLT_CTRL1 0x5100
+#define ATL_RX_FLT_CTRL2 0x5104
+#define ATL_UC_FLT_NUM 37
+#define ATL_RX_UC_FLT_REG1(idx) ATL_REG_STRIDE(0x5110, 8, idx)
+#define ATL_RX_UC_FLT_REG2(idx) ATL_REG_STRIDE(0x5114, 8, idx)
+#define ATL_MC_FLT_NUM 8
+#define ATL_RX_MC_FLT(idx) ATL_REG_STRIDE(0x5250, 4, idx)
+#define ATL_RX_MC_FLT_MSK 0x5270
+#define ATL_RX_VLAN_FLT_CTRL1 0x5280
+#define ATL_VLAN_FLT_NUM 16
+#define ATL_RX_VLAN_FLT(idx) ATL_REG_STRIDE(0x5290, 4, idx)
+#define ATL_RX_ETYPE_FLT(idx) ATL_REG_STRIDE(0x5300, 4, idx)
+#define ATL_ETYPE_FLT_NUM 16
+#define ATL_NTUPLE_CTRL(idx) ATL_REG_STRIDE(0x5380, 4, idx)
+#define ATL_NTUPLE_SADDR(idx) ATL_REG_STRIDE(0x53b0, 4, idx)
+#define ATL_NTUPLE_DADDR(idx) ATL_REG_STRIDE(0x53d0, 4, idx)
+#define ATL_NTUPLE_SPORT(idx) ATL_REG_STRIDE(0x5400, 4, idx)
+#define ATL_NTUPLE_DPORT(idx) ATL_REG_STRIDE(0x5420, 4, idx)
+#define ATL_NTUPLE_FLT_NUM 8
+#define ATL_RX_RSS_CTRL 0x54c0
+#define ATL_RX_RSS_KEY_ADDR 0x54d0
+#define ATL_RX_RSS_KEY_WR_DATA 0x54d4
+#define ATL_RX_RSS_KEY_RD_DATA 0x54d8
+#define ATL_RX_RSS_TBL_ADDR 0x54e0
+#define ATL_RX_RSS_TBL_WR_DATA 0x54e4
+#define ATL_RX_RSS_TBL_RD_DATA 0x54e8
+#define ATL_RX_RPF_DBG_CNT_CTRL 0x5518
+#define ATL_RX_RPF_HOST_CNT_LO 0x552c
+#define ATL_RX_RPF_HOST_CNT_HI 0x5530
+#define ATL_RX_RPF_LOST_CNT_LO 0x554c
+#define ATL_RX_RPF_LOST_CNT_HI 0x5550
+#define ATL_RX_PO_CTRL1 0x5580
+#define ATL_RX_LRO_CTRL1 0x5590
+#define ATL_RX_LRO_CTRL2 0x5594
+#define ATL_RX_LRO_PKT_LIM_EN 0x5598
+#define ATL_RX_LRO_PKT_LIM(idx) ATL_REG_STRIDE(0x55a0, 4, (idx) >> 3)
+#define ATL_RX_LRO_TMRS 0x5620
+#define ATL_RX_PBUF_CTRL1 0x5700
+#define ATL_RX_PBUF_REG1(idx) ATL_REG_STRIDE(0x5710, 0x10, idx)
+#define ATL_RX_PBUF_REG2(idx) ATL_REG_STRIDE(0x5714, 0x10, idx)
+#define ATL_RX_INTR_CTRL 0x5a30
+#define ATL_RX_INTR_MOD_CTRL(idx) ATL_REG_STRIDE(0x5a40, 4, idx)
+
+/* Rx rings */
+#define ATL_RX_RING(idx) ATL_REG_STRIDE(0x5b00, 0x20, idx)
+#define ATL_RX_RING_BASE_LSW(ring) ATL_RING_BASE_LSW(ring)
+#define ATL_RX_RING_BASE_MSW(ring) ATL_RING_BASE_MSW(ring)
+#define ATL_RX_RING_CTL(ring) ATL_RING_CTL(ring)
+#define ATL_RX_RING_HEAD(ring) ATL_RING_HEAD(ring)
+#define ATL_RX_RING_TAIL(ring) ATL_RING_TAIL(ring)
+#define ATL_RX_RING_STS(ring) ATL_RING_STS(ring)
+#define ATL_RX_RING_BUF_SIZE(ring) ATL_RING_OFFT(ring, 0x18)
+#define ATL_RX_RING_THRESH(ring) ATL_RING_OFFT(ring, 0x1c)
+
+#define ATL_RX_DMA_STATS_CNT7 0x6818
+
+/* TX @ 0x7000 */
+#define ATL_TX_CTRL1 0x7000
+#define ATL_TX_PO_CTRL1 0x7800
+#define ATL_TX_LSO_CTRL 0x7810
+#define ATL_TX_LSO_TCP_CTRL1 0x7820
+#define ATL_TX_LSO_TCP_CTRL2 0x7824
+#define ATL_TX_PBUF_CTRL1 0x7900
+#define ATL_TX_PBUF_REG1(idx) ATL_REG_STRIDE(0x7910, 0x10, idx)
+#define ATL_TX_INTR_CTRL 0x7b40
+
+/* Tx rings */
+#define ATL_TX_RING(idx) ATL_REG_STRIDE(0x7c00, 0x40, idx)
+#define ATL_TX_RING_BASE_LSW(ring) ATL_RING_BASE_LSW(ring)
+#define ATL_TX_RING_BASE_MSW(ring) ATL_RING_BASE_MSW(ring)
+#define ATL_TX_RING_CTL(ring) ATL_RING_CTL(ring)
+#define ATL_TX_RING_HEAD(ring) ATL_RING_HEAD(ring)
+#define ATL_TX_RING_TAIL(ring) ATL_RING_TAIL(ring)
+#define ATL_TX_RING_STS(ring) ATL_RING_STS(ring)
+#define ATL_TX_RING_THRESH(ring) ATL_RING_OFFT(ring, 0x18)
+#define ATL_TX_RING_HEAD_WB_LSW(ring) ATL_RING_OFFT(ring, 0x1c)
+#define ATL_TX_RING_HEAD_WB_MSW(ring) ATL_RING_OFFT(ring, 0x20)
+
+#define ATL_TX_INTR_MOD_CTRL(idx) ATL_REG_STRIDE(0x8980, 0x4, idx)
+
+/* MSM */
+#define ATL_MSM_GEN_CTRL 0x8
+#define ATL_MSM_GEN_STS 0x40
+#define ATL_MSM_TX_LPI_DELAY 0x78
+#define ATL_MSM_CTR_RX_PKTS_GOOD 0x88
+#define ATL_MSM_CTR_RX_FCS_ERRS 0x90
+#define ATL_MSM_CTR_RX_ALIGN_ERRS 0x98
+#define ATL_MSM_CTR_TX_PAUSE 0xa0
+#define ATL_MSM_CTR_RX_PAUSE 0xa8
+#define ATL_MSM_CTR_RX_OCTETS_LO 0xd8
+#define ATL_MSM_CTR_RX_OCTETS_HI 0xdc
+#define ATL_MSM_CTR_RX_MULTICAST 0xe8
+#define ATL_MSM_CTR_RX_BROADCAST 0xf0
+#define ATL_MSM_CTR_RX_ERRS 0x120
+
+#endif
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ring.c b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ring.c
new file mode 100644
index 0000000..2496fe0
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ring.c
@@ -0,0 +1,1680 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include "atl_ring.h"
+#include <linux/skbuff.h>
+#include <linux/ipv6.h>
+#include <net/ip.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+
+#include "atl_trace.h"
+
+#define atl_update_ring_stat(ring, stat, delta)			\
+do {								\
+	struct atl_desc_ring *_ring = (ring);			\
+								\
+	u64_stats_update_begin(&_ring->syncp);			\
+	_ring->stats.stat += (delta);				\
+	u64_stats_update_end(&_ring->syncp);			\
+} while (0)
+
+static inline uint32_t fetch_tx_head(struct atl_desc_ring *ring)
+{
+#ifdef ATL_TX_HEAD_WB
+	//XXX
+#else
+	return atl_read(ring_hw(ring), ATL_TX_RING_HEAD(ring));
+#endif
+}
+
+static int tx_full(struct atl_desc_ring *ring, int needed)
+{
+	struct atl_nic *nic = ring->qvec->nic;
+
+	if (likely(ring_space(ring) >= needed))
+		return 0;
+
+	netif_stop_subqueue(ring->qvec->nic->ndev, ring->qvec->idx);
+	atl_nic_dbg("Stopping tx queue\n");
+
+	smp_mb();
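+	/* Pairs with the smp_mb() in atl_clean_tx(): either this CPU
+	 * observes the space freed there, or the cleaner observes the
+	 * stopped queue and wakes it.
+	 */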
+
+	// Check if another CPU freed some space
+	if (likely(ring_space(ring) < needed))
+		return -EAGAIN;
+
+	netif_start_subqueue(ring->qvec->nic->ndev, ring->qvec->idx);
+	atl_nic_dbg("Restarting tx queue in %s...\n", __func__);
+	atl_update_ring_stat(ring, tx.tx_restart, 1);
+	return 0;
+}
+
+static void atl_txbuf_free(struct atl_txbuf *txbuf, struct device *dev,
+	uint32_t idx)
+{
+	if (txbuf->skb) {
+		if (dma_unmap_len(txbuf, len)) {
+			dma_unmap_single(dev, dma_unmap_addr(txbuf, daddr),
+					 dma_unmap_len(txbuf, len),
+					 DMA_TO_DEVICE);
+			trace_atl_dma_unmap_head(-1, idx,
+				dma_unmap_addr(txbuf, daddr),
+				dma_unmap_len(txbuf, len),
+				txbuf->skb);
+		}
+		dev_kfree_skb_any(txbuf->skb);
+	} else if (dma_unmap_len(txbuf, len)) {
+		dma_unmap_page(dev, dma_unmap_addr(txbuf, daddr),
+			       dma_unmap_len(txbuf, len),
+			       DMA_TO_DEVICE);
+		trace_atl_dma_unmap_frag(-1, idx, dma_unmap_addr(txbuf, daddr),
+			dma_unmap_len(txbuf, len), txbuf->skb);
+	}
+
+	txbuf->last = -1;
+	txbuf->skb = NULL;
+	dma_unmap_len_set(txbuf, len, 0);
+}
+
+static inline struct netdev_queue *atl_txq(struct atl_desc_ring *ring)
+{
+	return netdev_get_tx_queue(ring->qvec->nic->ndev,
+		ring->qvec->idx);
+}
+
+static unsigned int atl_tx_free_low = MAX_SKB_FRAGS + 4;
+module_param_named(tx_free_low, atl_tx_free_low, uint, 0644);
+
+static unsigned int atl_tx_free_high = MAX_SKB_FRAGS * 3;
+module_param_named(tx_free_high, atl_tx_free_high, uint, 0644);
+
+static inline int skb_xmit_more(struct sk_buff *skb)
+{
+	return skb->xmit_more;
+}
+
+static netdev_tx_t atl_map_xmit_skb(struct sk_buff *skb,
+	struct atl_desc_ring *ring, struct atl_txbuf *first_buf)
+{
+	int idx = ring->tail;
+	struct device *dev = ring->qvec->dev;
+	struct atl_tx_desc *desc = &ring->desc.tx;
+	struct skb_frag_struct *frag;
+	/* Header's DMA mapping must be stored in the txbuf that has
+	 * ->skb set, even if it corresponds to the context
+	 * descriptor and not the first data descriptor
+	 */
+	struct atl_txbuf *txbuf = first_buf;
+	unsigned int len = skb_headlen(skb);
+	unsigned int frags = skb_shinfo(skb)->nr_frags;
+	dma_addr_t daddr = dma_map_single(dev, skb->data, len,
+					  DMA_TO_DEVICE);
+	trace_atl_dma_map_head(-1, idx, daddr, len, skb, skb->data);
+
+	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+		if (dma_mapping_error(dev, daddr))
+			goto err_dma;
+
+		dma_unmap_len_set(txbuf, len, len);
+		dma_unmap_addr_set(txbuf, daddr, daddr);
+
+		desc->daddr = cpu_to_le64(daddr);
+		while (len > ATL_DATA_PER_TXD) {
+			desc->len = cpu_to_le16(ATL_DATA_PER_TXD);
+			WRITE_ONCE(ring->hw.descs[idx].tx, *desc);
+			bump_ptr(idx, ring, 1);
+			daddr += ATL_DATA_PER_TXD;
+			len -= ATL_DATA_PER_TXD;
+			desc->daddr = cpu_to_le64(daddr);
+		}
+		desc->len = cpu_to_le16(len);
+
+		if (!frags)
+			break;
+
+		WRITE_ONCE(ring->hw.descs[idx].tx, *desc);
+		bump_ptr(idx, ring, 1);
+		txbuf = &ring->txbufs[idx];
+		len = skb_frag_size(frag);
+		daddr = skb_frag_dma_map(dev, frag, 0, len,
+					 DMA_TO_DEVICE);
+		trace_atl_dma_map_frag(frag - &skb_shinfo(skb)->frags[0], idx,
+				       daddr, len, skb, skb_frag_address(frag));
+
+		frags--;
+	}
+
+	//Last descriptor
+	desc->eop = 1;
+#if defined(ATL_TX_DESC_WB) || defined(ATL_TX_HEAD_WB)
+	desc->cmd |= tx_desc_cmd_wb;
+#endif
+	WRITE_ONCE(ring->hw.descs[idx].tx, *desc);
+	first_buf->last = idx;
+	bump_ptr(idx, ring, 1);
+	ring->txbufs[idx].last = -1;
+	ring->tail = idx;
+
+	/* Stop queue if no space for another packet */
+	tx_full(ring, atl_tx_free_low);
+
+	/* Delay bumping the HW tail if another packet is pending and
+	 * there's space for it.
+	 */
+	if (skb_xmit_more(skb) && !netif_xmit_stopped(atl_txq(ring)))
+		return NETDEV_TX_OK;
+
+	wmb();
+	atl_write(ring_hw(ring), ATL_TX_RING_TAIL(ring), ring->tail);
+
+	return NETDEV_TX_OK;
+
+err_dma:
+	dev_err(dev, "atl_map_skb failed\n");
+	for (;;) {
+		atl_txbuf_free(txbuf, dev, idx);
+		if (txbuf == first_buf)
+			break;
+		bump_ptr(idx, ring, -1);
+		txbuf = &ring->txbufs[idx];
+	}
+	ring->tail = idx;
+	atl_update_ring_stat(ring, tx.dma_map_failed, 1);
+	return -EFAULT;
+}
+
+static uint32_t atl_insert_context(struct atl_txbuf *txbuf,
+	struct atl_desc_ring *ring, unsigned int *len)
+{
+	struct sk_buff *skb = txbuf->skb;
+	struct atl_tx_ctx *ctx;
+	unsigned int hdr_len;
+	uint32_t tx_cmd = 0;
+	int mss;
+	DECLARE_SCRATCH_DESC(scratch);
+
+	ctx = &DESC_PTR(ring, ring->tail, scratch)->ctx;
+
+	memset(ctx, 0, sizeof(*ctx));
+
+	txbuf->bytes = *len;
+	txbuf->packets = 1;
+
+	mss = skb_shinfo(skb)->gso_size;
+
+	if (mss && (skb_shinfo(skb)->gso_type &
+		    (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
+		tx_cmd |= tx_desc_cmd_lso | tx_desc_cmd_l4cs;
+		ctx->mss_len = mss;
+		ctx->cmd = ctx_cmd_tcp;
+
+		ctx->l2_len = skb_network_offset(skb);
+
+		if (skb_is_gso_v6(skb))
+			ctx->cmd |= ctx_cmd_ipv6;
+
+		ctx->l3_len = skb_transport_offset(skb) - ctx->l2_len;
+		ctx->l4_len = tcp_hdrlen(skb);
+
+		hdr_len = ctx->l2_len + ctx->l3_len + ctx->l4_len;
+
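+		/* Each TSO segment repeats the headers on the wire:
+		 * count the payload once here and add one header copy
+		 * per extra segment to the byte total.
+		 */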
+		*len -= hdr_len;
+		txbuf->packets = skb_shinfo(skb)->gso_segs;
+		txbuf->bytes += (txbuf->packets - 1) * hdr_len;
+	}
+
+	if (skb_vlan_tag_present(skb)) {
+		tx_cmd |= tx_desc_cmd_vlan;
+		ctx->vlan_tag = skb_vlan_tag_get(skb);
+	}
+
+	if (tx_cmd) {
+		ctx->type = tx_desc_type_context;
+		ctx->idx = 0;
+		COMMIT_DESC(ring, ring->tail, scratch);
+		bump_tail(ring, 1);
+	}
+
+	return tx_cmd;
+}
+
+netdev_tx_t atl_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_desc_ring *ring = &nic->qvecs[skb->queue_mapping].tx;
+	unsigned int len = skb->len;
+	struct atl_tx_desc *desc;
+	struct atl_txbuf *txbuf;
+	uint32_t cmd_from_ctx;
+
+	if (tx_full(ring, skb_shinfo(skb)->nr_frags + 4)) {
+		atl_update_ring_stat(ring, tx.tx_busy, 1);
+		return NETDEV_TX_BUSY;
+	}
+
+	txbuf = &ring->txbufs[ring->tail];
+
+	txbuf->skb = skb;
+	cmd_from_ctx = atl_insert_context(txbuf, ring, &len);
+
+	/* use ring->desc unconditionally as it will serve as a
+	 * template for all descriptors
+	 */
+	desc = &ring->desc.tx;
+
+	memset(desc, 0, sizeof(*desc));
+
+	desc->cmd = cmd_from_ctx;
+	desc->cmd |= tx_desc_cmd_fcs;
+	desc->ct_en = !!cmd_from_ctx;
+	desc->type = tx_desc_type_desc;
+
+	desc->pay_len = len;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		uint8_t l4_proto = 0;
+
+		switch (skb->protocol) {
+		case htons(ETH_P_IP):
+			desc->cmd |= tx_desc_cmd_ipv4cs;
+			l4_proto = ip_hdr(skb)->protocol;
+			break;
+		case htons(ETH_P_IPV6):
+			l4_proto = ipv6_hdr(skb)->nexthdr;
+			break;
+		}
+
+		switch (l4_proto) {
+		case IPPROTO_TCP:
+		case IPPROTO_UDP:
+			desc->cmd |= tx_desc_cmd_l4cs;
+			break;
+		}
+	}
+
+	return atl_map_xmit_skb(skb, ring, txbuf);
+}
+
+static unsigned int atl_tx_clean_budget = 256;
+module_param_named(tx_clean_budget, atl_tx_clean_budget, uint, 0644);
+
+// Returns true if all work done
+static bool atl_clean_tx(struct atl_desc_ring *ring)
+{
+	struct atl_nic *nic = ring->qvec->nic;
+	struct device *dev = ring->qvec->dev;
+	uint32_t first = READ_ONCE(ring->head);
+#ifndef ATL_TX_DESC_WB
+	uint32_t done = atl_get_tx_head(ring);
+#endif
+	uint32_t budget = atl_tx_clean_budget;
+	unsigned int bytes = 0, packets = 0;
+	struct atl_tx_desc *last_desc;
+
+	atl_nic_dbg("descs in ring: %d\n", ring_occupied(ring));
+	do {
+		struct atl_txbuf *txbuf = &ring->txbufs[first];
+		struct sk_buff *skb = txbuf->skb;
+		uint32_t last = txbuf->last;
+
+		if (last == -1)
+			break;
+
+#ifdef ATL_TX_DESC_WB
+		last_desc = &ring->hw.descs[last].tx;
+
+		if (!last_desc->dd)
+			break;
+#else
+		if ((first <= last && done >= first && done <= last) ||
+		    ((first > last) && (done >= first || done <= last)))
+			break;
+#endif
+
+		bump_ptr(last, ring, 1);
+		napi_consume_skb(txbuf->skb, budget);
+		trace_atl_dma_unmap_head(-1, first,
+					 dma_unmap_addr(txbuf, daddr),
+					 dma_unmap_len(txbuf, len), skb);
+
+		txbuf->skb = NULL;
+		txbuf->last = -1;
+		dma_unmap_single(dev, dma_unmap_addr(txbuf, daddr),
+				 dma_unmap_len(txbuf, len), DMA_TO_DEVICE);
+		dma_unmap_len_set(txbuf, len, 0);
+
+		bytes += txbuf->bytes;
+		packets += txbuf->packets;
+
+		for (bump_ptr(first, ring, 1); first != last;
+		     bump_ptr(first, ring, 1)) {
+			txbuf = &ring->txbufs[first];
+			if (dma_unmap_len(txbuf, len)) {
+				dma_unmap_page(dev,
+					dma_unmap_addr(txbuf, daddr),
+					dma_unmap_len(txbuf, len),
+					DMA_TO_DEVICE);
+				trace_atl_dma_unmap_frag(-1, first,
+					dma_unmap_addr(txbuf, daddr),
+					dma_unmap_len(txbuf, len), skb);
+				dma_unmap_len_set(txbuf, len, 0);
+			}
+		}
+	} while (--budget);
+
+	u64_stats_update_begin(&ring->syncp);
+	ring->stats.tx.bytes += bytes;
+	ring->stats.tx.packets += packets;
+	u64_stats_update_end(&ring->syncp);
+
+	WRITE_ONCE(ring->head, first);
+
+	if (ring_space(ring) > atl_tx_free_high) {
+		struct net_device *ndev = nic->ndev;
+
+		smp_mb();
+		if (__netif_subqueue_stopped(ndev, ring->qvec->idx) &&
+			test_bit(ATL_ST_UP, &nic->state)) {
+			atl_nic_dbg("restarting tx queue\n");
+			netif_wake_subqueue(ndev, ring->qvec->idx);
+			atl_update_ring_stat(ring, tx.tx_restart, 1);
+		}
+	}
+
+	return !!budget;
+}
+
+static bool atl_rx_checksum(struct sk_buff *skb, struct atl_rx_desc_wb *desc,
+	struct atl_desc_ring *ring)
+{
+	struct atl_nic *nic = ring->qvec->nic;
+	struct net_device *ndev = nic->ndev;
+	int csum_ok = 1, recheck = 0;
+
+	skb_checksum_none_assert(skb);
+
+	if (desc->rx_stat & atl_rx_stat_mac_err) {
+		atl_update_ring_stat(ring, rx.mac_err, 1);
+		atl_nic_dbg("rx MAC err: rx_stat %d pkt_type %d len %d\n",
+			desc->rx_stat, desc->pkt_type, desc->pkt_len);
+		goto drop;
+	}
+
+	if (!(ndev->features & NETIF_F_RXCSUM))
+		return true;
+
+	switch (desc->pkt_type & atl_rx_pkt_type_l3_msk) {
+	case atl_rx_pkt_type_ipv4:
+		csum_ok &= !(desc->rx_stat & atl_rx_stat_ipv4_err);
+		/* Fallthrough */
+	case atl_rx_pkt_type_ipv6:
+		break;
+	default:
+		return true;
+	}
+
+	switch (desc->pkt_type & atl_rx_pkt_type_l4_msk) {
+	case atl_rx_pkt_type_tcp:
+	case atl_rx_pkt_type_udp:
+		recheck = desc->pkt_len <= 60;
+		csum_ok &= !(desc->rx_stat & atl_rx_stat_l4_err);
+		break;
+	default:
+		return true;
+	}
+
+	if (csum_ok) {
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		return true;
+	} else if (recheck) {
+		return true;
+	}
+
+	atl_update_ring_stat(ring, rx.csum_err, 1);
+
+	atl_nic_dbg("bad rx checksum: rx_stat %d pkt_type %d len %d\n",
+		    desc->rx_stat, desc->pkt_type, desc->pkt_len);
+
+	if (ndev->features & NETIF_F_RXALL)
+		return true;
+
+drop:
+	dev_kfree_skb_any(skb);
+	return false;
+}
+
+static void atl_rx_hash(struct sk_buff *skb, struct atl_rx_desc_wb *desc,
+	struct net_device *ndev)
+{
+	uint8_t rss_type = desc->rss_type;
+
+	if (!(ndev->features & NETIF_F_RXHASH) || rss_type < 2 || rss_type > 7)
+		return;
+
+	skb_set_hash(skb, le32_to_cpu(desc->rss_hash),
+		(rss_type > 3 && rss_type < 8) ? PKT_HASH_TYPE_L4 :
+		PKT_HASH_TYPE_L3);
+}
+
+static bool atl_rx_packet(struct sk_buff *skb, struct atl_rx_desc_wb *desc,
+			  struct atl_desc_ring *ring)
+{
+	struct net_device *ndev = ring->qvec->nic->ndev;
+	struct napi_struct *napi = &ring->qvec->napi;
+
+	if (!atl_rx_checksum(skb, desc, ring))
+		return false;
+
+	if (!skb_is_nonlinear(skb) && eth_skb_pad(skb))
+		return false;
+
+	if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX
+	    && desc->rx_estat & atl_rx_estat_vlan_stripped) {
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+				       le16_to_cpu(desc->vlan_tag));
+	}
+
+	atl_rx_hash(skb, desc, ndev);
+
+	skb_record_rx_queue(skb, ring->qvec->idx);
+	skb->protocol = eth_type_trans(skb, ndev);
+	if (skb->pkt_type == PACKET_MULTICAST)
+		atl_update_ring_stat(ring, rx.multicast, 1);
+	napi_gro_receive(napi, skb);
+	return true;
+}
+
+unsigned int atl_rx_linear;
+module_param_named(rx_linear, atl_rx_linear, uint, 0444);
+
+/* DMA mappings of buffer pages are accounted via struct
+ * atl_rxpage. Being mapped counts as a single additional reference
+ * for the target page.
+ */
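+/* Lifecycle sketch, as implemented below: atl_get_page() allocates
+ * and maps a page, setting rxpage->mapcount to 1. Each extra pgref
+ * that keeps the mapping alive (e.g. a deferred RSC head fragment)
+ * takes a ref via atl_get_rxpage(); atl_put_rxpage() drops one, and
+ * the page is unmapped and freed once mapcount reaches zero.
+ */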
+static int atl_get_page(struct atl_pgref *pgref, unsigned int order,
+	struct device *dev)
+{
+	struct atl_rxpage *rxpage;
+	struct page *page;
+	dma_addr_t daddr;
+	int ret = -ENOMEM;
+
+	rxpage = kmalloc(sizeof(*rxpage), GFP_ATOMIC | __GFP_NOWARN);
+	if (unlikely(!rxpage))
+		return ret;
+
+	page = dev_alloc_pages(order);
+	if (unlikely(!page))
+		goto free_rxpage;
+
+	daddr = dma_map_page(dev, page, 0, PAGE_SIZE << order, DMA_FROM_DEVICE);
+	trace_atl_dma_map_rxbuf(-1, -1, daddr, PAGE_SIZE << order, NULL,
+		page_to_virt(page));
+
+	if (unlikely(dma_mapping_error(dev, daddr)))
+		goto free_page;
+
+	rxpage->page = page;
+	rxpage->daddr = daddr;
+	rxpage->order = order;
+	rxpage->mapcount = 1;
+
+	pgref->rxpage = rxpage;
+	pgref->pg_off = 0;
+
+	return 0;
+
+free_page:
+	__free_pages(page, order);
+free_rxpage:
+	kfree(rxpage);
+
+	return ret;
+}
+
+static int atl_get_pages(struct atl_rxbuf *rxbuf,
+	struct atl_desc_ring *ring)
+{
+	int ret;
+	struct device *dev = ring->qvec->dev;
+
+	if (likely((rxbuf->head.rxpage || atl_rx_linear)
+			&& rxbuf->data.rxpage))
+		return 0;
+
+	if (!rxbuf->head.rxpage && !atl_rx_linear) {
+		ret = atl_get_page(&rxbuf->head, ATL_RX_HEAD_ORDER, dev);
+		if (ret) {
+			atl_update_ring_stat(ring,
+				rx.alloc_head_page_failed, 1);
+			return ret;
+		}
+		atl_update_ring_stat(ring, rx.alloc_head_page, 1);
+	}
+
+	if (!rxbuf->data.rxpage) {
+		ret = atl_get_page(&rxbuf->data, ATL_RX_DATA_ORDER, dev);
+		if (ret) {
+			atl_update_ring_stat(ring,
+				rx.alloc_data_page_failed, 1);
+			return ret;
+		}
+		atl_update_ring_stat(ring, rx.alloc_data_page, 1);
+	}
+
+	return 0;
+}
+
+static inline void atl_fill_rx_desc(struct atl_desc_ring *ring,
+	struct atl_rxbuf *rxbuf)
+{
+	struct atl_rx_desc *desc;
+	DECLARE_SCRATCH_DESC(scratch);
+
+	desc  = &DESC_PTR(ring, ring->tail, scratch)->rx;
+
+	desc->daddr = atl_buf_daddr(&rxbuf->data) +
+		(atl_rx_linear ? ATL_RX_HEADROOM : 0);
+
+	/* Assigning haddr clears dd as bufs are cacheline-aligned
+	 * and ATL_RX_HEADROOM is even
+	 */
+	desc->haddr = atl_rx_linear ? 0 :
+		atl_buf_daddr(&rxbuf->head) + ATL_RX_HEADROOM;
+
+	trace_atl_fill_rx_desc(ring->tail, desc);
+	COMMIT_DESC(ring, ring->tail, scratch);
+}
+
+static int atl_fill_rx(struct atl_desc_ring *ring, uint32_t count)
+{
+	int ret = 0;
+
+	while (count) {
+		struct atl_rxbuf *rxbuf = &ring->rxbufs[ring->tail];
+
+		ret = atl_get_pages(rxbuf, ring);
+		if (ret)
+			break;
+
+		atl_fill_rx_desc(ring, rxbuf);
+		bump_tail(ring, 1);
+		count--;
+	}
+
+	/* If tail ptr passed the next_to_recycle ptr, clamp the
+	 * latter to the former.
+	 */
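+	/* For instance, with head == 5, next_to_recycle == 2 and the
+	 * refill above having advanced tail to 3, tail has passed
+	 * next_to_recycle, so recycling restarts at tail rather than
+	 * clobbering a live pgref.
+	 */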
+	if (ring->next_to_recycle < ring->head ?
+		ring->next_to_recycle < ring->tail &&
+		ring->tail < ring->head :
+		ring->tail > ring->next_to_recycle ||
+		ring->tail < ring->head)
+		ring->next_to_recycle = ring->tail;
+
+	wmb();
+	atl_write(ring_hw(ring), ATL_RX_RING_TAIL(ring), ring->tail);
+	return ret;
+}
+
+static inline void atl_get_rxpage(struct atl_pgref *pgref)
+{
+	pgref->rxpage->mapcount++;
+}
+
+static inline void __atl_free_rxpage(struct atl_rxpage *rxpage,
+	struct device *dev)
+{
+	unsigned int len = PAGE_SIZE << rxpage->order;
+
+	dma_unmap_page(dev, rxpage->daddr, len, DMA_FROM_DEVICE);
+	trace_atl_dma_unmap_rxbuf(-1, -1, rxpage->daddr, len, NULL);
+
+	/* Drop the ref for dma mapping. */
+	__free_pages(rxpage->page, rxpage->order);
+	kfree(rxpage);
+}
+
+static inline void atl_put_rxpage(struct atl_pgref *pgref, struct device *dev)
+{
+	struct atl_rxpage *rxpage = pgref->rxpage;
+
+	if (!rxpage)
+		return;
+
+	if (--rxpage->mapcount)
+		return;
+
+	__atl_free_rxpage(rxpage, dev);
+	pgref->rxpage = 0;
+}
+
+static bool atl_recycle_or_put_page(struct atl_pgref *pgref,
+	unsigned int buf_len, struct device *dev)
+{
+	unsigned int order = pgref->rxpage->order;
+	unsigned int size = PAGE_SIZE << order;
+	struct page *page = pgref->rxpage->page;
+
+	if (!page_is_pfmemalloc(page) && pgref->pg_off + buf_len < size)
+		return true;
+
+	atl_put_rxpage(pgref, dev);
+
+	return false;
+}
+
+static void atl_maybe_recycle_rxbuf(struct atl_desc_ring *ring,
+	struct atl_rxbuf *rxbuf)
+{
+	int reused = 0;
+	struct atl_pgref *head = &rxbuf->head, *data = &rxbuf->data;
+	struct atl_rxbuf *new = &ring->rxbufs[ring->next_to_recycle];
+	unsigned int data_len = ATL_RX_BUF_SIZE +
+		(atl_rx_linear ? ATL_RX_HDR_OVRHD : 0);
+
+	if (!atl_rx_linear
+		&& atl_recycle_or_put_page(head,
+			ATL_RX_HDR_SIZE + ATL_RX_HDR_OVRHD, ring->qvec->dev)) {
+		new->head = *head;
+		reused = 1;
+		atl_update_ring_stat(ring, rx.reused_head_page, 1);
+	}
+	head->rxpage = 0;
+
+	if (atl_recycle_or_put_page(data, data_len, ring->qvec->dev)) {
+		new->data = *data;
+		reused = 1;
+		atl_update_ring_stat(ring, rx.reused_data_page, 1);
+	}
+	data->rxpage = 0;
+
+	if (reused)
+		bump_ptr(ring->next_to_recycle, ring, 1);
+}
+
+static unsigned int atl_data_len(struct atl_rx_desc_wb *wb)
+{
+	unsigned int len = le16_to_cpu(wb->pkt_len);
+
+	if (!wb->eop)
+		return ATL_RX_BUF_SIZE;
+
+	if (!wb->rsc_cnt && wb->sph)
+		len -= wb->hdr_len;
+
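+	/* pkt_len counts the whole (possibly multi-buffer) packet, so
+	 * mask it down to this final buffer's share; a result of 0
+	 * means the last buffer is exactly full.
+	 */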
+	len &= ATL_RX_BUF_SIZE - 1;
+	return len ?: ATL_RX_BUF_SIZE;
+}
+
+static void atl_sync_range(struct atl_desc_ring *ring,
+	struct atl_pgref *pgref, unsigned int offt, unsigned int len)
+{
+	dma_addr_t daddr = pgref->rxpage->daddr;
+	unsigned int pg_off = pgref->pg_off + offt;
+
+	dma_sync_single_range_for_cpu(ring->qvec->dev, daddr, pg_off, len,
+		DMA_FROM_DEVICE);
+	trace_atl_sync_rx_range(-1, daddr, pg_off, len);
+}
+
+static struct sk_buff *atl_init_skb(struct atl_desc_ring *ring,
+	struct atl_rxbuf *rxbuf, struct atl_rx_desc_wb *wb)
+{
+	struct sk_buff *skb;
+	unsigned int hdr_len, alloc, tailroom, len;
+	unsigned int data_len = atl_data_len(wb);
+	void *hdr;
+	struct atl_pgref *pgref;
+	struct atl_nic *nic = ring->qvec->nic;
+
+	if (atl_rx_linear) {
+		if (!wb->eop) {
+			atl_nic_err("Multi-frag packet in linear mode\n");
+			atl_update_ring_stat(ring, rx.linear_dropped, 1);
+			return (void *)-1L;
+		}
+
+		hdr_len = len = data_len;
+		tailroom = 0;
+		pgref = &rxbuf->data;
+	} else {
+		hdr_len = wb->hdr_len;
+		if (hdr_len == 0) {
+			atl_nic_err("Header parse error\n");
+			return (void *)-1L;
+		}
+
+		/* If entire packet fits into ATL_RX_HDR_SIZE, reserve
+		 * enough space to pull the data part into skb head
+		 * and make it linear, otherwise allocate space for
+		 * hdr_len only
+		 */
+		len = (wb->sph ? hdr_len : 0) + data_len;
+		if (!wb->eop || len > ATL_RX_HDR_SIZE)
+			len = hdr_len;
+
+		/* reserve space for potential __pskb_pull_tail() */
+		tailroom = min(ATL_RX_TAILROOM, ATL_RX_HDR_SIZE - len);
+		pgref = &rxbuf->head;
+	}
+
+	if (atl_rx_linear || (wb->sph && (wb->eop || !wb->rsc_cnt)))
+		atl_sync_range(ring, pgref,
+			ATL_RX_HEADROOM, hdr_len);
+
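+	/* build_skb() places struct skb_shared_info at the end of the
+	 * buffer, so the allocation size must cover it on top of the
+	 * headroom, data and tailroom.
+	 */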
+	alloc = len + tailroom + ATL_RX_HEADROOM;
+	alloc += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	alloc = SKB_DATA_ALIGN(alloc);
+
+	hdr = atl_buf_vaddr(pgref);
+	skb = build_skb(hdr, alloc);
+	if (unlikely(!skb)) {
+		atl_update_ring_stat(ring, rx.alloc_skb_failed, 1);
+		return NULL;
+	}
+
+	if (wb->rsc_cnt && !wb->eop) {
+		struct atl_cb *atl_cb = ATL_CB(skb);
+
+		/* First frag of a multi-frag RSC packet. Either head or
+		 * data buffer, depending on whether the header was
+		 * split off by HW, might still be accessed by
+		 * RSC. Delay processing till EOP.
+		 */
+		if (wb->sph) {
+			atl_cb->pgref = *pgref;
+			atl_cb->head = true;
+			/* Safe to sync the data buf. !wb->eop
+			 * implies the data buffer is completely filled.
+			 */
+			atl_sync_range(ring, &rxbuf->data, 0, ATL_RX_BUF_SIZE);
+		} else {
+			atl_cb->pgref = rxbuf->data;
+			atl_cb->head = false;
+			/* No need to sync head fragment as nothing
+			 * was DMA'd into it
+			 */
+		}
+		atl_get_rxpage(&atl_cb->pgref);
+	}
+
+	pgref->pg_off += alloc;
+	page_ref_inc(pgref->rxpage->page);
+
+	if (!atl_rx_linear && !wb->sph) {
+		atl_nic_dbg("Header not split despite non-zero hdr_len (%d)\n",
+			hdr_len);
+		/* Make skb head empty -- will copy the real header
+		 * from the data buffer later
+		 */
+		hdr_len = 0;
+	}
+
+	skb_reserve(skb, ATL_RX_HEADROOM);
+	skb_put(skb, hdr_len);
+	return skb;
+}
+
+static inline void atl_skb_put_data(struct sk_buff *skb,
+	void *data, unsigned int len)
+{
+	memcpy(skb_tail_pointer(skb), data, len);
+	skb->tail += len;
+	skb->len += len;
+}
+
+static struct sk_buff *atl_process_rx_frag(struct atl_desc_ring *ring,
+	struct atl_rxbuf *rxbuf, struct atl_rx_desc_wb *wb)
+{
+	bool first_frag = false;
+	bool hdr_split = !!wb->sph;
+	unsigned int hdr_len, data_len, aligned_data_len;
+	unsigned int data_offt = 0, to_pull = 0;
+	struct sk_buff *skb = rxbuf->skb;
+	struct atl_cb *atl_cb;
+	struct atl_pgref *headref = &rxbuf->head, *dataref = &rxbuf->data;
+	struct device *dev = ring->qvec->dev;
+
+	if (!skb) {
+		 /* First buffer of a packet */
+		skb = atl_init_skb(ring, rxbuf, wb);
+		first_frag = true;
+	} else {
+		rxbuf->skb = NULL;
+	}
+
+	if (unlikely(!skb || skb == (void *)-1L))
+		return skb;
+
+	hdr_len = wb->hdr_len;
+	data_len = atl_data_len(wb);
+
+	if (atl_rx_linear) {
+		/* Linear skb mode. The entire packet was DMA'd into
+		 * the data buffer and skb has already been built
+		 * around it and dataref's pg_off has been increased
+		 * in atl_init_skb()
+		 */
+
+		atl_maybe_recycle_rxbuf(ring, rxbuf);
+		return skb;
+	}
+
+	/* Align the start of the next buffer in the page. This also
+	 * serves as truesize increment when the paged frag is added
+	 * to skb.
+	 */
+	aligned_data_len = ALIGN(data_len, L1_CACHE_BYTES);
+
+	if (first_frag && !hdr_split)
+		/* Header was not split off, so skip over it
+		 * when adding the paged frag
+		 */
+		data_offt = hdr_len;
+
+	if (!first_frag || wb->eop || !wb->rsc_cnt) {
+		atl_sync_range(ring, dataref, 0, data_len);
+
+		/* If header was not split off by HW, remember to pull
+		 * it into the skb head later. The rest of the data
+		 * buffer might need to be pulled too for small
+		 * packets, so delay the actual copying till later
+		 */
+		if (first_frag && !hdr_split)
+			to_pull = hdr_len;
+	}
+
+	/* If the entire packet fits within ATL_RX_HDR_SIZE bytes,
+	 * pull it into the skb head. This handles the header not
+	 * having been split by HW case correctly too, as
+	 * skb_headlen() will be zero in that case and data_len will
+	 * hold the whole packet length.
+	 */
+	if (first_frag && skb_headlen(skb) + data_len <= ATL_RX_HDR_SIZE) {
+		to_pull = data_len;
+		/* Recycle the data buffer as we're copying the
+		 * contents to skb head.
+		 */
+		aligned_data_len = 0;
+	} else {
+		/* Add the data buffer to paged frag list, skipping
+		 * the un-split header if any -- it will be copied to
+		 * skb head later.
+		 */
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+			dataref->rxpage->page, dataref->pg_off + data_offt,
+			data_len - data_offt, aligned_data_len);
+		page_ref_inc(dataref->rxpage->page);
+	}
+
+	if (to_pull)
+		atl_skb_put_data(skb, atl_buf_vaddr(dataref), to_pull);
+
+	/* Update the data buf's pg_off to point to free
+	 * space. Header buf's offset was updated in atl_init_skb()
+	 * for first frag of the packet only.
+	 */
+	dataref->pg_off += aligned_data_len;
+	atl_maybe_recycle_rxbuf(ring, rxbuf);
+
+	if (first_frag || !wb->eop || !wb->rsc_cnt)
+		return skb;
+
+	/* The last descriptor of RSC packet is done, unmap the head
+	 * fragment.
+	 */
+	atl_cb = ATL_CB(skb);
+
+	headref = &atl_cb->pgref;
+	if (unlikely(!headref->rxpage))
+		return skb;
+
+	if (likely(atl_cb->head)) {
+		atl_sync_range(ring, headref, ATL_RX_HEADROOM, hdr_len);
+		atl_put_rxpage(headref, dev);
+	} else {
+		atl_sync_range(ring, headref, 0, ATL_RX_BUF_SIZE);
+		/* Data buf's sync being delayed implies header was
+		 * not split off by HW. Fix that now.
+		 */
+		atl_skb_put_data(skb, atl_buf_vaddr(headref), hdr_len);
+		atl_put_rxpage(headref, dev);
+	}
+
+	return skb;
+}
+
+unsigned int atl_rx_refill_batch = 16;
+module_param_named(rx_refill_batch, atl_rx_refill_batch, uint, 0644);
+
+static int atl_clean_rx(struct atl_desc_ring *ring, int budget)
+{
+	unsigned int packets = 0;
+	unsigned int bytes = 0;
+	struct sk_buff *skb;
+
+	while (packets < budget) {
+		uint32_t space = ring_space(ring);
+		struct atl_rx_desc_wb *wb;
+		struct atl_rxbuf *rxbuf;
+		unsigned int len;
+		DECLARE_SCRATCH_DESC(scratch);
+
+		if (space >= atl_rx_refill_batch)
+			atl_fill_rx(ring, space);
+
+		rxbuf = &ring->rxbufs[ring->head];
+
+		wb = &DESC_PTR(ring, ring->head, scratch)->wb;
+		FETCH_DESC(ring, ring->head, scratch);
+
+		if (!wb->dd)
+			break;
+		DESC_RMB();
+
+		skb = atl_process_rx_frag(ring, rxbuf, wb);
+
+		/* Treat allocation errors as transient and retry later */
+		if (!skb) {
+			struct atl_nic *nic = ring->qvec->nic;
+
+			atl_nic_err("failed to alloc skb for RX packet\n");
+			break;
+		}
+
+		if (skb == (void *)-1L)
+			atl_maybe_recycle_rxbuf(ring, rxbuf);
+
+		bump_head(ring, 1);
+		if (!wb->eop) {
+			uint32_t next = wb->rsc_cnt ?
+				le16_to_cpu(wb->next_desp) :
+				ring->head;
+			/* If atl_process_rx_frag() returned the error
+			 * marker, this propagates it to the next
+			 * descriptor of the packet, preventing that
+			 * descriptor from being treated as the start
+			 * of a new packet later.
+			 */
+			ring->rxbufs[next].skb = skb;
+			atl_update_ring_stat(ring, rx.non_eop_descs, 1);
+			continue;
+		}
+
+		if (skb == (void *)-1L)
+			continue;
+
+		len = skb->len;
+		if (atl_rx_packet(skb, wb, ring)) {
+			packets++;
+			bytes += len;
+		}
+	}
+
+	u64_stats_update_begin(&ring->syncp);
+	ring->stats.rx.bytes += bytes;
+	ring->stats.rx.packets += packets;
+	u64_stats_update_end(&ring->syncp);
+
+	return packets;
+}
+
+unsigned int atl_min_intr_delay = 10;
+module_param_named(min_intr_delay, atl_min_intr_delay, uint, 0644);
+
+static void atl_set_intr_throttle(struct atl_queue_vec *qvec)
+{
+	struct atl_hw *hw = &qvec->nic->hw;
+	atl_write(hw, ATL_INTR_THRTL(atl_qvec_intr(qvec)),
+		1 << 0x1f | ((atl_min_intr_delay / 2) & 0x1ff) << 0x10);
+}
+
+static int atl_poll(struct napi_struct *napi, int budget)
+{
+	struct atl_queue_vec *qvec;
+	struct atl_nic *nic;
+	bool clean_done;
+	int rx_cleaned;
+
+	qvec = container_of(napi, struct atl_queue_vec, napi);
+	nic = qvec->nic;
+
+	clean_done = atl_clean_tx(&qvec->tx);
+	rx_cleaned = atl_clean_rx(&qvec->rx, budget);
+
+	clean_done &= (rx_cleaned < budget);
+
+	if (!clean_done)
+		return budget;
+
+	napi_complete_done(napi, rx_cleaned);
+	atl_intr_enable(&nic->hw, BIT(atl_qvec_intr(qvec)));
+	/* atl_set_intr_throttle(&nic->hw, qvec->idx); */
+	return rx_cleaned;
+}
+
+/* XXX NOTE: only checked on device probe for now */
+static int enable_msi = 1;
+module_param_named(msi, enable_msi, int, 0444);
+
+static int atl_config_interrupts(struct atl_nic *nic)
+{
+	struct atl_hw *hw = &nic->hw;
+	unsigned int flags;
+	int ret;
+	struct irq_affinity iaff = {
+		.pre_vectors = ATL_NUM_NON_RING_IRQS,
+		.post_vectors = 0,
+	};
+
+	if (enable_msi) {
+		flags = PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_AFFINITY;
+		ret = pci_alloc_irq_vectors_affinity(hw->pdev,
+			ATL_NUM_NON_RING_IRQS + 1,
+			ATL_NUM_NON_RING_IRQS + nic->requested_nvecs,
+			flags, &iaff);
+
+		/* pci_alloc_irq_vectors() never allocates fewer
+		 * than min_vectors
+		 */
+		if (ret > 0) {
+			ret -= ATL_NUM_NON_RING_IRQS;
+			nic->nvecs = ret;
+			nic->flags |= ATL_FL_MULTIPLE_VECTORS;
+			return ret;
+		}
+	}
+
+	atl_nic_warn("Couldn't allocate MSI-X / MSI vectors, falling back to legacy interrupts\n");
+
+	ret = pci_alloc_irq_vectors(hw->pdev, 1, 1, PCI_IRQ_LEGACY);
+	if (ret < 0) {
+		atl_nic_err("Couldn't allocate legacy IRQ\n");
+		return ret;
+	}
+
+	nic->nvecs = 1;
+	nic->flags &= ~ATL_FL_MULTIPLE_VECTORS;
+
+	return 1;
+}
+
+irqreturn_t atl_ring_irq(int irq, void *priv)
+{
+	struct napi_struct *napi = priv;
+
+	napi_schedule_irqoff(napi);
+	return IRQ_HANDLED;
+}
+
+void atl_clear_datapath(struct atl_nic *nic)
+{
+	int i;
+	struct atl_queue_vec *qvecs = nic->qvecs;
+
+	/* If atl_reconfigure() has failed previously,
+	 * atl_clear_datapath() can be called again on
+	 * pci_ops->remove(), without an intervening
+	 * atl_setup_datapath().
+	 */
+	if (!test_and_clear_bit(ATL_ST_CONFIGURED, &nic->state))
+		return;
+
+#ifdef ATL_COMPAT_PCI_ALLOC_IRQ_VECTORS_AFFINITY
+	for (i = 0; i < nic->nvecs; i++) {
+		int vector = pci_irq_vector(nic->hw.pdev,
+			i + ATL_NUM_NON_RING_IRQS);
+		irq_set_affinity_hint(vector, NULL);
+	}
+#endif
+
+	pci_free_irq_vectors(nic->hw.pdev);
+
+	if (!qvecs)
+		return;
+
+	for (i = 0; i < nic->nvecs; i++)
+		netif_napi_del(&qvecs[i].napi);
+	kfree(qvecs);
+	nic->qvecs = NULL;
+}
+
+int atl_setup_datapath(struct atl_nic *nic)
+{
+	int nvecs, i, ret;
+	struct atl_queue_vec *qvec;
+
+	nvecs = atl_config_interrupts(nic);
+	if (nvecs < 0)
+		return nvecs;
+
+	qvec = kcalloc(nvecs, sizeof(*qvec), GFP_KERNEL);
+	if (!qvec) {
+		atl_nic_err("Couldn't alloc qvecs\n");
+		ret = -ENOMEM;
+		goto exit_free;
+	}
+	nic->qvecs = qvec;
+
+	for (i = 0; i < nvecs; i++, qvec++) {
+		qvec->nic = nic;
+		qvec->idx = i;
+		qvec->dev = &nic->hw.pdev->dev;
+
+		qvec->rx.hw.reg_base = ATL_RX_RING(i);
+		qvec->rx.qvec = qvec;
+		qvec->rx.hw.size = nic->requested_rx_size;
+
+		qvec->tx.hw.reg_base = ATL_TX_RING(i);
+		qvec->tx.qvec = qvec;
+		qvec->tx.hw.size = nic->requested_tx_size;
+
+		u64_stats_init(&qvec->rx.syncp);
+		u64_stats_init(&qvec->tx.syncp);
+
+		netif_napi_add(nic->ndev, &qvec->napi, atl_poll, 64);
+	}
+
+	atl_compat_calc_affinities(nic);
+
+	nic->max_mtu = atl_rx_linear ? ATL_MAX_RX_LINEAR_MTU : ATL_MAX_MTU;
+
+	set_bit(ATL_ST_CONFIGURED, &nic->state);
+	return 0;
+
+exit_free:
+	atl_clear_datapath(nic);
+	return ret;
+}
+
+static inline void atl_free_rxpage(struct atl_pgref *pgref, struct device *dev)
+{
+	struct atl_rxpage *rxpage = pgref->rxpage;
+
+	if (!rxpage)
+		return;
+
+	/* Unmap, dropping the ref for being mapped */
+	__atl_free_rxpage(rxpage, dev);
+	pgref->rxpage = 0;
+}
+
+/* Releases any skbs that may have been queued on ring positions yet
+ * to be processed by poll. The buffers are kept to be re-used after
+ * resume / thaw. */
+static void atl_clear_rx_bufs(struct atl_desc_ring *ring)
+{
+	unsigned int bufs = ring_occupied(ring);
+	struct device *dev = ring->qvec->dev;
+
+	while (bufs) {
+		struct atl_rxbuf *rxbuf = &ring->rxbufs[ring->head];
+		struct sk_buff *skb = rxbuf->skb;
+
+		if (skb) {
+			struct atl_pgref *pgref = &ATL_CB(skb)->pgref;
+
+			atl_put_rxpage(pgref, dev);
+			dev_kfree_skb_any(skb);
+			rxbuf->skb = NULL;
+		}
+
+		bump_head(ring, 1);
+		bufs--;
+	}
+}
+
+static void atl_free_rx_bufs(struct atl_desc_ring *ring)
+{
+	struct device *dev = ring->qvec->dev;
+	struct atl_rxbuf *rxbuf;
+
+	if (!ring->rxbufs)
+		return;
+
+	for (rxbuf = ring->rxbufs;
+	     rxbuf < &ring->rxbufs[ring->hw.size]; rxbuf++) {
+		atl_free_rxpage(&rxbuf->head, dev);
+		atl_free_rxpage(&rxbuf->data, dev);
+	}
+}
+
+static void atl_free_tx_bufs(struct atl_desc_ring *ring)
+{
+	unsigned int bufs = ring_occupied(ring);
+
+	if (!ring->txbufs)
+		return;
+
+	while (bufs) {
+		struct atl_txbuf *txbuf;
+
+		bump_tail(ring, -1);
+		txbuf = &ring->txbufs[ring->tail];
+
+		atl_txbuf_free(txbuf, ring->qvec->dev, ring->tail);
+		bufs--;
+	}
+}
+
+static void atl_free_ring(struct atl_desc_ring *ring)
+{
+	/* vfree(NULL) is a no-op, so no need to check */
+	vfree(ring->bufs);
+	ring->bufs = NULL;
+
+	atl_free_descs(ring->qvec->nic, &ring->hw);
+}
+
+static int atl_alloc_ring(struct atl_desc_ring *ring, size_t buf_size,
+	char *type)
+{
+	int ret;
+	struct atl_nic *nic = ring->qvec->nic;
+	int idx = ring->qvec->idx;
+
+	ret = atl_alloc_descs(nic, &ring->hw);
+	if (ret) {
+		atl_nic_err("Couldn't alloc %s[%d] descriptors\n", type, idx);
+		return ret;
+	}
+
+	ring->bufs = vzalloc(ring->hw.size * buf_size);
+	if (!ring->bufs) {
+		atl_nic_err("Couldn't alloc %s[%d] %sbufs\n", type, idx, type);
+		ret = -ENOMEM;
+		goto free;
+	}
+
+	ring->head = ring->tail =
+		atl_read(&nic->hw, ATL_RING_HEAD(ring)) & 0x1fff;
+	return 0;
+
+free:
+	atl_free_ring(ring);
+	return ret;
+}
+
+static int atl_alloc_qvec_intr(struct atl_queue_vec *qvec)
+{
+	struct atl_nic *nic = qvec->nic;
+	int vector;
+	int ret;
+
+	snprintf(qvec->name, sizeof(qvec->name), "%s-ring-%d",
+		nic->ndev->name, qvec->idx);
+
+	if (!(nic->flags & ATL_FL_MULTIPLE_VECTORS))
+		return 0;
+
+	vector = pci_irq_vector(nic->hw.pdev, atl_qvec_intr(qvec));
+	ret = request_irq(vector, atl_ring_irq, 0, qvec->name, &qvec->napi);
+	if (ret) {
+		atl_nic_err("request MSI ring vector failed: %d\n", -ret);
+		return ret;
+	}
+
+	atl_compat_set_affinity(vector, qvec);
+
+	return 0;
+}
+
+static void atl_free_qvec_intr(struct atl_queue_vec *qvec)
+{
+	int vector = pci_irq_vector(qvec->nic->hw.pdev, atl_qvec_intr(qvec));
+
+	if (!(qvec->nic->flags & ATL_FL_MULTIPLE_VECTORS))
+		return;
+
+	atl_compat_set_affinity(vector, NULL);
+	free_irq(vector, &qvec->napi);
+}
+
+static int atl_alloc_qvec(struct atl_queue_vec *qvec)
+{
+	struct atl_txbuf *txbuf;
+	int count = qvec->tx.hw.size;
+	int ret;
+
+	ret = atl_alloc_qvec_intr(qvec);
+	if (ret)
+		return ret;
+
+	ret = atl_alloc_ring(&qvec->tx, sizeof(struct atl_txbuf), "tx");
+	if (ret)
+		goto free_irq;
+
+	ret = atl_alloc_ring(&qvec->rx, sizeof(struct atl_rxbuf), "rx");
+	if (ret)
+		goto free_tx;
+
+	for (txbuf = qvec->tx.txbufs; count; count--)
+		(txbuf++)->last = -1;
+
+	return 0;
+
+free_tx:
+	atl_free_ring(&qvec->tx);
+free_irq:
+	atl_free_qvec_intr(qvec);
+
+	return ret;
+}
+
+static void atl_free_qvec(struct atl_queue_vec *qvec)
+{
+	struct atl_desc_ring *rx = &qvec->rx;
+	struct atl_desc_ring *tx = &qvec->tx;
+
+	atl_free_rx_bufs(rx);
+	atl_free_ring(rx);
+
+	atl_free_ring(tx);
+	atl_free_qvec_intr(qvec);
+}
+
+int atl_alloc_rings(struct atl_nic *nic)
+{
+	struct atl_queue_vec *qvec;
+	int ret;
+
+	atl_for_each_qvec(nic, qvec) {
+		ret = atl_alloc_qvec(qvec);
+		if (ret)
+			goto free;
+	}
+
+	return 0;
+
+free:
+	while (--qvec >= &nic->qvecs[0])
+		atl_free_qvec(qvec);
+
+	return ret;
+}
+
+void atl_free_rings(struct atl_nic *nic)
+{
+	struct atl_queue_vec *qvec;
+
+	atl_for_each_qvec(nic, qvec)
+		atl_free_qvec(qvec);
+}
+
+static unsigned int atl_rx_mod_hyst = 10, atl_tx_mod_hyst = 10;
+module_param_named(rx_mod_hyst, atl_rx_mod_hyst, uint, 0644);
+module_param_named(tx_mod_hyst, atl_tx_mod_hyst, uint, 0644);
+
+static void atl_set_intr_mod_qvec(struct atl_queue_vec *qvec)
+{
+	struct atl_nic *nic = qvec->nic;
+	struct atl_hw *hw = &nic->hw;
+	unsigned int min, max;
+	int idx = qvec->idx;
+
+	min = nic->rx_intr_delay - atl_min_intr_delay;
+	max = min + atl_rx_mod_hyst;
+
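+	/* Field layout assumed from the writes below: min and max
+	 * delays at bits 8 and 16, apparently in units of 2 us, with
+	 * the low bits selecting the moderation mode.
+	 */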
+	atl_write(hw, ATL_RX_INTR_MOD_CTRL(idx),
+		(max / 2) << 0x10 | (min / 2) << 8 | 2);
+
+	min = nic->tx_intr_delay - atl_min_intr_delay;
+	max = min + atl_tx_mod_hyst;
+
+	atl_write(hw, ATL_TX_INTR_MOD_CTRL(idx),
+		(max / 2) << 0x10 | (min / 2) << 8 | 2);
+}
+
+void atl_set_intr_mod(struct atl_nic *nic)
+{
+	struct atl_queue_vec *qvec;
+
+	atl_for_each_qvec(nic, qvec)
+		atl_set_intr_mod_qvec(qvec);
+}
+
+static void atl_start_rx_ring(struct atl_desc_ring *ring)
+{
+	struct atl_hw *hw = &ring->qvec->nic->hw;
+	int idx = ring->qvec->idx;
+	unsigned int rx_ctl;
+
+	atl_write(hw, ATL_RING_BASE_LSW(ring), ring->hw.daddr);
+	atl_write(hw, ATL_RING_BASE_MSW(ring), ring->hw.daddr >> 32);
+
+	atl_write(hw, ATL_RX_RING_TAIL(ring), ring->tail);
+	atl_write(hw, ATL_RX_RING_BUF_SIZE(ring),
+		(ATL_RX_HDR_SIZE / 64) << 8 | ATL_RX_BUF_SIZE / 1024);
+	atl_write(hw, ATL_RX_RING_THRESH(ring), 8 << 0x10 | 24 << 0x18);
+
+	/* LRO */
+	atl_write_bits(hw, ATL_RX_LRO_PKT_LIM(idx),
+		(idx & 7) * 4, 2, 3);
+
+	/* Enable ring | VLAN offload | header split in non-linear mode */
+	rx_ctl = BIT(31) | BIT(29) | ring->hw.size |
+		(atl_rx_linear ? 0 : BIT(28));
+	atl_write(hw, ATL_RX_RING_CTL(ring), rx_ctl);
+}
+
+static void atl_start_tx_ring(struct atl_desc_ring *ring)
+{
+	struct atl_nic *nic = ring->qvec->nic;
+	struct atl_hw *hw = &nic->hw;
+
+	atl_write(hw, ATL_RING_BASE_LSW(ring), ring->hw.daddr);
+	atl_write(hw, ATL_RING_BASE_MSW(ring), ring->hw.daddr >> 32);
+
+	/* Enable TSO on all active Tx rings */
+	atl_write(hw, ATL_TX_LSO_CTRL, BIT(nic->nvecs) - 1);
+
+	atl_write(hw, ATL_TX_RING_TAIL(ring), ring->tail);
+	atl_write(hw, ATL_TX_RING_THRESH(ring), 8 << 8 | 8 << 0x10 |
+		24 << 0x18);
+	atl_write(hw, ATL_TX_RING_CTL(ring), BIT(31) | ring->hw.size);
+}
+
+static int atl_start_qvec(struct atl_queue_vec *qvec)
+{
+	struct atl_desc_ring *rx = &qvec->rx;
+	struct atl_desc_ring *tx = &qvec->tx;
+	struct atl_hw *hw = &qvec->nic->hw;
+	int intr = atl_qvec_intr(qvec);
+	struct atl_rxbuf *rxbuf;
+	int ret;
+
+	rx->head = rx->tail = atl_read(hw, ATL_RING_HEAD(rx)) & 0x1fff;
+	tx->head = tx->tail = atl_read(hw, ATL_RING_HEAD(tx)) & 0x1fff;
+
+	ret = atl_fill_rx(rx, ring_space(rx));
+	if (ret)
+		return ret;
+
+	rx->next_to_recycle = rx->tail;
+	/* The rxbuf at ->next_to_recycle is always kept empty so that
+	 * atl_maybe_recycle_rxbuf() always has a spot to recycle into
+	 * without overwriting a pgref to an already allocated page,
+	 * leaking memory. It's also the guard element in the ring
+	 * that keeps ->tail from overrunning ->head. If it's nonempty
+	 * on ring init (e.g. after a sleep-wake cycle), just release
+	 * the pages. */
+	rxbuf = &rx->rxbufs[rx->next_to_recycle];
+	atl_put_rxpage(&rxbuf->head, qvec->dev);
+	atl_put_rxpage(&rxbuf->data, qvec->dev);
+
+	/* Map ring interrupts into the corresponding cause bits */
+	atl_set_intr_bits(hw, qvec->idx, intr, intr);
+	atl_set_intr_throttle(qvec);
+
+	napi_enable(&qvec->napi);
+	atl_set_intr_mod_qvec(qvec);
+	atl_intr_enable(hw, BIT(atl_qvec_intr(qvec)));
+
+	atl_start_tx_ring(tx);
+	atl_start_rx_ring(rx);
+
+	return 0;
+}
+
+static void atl_stop_qvec(struct atl_queue_vec *qvec)
+{
+	struct atl_desc_ring *rx = &qvec->rx;
+	struct atl_desc_ring *tx = &qvec->tx;
+	struct atl_hw *hw = &qvec->nic->hw;
+
+	/* Disable and reset rings */
+	atl_write(hw, ATL_RING_CTL(rx), BIT(25));
+	atl_write(hw, ATL_RING_CTL(tx), BIT(25));
+	udelay(10);
+	atl_write(hw, ATL_RING_CTL(rx), 0);
+	atl_write(hw, ATL_RING_CTL(tx), 0);
+
+	atl_intr_disable(hw, BIT(atl_qvec_intr(qvec)));
+	napi_disable(&qvec->napi);
+
+	atl_clear_rx_bufs(rx);
+	atl_free_tx_bufs(tx);
+}
+
+static void atl_set_lro(struct atl_nic *nic)
+{
+	struct atl_hw *hw = &nic->hw;
+	uint32_t val = nic->ndev->features & NETIF_F_LRO ?
+		BIT(nic->nvecs) - 1 : 0;
+
+	atl_write_bits(hw, ATL_RX_LRO_CTRL1, 0, nic->nvecs, val);
+	atl_write_bits(hw, ATL_INTR_RSC_EN, 0, nic->nvecs, val);
+}
+
+int atl_start_rings(struct atl_nic *nic)
+{
+	struct atl_hw *hw = &nic->hw;
+	uint32_t mask;
+	struct atl_queue_vec *qvec;
+	int ret;
+
+	mask = BIT(nic->nvecs + ATL_NUM_NON_RING_IRQS) -
+		BIT(ATL_NUM_NON_RING_IRQS);
+	/* Enable auto-masking of ring interrupts on intr generation */
+	atl_set_bits(hw, ATL_INTR_AUTO_MASK, mask);
+	/* Enable status auto-clear on intr generation */
+	atl_set_bits(hw, ATL_INTR_AUTO_CLEAR, mask);
+
+	atl_set_lro(nic);
+	atl_set_rss_tbl(hw);
+
+	atl_for_each_qvec(nic, qvec) {
+		ret = atl_start_qvec(qvec);
+		if (ret)
+			goto stop;
+	}
+
+	return 0;
+
+stop:
+	while (--qvec >= &nic->qvecs[0])
+		atl_stop_qvec(qvec);
+
+	return ret;
+}
+
+void atl_stop_rings(struct atl_nic *nic)
+{
+	struct atl_queue_vec *qvec;
+	struct atl_hw *hw = &nic->hw;
+
+	atl_for_each_qvec(nic, qvec)
+		atl_stop_qvec(qvec);
+
+	atl_write_bit(hw, 0x5a00, 0, 1);
+	udelay(10);
+	atl_write_bit(hw, 0x5a00, 0, 0);
+}
+
+int atl_set_features(struct net_device *ndev, netdev_features_t features)
+{
+	netdev_features_t changed = ndev->features ^ features;
+
+	ndev->features = features;
+
+	if (changed & NETIF_F_LRO)
+		atl_set_lro(netdev_priv(ndev));
+
+	return 0;
+}
+
+void atl_get_ring_stats(struct atl_desc_ring *ring,
+	struct atl_ring_stats *stats)
+{
+	unsigned int start;
+
+	do {
+		start = u64_stats_fetch_begin_irq(&ring->syncp);
+		memcpy(stats, &ring->stats, sizeof(*stats));
+	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+}
+
+#define atl_add_stats(_dst, _src)				\
+do {								\
+	int i;							\
+	uint64_t *dst = (uint64_t *)(&(_dst));			\
+	uint64_t *src = (uint64_t *)(&(_src));			\
+								\
+	for (i = 0; i < sizeof(_dst) / sizeof(uint64_t); i++)	\
+		dst[i] += src[i];				\
+} while (0)
+
+void atl_update_global_stats(struct atl_nic *nic)
+{
+	int i;
+	struct atl_ring_stats stats;
+
+	memset(&stats, 0, sizeof(stats));
+	atl_update_eth_stats(nic);
+
+	spin_lock(&nic->stats_lock);
+
+	memset(&nic->stats.rx, 0, sizeof(nic->stats.rx));
+	memset(&nic->stats.tx, 0, sizeof(nic->stats.tx));
+
+	for (i = 0; i < nic->nvecs; i++) {
+		atl_get_ring_stats(&nic->qvecs[i].rx, &stats);
+		atl_add_stats(nic->stats.rx, stats.rx);
+
+		atl_get_ring_stats(&nic->qvecs[i].tx, &stats);
+		atl_add_stats(nic->stats.tx, stats.tx);
+	}
+
+	spin_unlock(&nic->stats_lock);
+}
+
+void atl_get_stats64(struct net_device *ndev,
+	struct rtnl_link_stats64 *nstats)
+{
+	struct atl_nic *nic = netdev_priv(ndev);
+	struct atl_global_stats *stats = &nic->stats;
+
+	atl_update_global_stats(nic);
+
+	nstats->rx_bytes = stats->rx.bytes;
+	nstats->rx_packets = stats->rx.packets;
+	nstats->tx_bytes = stats->tx.bytes;
+	nstats->tx_packets = stats->tx.packets;
+	nstats->rx_crc_errors = stats->rx.csum_err;
+	nstats->rx_frame_errors = stats->rx.mac_err;
+	nstats->rx_errors = nstats->rx_crc_errors + nstats->rx_frame_errors;
+	nstats->multicast = stats->rx.multicast;
+	nstats->tx_aborted_errors = stats->tx.dma_map_failed;
+	nstats->tx_errors = nstats->tx_aborted_errors;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ring.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ring.h
new file mode 100644
index 0000000..bc433db
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_ring.h
@@ -0,0 +1,199 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef _ATL_RING_H_
+#define _ATL_RING_H_
+
+#include <linux/compiler.h>
+
+#include "atl_common.h"
+#include "atl_desc.h"
+
+//#define ATL_RINGS_IN_UC_MEM
+
+#define ATL_TX_DESC_WB
+//#define ATL_TX_HEAD_WB
+
+#define ATL_RX_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
+#define ATL_RX_TAILROOM 64u
+#define ATL_RX_HEAD_ORDER 0
+#define ATL_RX_DATA_ORDER 0
+
+/* Header space in skb. Must be a multiple of L1_CACHE_BYTES */
+#define ATL_RX_HDR_SIZE 256u
+#define ATL_RX_HDR_OVRHD SKB_DATA_ALIGN(ATL_RX_HEADROOM +	\
+		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define ATL_RX_BUF_SIZE 2048
+
+#define ATL_MAX_RX_LINEAR_MTU (ATL_RX_BUF_SIZE - ETH_HLEN)
+
+#define ring_space(ring)						\
+	({								\
+		typeof(ring) __ring = (ring);				\
+		uint32_t space = READ_ONCE(__ring->head) -		\
+			READ_ONCE(__ring->tail) - 1;			\
+		(int32_t)space < 0 ? space + __ring->hw.size : space;	\
+	})
+
+#define ring_occupied(ring)						\
+	({								\
+		typeof(ring) __ring = (ring);				\
+		uint32_t occupied = READ_ONCE(__ring->tail) -		\
+			READ_ONCE(__ring->head);			\
+		(int32_t)occupied < 0 ? occupied + __ring->hw.size	\
+			: occupied;					\
+	})
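Both macros rely on unsigned wraparound: head and tail are free-running 32-bit indices, so their difference is computed modulo 2^32 and folded back into range by adding the ring size whenever it goes negative. A worked example with hypothetical values:

/* Example: hw.size = 512, head = 10, tail = 500.
 *   space    = 10 - 500 - 1 = 0xfffffe15, negative as int32_t,
 *              so space + 512 = 21 free descriptors;
 *   occupied = 500 - 10 = 490 descriptors in flight.
 * 490 + 21 = 511 = size - 1: one slot always stays empty so a full
 * ring can be told apart from an empty one.
 */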
+
+#define bump_ptr(ptr, ring, amount)					\
+	({								\
+		uint32_t __res = offset_ptr(ptr, ring, amount);		\
+		(ptr) = __res;						\
+		__res;							\
+	})
+
+/* These don't have to be atomic, because Tx tail is only adjusted
+ * in ndo->start_xmit which is serialized by the stack and the rest are
+ * only adjusted in NAPI poll which is serialized by NAPI */
+#define bump_tail(ring, amount) do {					\
+	uint32_t __ptr = READ_ONCE((ring)->tail);			\
+	WRITE_ONCE((ring)->tail, offset_ptr(__ptr, ring, amount));	\
+	} while (0)
+
+#define bump_head(ring, amount) do {					\
+	uint32_t __ptr = READ_ONCE((ring)->head);			\
+	WRITE_ONCE((ring)->head, offset_ptr(__ptr, ring, amount));	\
+	} while (0)
+
+struct atl_rxpage {
+	struct page *page;
+	dma_addr_t daddr;
+	unsigned mapcount; 	/* not atomic_t because accesses are
+				 * serialized by NAPI */
+	unsigned order;
+};
+
+struct atl_pgref {
+	struct atl_rxpage *rxpage;
+	unsigned pg_off;
+};
+
+struct atl_cb {
+	struct atl_pgref pgref;
+	bool head;
+};
+#define ATL_CB(skb) ((struct atl_cb *)(skb)->cb)
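ATL_CB() overlays struct atl_cb on the 48-byte skb->cb scratch area and relies on it fitting. A defensive compile-time check, offered as a sketch (not present in the original source):

/* Sketch: fail the build if atl_cb ever outgrows skb->cb. */
static inline void atl_cb_size_check(void)
{
	BUILD_BUG_ON(sizeof(struct atl_cb) >
		     sizeof(((struct sk_buff *)0)->cb));
}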
+
+struct atl_rxbuf {
+	struct sk_buff *skb;
+	struct atl_pgref head;
+	struct atl_pgref data;
+};
+
+struct atl_txbuf {
+	struct sk_buff *skb;
+	uint32_t last; /* index of eop descriptor */
+	unsigned bytes;
+	unsigned packets;
+	DEFINE_DMA_UNMAP_ADDR(daddr);
+	DEFINE_DMA_UNMAP_LEN(len);
+};
+
+struct atl_desc_ring {
+	struct atl_hw_ring hw;
+	uint32_t head, tail;
+	union {
+		/* Rx ring only */
+		uint32_t next_to_recycle;
+		/* Tx ring only, template desc for atl_map_tx_skb() */
+		union atl_desc desc;
+	};
+	union {
+		struct atl_rxbuf *rxbufs;
+		struct atl_txbuf *txbufs;
+		void *bufs;
+	};
+	struct atl_queue_vec *qvec;
+	struct u64_stats_sync syncp;
+	struct atl_ring_stats stats;
+};
+
+struct atl_queue_vec {
+	struct atl_desc_ring tx;
+	struct atl_desc_ring rx;
+	struct device *dev;	/* pdev->dev for DMA */
+	struct napi_struct napi;
+	struct atl_nic *nic;
+	unsigned idx;
+	char name[IFNAMSIZ + 10];
+#ifdef ATL_COMPAT_PCI_ALLOC_IRQ_VECTORS_AFFINITY
+	cpumask_t affinity_hint;
+#endif
+};
+
+#define atl_for_each_qvec(nic, qvec)				\
+	for (qvec = &(nic)->qvecs[0];				\
+	     qvec < &(nic)->qvecs[(nic)->nvecs]; qvec++)
+
+static inline struct atl_hw *ring_hw(struct atl_desc_ring *ring)
+{
+	return &ring->qvec->nic->hw;
+}
+
+static inline int atl_qvec_intr(struct atl_queue_vec *qvec)
+{
+	return qvec->idx + ATL_NUM_NON_RING_IRQS;
+}
+
+static inline void *atl_buf_vaddr(struct atl_pgref *pgref)
+{
+	return page_to_virt(pgref->rxpage->page) + pgref->pg_off;
+}
+
+static inline dma_addr_t atl_buf_daddr(struct atl_pgref *pgref)
+{
+	return pgref->rxpage->daddr + pgref->pg_off;
+}
+
+void atl_get_ring_stats(struct atl_desc_ring *ring,
+	struct atl_ring_stats *stats);
+
+#ifdef ATL_RINGS_IN_UC_MEM
+
+#define DECLARE_SCRATCH_DESC(_name) union atl_desc _name
+#define DESC_PTR(_ring, _idx, _scratch) (&(_scratch))
+#define COMMIT_DESC(_ring, _idx, _scratch)		\
+	WRITE_ONCE((_ring)->hw.descs[_idx], (_scratch))
+#define FETCH_DESC(_ring, _idx, _scratch)			\
+do {								\
+	(_scratch) = READ_ONCE((_ring)->hw.descs[_idx]);	\
+	dma_rmb();						\
+} while (0)
+
+#define DESC_RMB()
+
+#else // ATL_RINGS_IN_UC_MEM
+
+#define DECLARE_SCRATCH_DESC(_name)
+#define DESC_PTR(_ring, _idx, _scratch) (&(_ring)->hw.descs[_idx])
+#define COMMIT_DESC(_ring, _idx, _scratch)
+#define FETCH_DESC(_ring, _idx, _scratch)
+#define DESC_RMB() dma_rmb()
+
+#endif // ATL_RINGS_IN_UC_MEM
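The two macro variants let one consumer loop serve both layouts: with ATL_RINGS_IN_UC_MEM the descriptor is copied to a stack scratch and every copy carries its own dma_rmb(), otherwise the descriptor is read in place and a single DESC_RMB() orders the loads. A hedged sketch of the intended call shape (the completion handling itself is elided):

/* Sketch: shape of a completion path written against these macros. */
static void atl_example_poll_one(struct atl_desc_ring *ring)
{
	unsigned int idx = READ_ONCE(ring->head);
	union atl_desc *desc;
	DECLARE_SCRATCH_DESC(scratch);

	FETCH_DESC(ring, idx, scratch); /* copy + dma_rmb() in UC mode */
	desc = DESC_PTR(ring, idx, scratch);
	DESC_RMB();			/* dma_rmb() in the in-place mode */

	/* ... inspect *desc, then bump_head(ring, 1) ... */
	(void)desc;
}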
+
+#ifdef ATL_TX_HEAD_WB
+#error Head ptr writeback not implemented
+#elif !defined(ATL_TX_DESC_WB)
+static inline uint32_t atl_get_tx_head(struct atl_desc_ring *ring)
+{
+	return atl_read(ring_hw(ring), ATL_TX_RING_HEAD(ring->qvec->idx)) & 0x1fff;
+}
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_trace.c b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_trace.c
new file mode 100644
index 0000000..70c5806
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_trace.c
@@ -0,0 +1,14 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/skbuff.h>
+
+#define CREATE_TRACE_POINTS
+#include "atl_trace.h"
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_trace.h b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_trace.h
new file mode 100644
index 0000000..e7333d5
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_trace.h
@@ -0,0 +1,130 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM atlnew
+
+#if !defined(_ATL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _ATL_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "atl_desc.h"
+
+DECLARE_EVENT_CLASS(atl_dma_map_class,
+	TP_PROTO(int frag_idx, int ring_idx, dma_addr_t daddr, size_t size, struct sk_buff *skb,
+		void *vaddr),
+	TP_ARGS(frag_idx, ring_idx, daddr, size, skb, vaddr),
+	TP_STRUCT__entry(
+		__field(int, frag_idx)
+		__field(int, ring_idx)
+		__field(dma_addr_t, daddr)
+		__field(size_t, size)
+		__field(struct sk_buff *, skb)
+		__field(void *, vaddr)
+	),
+	TP_fast_assign(
+		__entry->frag_idx = frag_idx;
+		__entry->ring_idx = ring_idx;
+		__entry->daddr = daddr;
+		__entry->size = size;
+		__entry->skb = skb;
+		__entry->vaddr = vaddr;
+	),
+	TP_printk("idx %d ring idx %d daddr %pad len %#zx skb %p vaddr %p",
+		__entry->frag_idx, __entry->ring_idx, &__entry->daddr,
+		__entry->size, __entry->skb, __entry->vaddr)
+);
+
+#define DEFINE_MAP_EVENT(name)						\
+	DEFINE_EVENT(atl_dma_map_class, name,				\
+		TP_PROTO(int frag_idx, int ring_idx,			\
+			dma_addr_t daddr, size_t size,			\
+			struct sk_buff *skb, void *vaddr),		\
+		TP_ARGS(frag_idx, ring_idx, daddr, size, skb, vaddr))
+
+DEFINE_MAP_EVENT(atl_dma_map_head);
+DEFINE_MAP_EVENT(atl_dma_map_frag);
+DEFINE_MAP_EVENT(atl_dma_map_rxbuf);
+
+DECLARE_EVENT_CLASS(atl_dma_unmap_class,
+	TP_PROTO(int frag_idx, int ring_idx, dma_addr_t daddr, size_t size,
+		struct sk_buff *skb),
+	TP_ARGS(frag_idx, ring_idx, daddr, size, skb),
+	TP_STRUCT__entry(
+		__field(int, frag_idx)
+		__field(int, ring_idx)
+		__field(dma_addr_t, daddr)
+		__field(size_t, size)
+		__field(struct sk_buff *, skb)
+	),
+	TP_fast_assign(
+		__entry->frag_idx = frag_idx;
+		__entry->ring_idx = ring_idx;
+		__entry->daddr = daddr;
+		__entry->size = size;
+		__entry->skb = skb;
+	),
+	TP_printk("idx %d ring idx %d daddr %pad len %#zx skb %p",
+		__entry->frag_idx, __entry->ring_idx, &__entry->daddr,
+		__entry->size, __entry->skb)
+);
+
+#define DEFINE_UNMAP_EVENT(name)					\
+	DEFINE_EVENT(atl_dma_unmap_class, name,				\
+		TP_PROTO(int frag_idx, int ring_idx, dma_addr_t daddr,	\
+			size_t size, struct sk_buff *skb),		\
+		TP_ARGS(frag_idx, ring_idx, daddr, size, skb))
+
+DEFINE_UNMAP_EVENT(atl_dma_unmap_head);
+DEFINE_UNMAP_EVENT(atl_dma_unmap_frag);
+DEFINE_UNMAP_EVENT(atl_dma_unmap_rxbuf);
+
+TRACE_EVENT(atl_fill_rx_desc,
+	TP_PROTO(int ring_idx, struct atl_rx_desc *desc),
+	TP_ARGS(ring_idx, desc),
+	TP_STRUCT__entry(
+		__field(int, ring_idx)
+		__field(dma_addr_t, daddr)
+		__field(dma_addr_t, haddr)
+	),
+	TP_fast_assign(
+		__entry->ring_idx = ring_idx;
+		__entry->daddr = desc->daddr;
+		__entry->haddr = desc->haddr;
+	),
+	TP_printk("[%d] daddr %pad", __entry->ring_idx, &__entry->daddr)
+);
+
+TRACE_EVENT(atl_sync_rx_range,
+	TP_PROTO(int ring_idx, dma_addr_t daddr, unsigned long pg_off,
+		size_t size),
+	TP_ARGS(ring_idx, daddr, pg_off, size),
+	TP_STRUCT__entry(
+		__field(int, ring_idx)
+		__field(dma_addr_t, daddr)
+		__field(unsigned long, pg_off)
+		__field(size_t, size)
+	),
+	TP_fast_assign(
+		__entry->ring_idx = ring_idx;
+		__entry->daddr = daddr;
+		__entry->pg_off = pg_off;
+		__entry->size = size;
+	),
+	TP_printk("[%d] daddr %pad pg_off %#lx size %#zx", __entry->ring_idx,
+		&__entry->daddr, __entry->pg_off, __entry->size)
+);
+
+#endif /* _ATL_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef  TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE atl_trace
+#include <trace/define_trace.h>
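With CREATE_TRACE_POINTS defined in atl_trace.c above, each DEFINE_*_EVENT expands to a callable trace_<name>() helper. A usage sketch with illustrative arguments, plus the tracefs path the TRACE_SYSTEM name maps to:

#include <linux/skbuff.h>
#include "atl_trace.h"

/* Sketch: emit the head-buffer map event after a successful mapping. */
static void atl_example_emit(int frag_idx, int ring_idx, dma_addr_t daddr,
			     size_t size, struct sk_buff *skb, void *vaddr)
{
	trace_atl_dma_map_head(frag_idx, ring_idx, daddr, size, skb, vaddr);
}

/* Enable at run time:
 *   echo 1 > /sys/kernel/debug/tracing/events/atlnew/atl_dma_map_head/enable
 */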
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 0de487a..3db54b6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1282,6 +1282,7 @@
 	BNX2X_SP_RTNL_TX_STOP,
 	BNX2X_SP_RTNL_GET_DRV_VERSION,
 	BNX2X_SP_RTNL_CHANGE_UDP_PORT,
+	BNX2X_SP_RTNL_UPDATE_SVID,
 };
 
 enum bnx2x_iov_flag {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index fcc2328..a585f10 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -2925,6 +2925,10 @@
 	func_params.f_obj = &bp->func_obj;
 	func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
 
+	/* Prepare parameters for function state transitions */
+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
 	if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
 		int func = BP_ABS_FUNC(bp);
 		u32 val;
@@ -4301,7 +4305,8 @@
 				bnx2x_handle_eee_event(bp);
 
 			if (val & DRV_STATUS_OEM_UPDATE_SVID)
-				bnx2x_handle_update_svid_cmd(bp);
+				bnx2x_schedule_sp_rtnl(bp,
+					BNX2X_SP_RTNL_UPDATE_SVID, 0);
 
 			if (bp->link_vars.periodic_flags &
 			    PERIODIC_FLAGS_LINK_EVENT) {
@@ -8462,6 +8467,7 @@
 	/* Fill a user request section if needed */
 	if (!test_bit(RAMROD_CONT, ramrod_flags)) {
 		ramrod_param.user_req.u.vlan.vlan = vlan;
+		__set_bit(BNX2X_VLAN, &ramrod_param.user_req.vlan_mac_flags);
 		/* Set the command: ADD or DEL */
 		if (set)
 			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
@@ -8482,6 +8488,27 @@
 	return rc;
 }
 
+static int bnx2x_del_all_vlans(struct bnx2x *bp)
+{
+	struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
+	unsigned long ramrod_flags = 0, vlan_flags = 0;
+	struct bnx2x_vlan_entry *vlan;
+	int rc;
+
+	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+	__set_bit(BNX2X_VLAN, &vlan_flags);
+	rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_flags, &ramrod_flags);
+	if (rc)
+		return rc;
+
+	/* Mark that hw forgot all entries */
+	list_for_each_entry(vlan, &bp->vlan_reg, link)
+		vlan->hw = false;
+	bp->vlan_cnt = 0;
+
+	return 0;
+}
+
 int bnx2x_del_all_macs(struct bnx2x *bp,
 		       struct bnx2x_vlan_mac_obj *mac_obj,
 		       int mac_type, bool wait_for_comp)
@@ -9320,6 +9347,17 @@
 		BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
 			  rc);
 
+	/* The whole *vlan_obj structure may not be initialized if VLAN
+	 * filtering offload is not supported by hardware. Currently this is
+	 * true for all hardware covered by CHIP_IS_E1x().
+	 */
+	if (!CHIP_IS_E1x(bp)) {
+		/* Remove all currently configured VLANs */
+		rc = bnx2x_del_all_vlans(bp);
+		if (rc < 0)
+			BNX2X_ERR("Failed to delete all VLANs\n");
+	}
+
 	/* Disable LLH */
 	if (!CHIP_IS_E1(bp))
 		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
@@ -10349,6 +10387,9 @@
 			       &bp->sp_rtnl_state))
 		bnx2x_update_mng_version(bp);
 
+	if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state))
+		bnx2x_handle_update_svid_cmd(bp);
+
 	if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT,
 			       &bp->sp_rtnl_state)) {
 		if (bnx2x_udp_port_update(bp)) {
@@ -11740,8 +11781,10 @@
 	 * If maximum allowed number of connections is zero -
 	 * disable the feature.
 	 */
-	if (!bp->cnic_eth_dev.max_fcoe_conn)
+	if (!bp->cnic_eth_dev.max_fcoe_conn) {
 		bp->flags |= NO_FCOE_FLAG;
+		eth_zero_addr(bp->fip_mac);
+	}
 }
 
 static void bnx2x_get_cnic_info(struct bnx2x *bp)
@@ -13014,13 +13057,6 @@
 
 int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
 {
-	struct bnx2x_vlan_entry *vlan;
-
-	/* The hw forgot all entries after reload */
-	list_for_each_entry(vlan, &bp->vlan_reg, link)
-		vlan->hw = false;
-	bp->vlan_cnt = 0;
-
 	/* Don't set rx mode here. Our caller will do it. */
 	bnx2x_vlan_configure(bp, false);
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 0bf2fd4..7a6e82d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -265,6 +265,7 @@
 	BNX2X_ETH_MAC,
 	BNX2X_ISCSI_ETH_MAC,
 	BNX2X_NETQ_ETH_MAC,
+	BNX2X_VLAN,
 	BNX2X_DONT_CONSUME_CAM_CREDIT,
 	BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
 };
@@ -272,7 +273,8 @@
 #define BNX2X_VLAN_MAC_CMP_MASK	(1 << BNX2X_UC_LIST_MAC | \
 				 1 << BNX2X_ETH_MAC | \
 				 1 << BNX2X_ISCSI_ETH_MAC | \
-				 1 << BNX2X_NETQ_ETH_MAC)
+				 1 << BNX2X_NETQ_ETH_MAC | \
+				 1 << BNX2X_VLAN)
 #define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \
 	((flags) & BNX2X_VLAN_MAC_CMP_MASK)
 
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 58b9744..8f4b2f9 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -61,7 +61,8 @@
 #define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 					| MACB_BIT(ISR_RLE)		\
 					| MACB_BIT(TXERR))
-#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
+#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)	\
+					| MACB_BIT(TXUBR))
 
 /* Max length of transmit frame must be a multiple of 8 bytes */
 #define MACB_TX_LEN_ALIGN	8
@@ -681,6 +682,11 @@
 	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
 		desc_64 = macb_64b_desc(bp, desc);
 		desc_64->addrh = upper_32_bits(addr);
+		/* The low bits of RX address contain the RX_USED bit, clearing
+		 * of which allows packet RX. Make sure the high bits are also
+		 * visible to HW at that point.
+		 */
+		dma_wmb();
 	}
 #endif
 	desc->addr = lower_32_bits(addr);
@@ -929,14 +935,19 @@
 
 			if (entry == bp->rx_ring_size - 1)
 				paddr |= MACB_BIT(RX_WRAP);
-			macb_set_addr(bp, desc, paddr);
 			desc->ctrl = 0;
+			/* Setting addr clears RX_USED and allows reception,
+			 * make sure ctrl is cleared first to avoid a race.
+			 */
+			dma_wmb();
+			macb_set_addr(bp, desc, paddr);
 
 			/* properly align Ethernet header */
 			skb_reserve(skb, NET_IP_ALIGN);
 		} else {
-			desc->addr &= ~MACB_BIT(RX_USED);
 			desc->ctrl = 0;
+			dma_wmb();
+			desc->addr &= ~MACB_BIT(RX_USED);
 		}
 	}
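The three changes above enforce one publish order: ctrl and the high address word must be visible to the controller before the RX_USED bit in the low word is cleared, since clearing RX_USED is what hands the descriptor back to hardware. The pattern in isolation, as a standalone sketch with a simplified descriptor layout:

/* Sketch: hand a descriptor to the device with the ownership bit last. */
struct example_rx_desc {
	u32 addr;	/* low bits double as the RX_USED ownership flag */
	u32 ctrl;
	u32 addrh;
};

static void example_publish(struct example_rx_desc *desc, dma_addr_t paddr)
{
	desc->ctrl = 0;
	desc->addrh = upper_32_bits(paddr);
	dma_wmb();	/* ctrl/addrh visible before ownership transfer */
	desc->addr = lower_32_bits(paddr);	/* clears RX_USED */
}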
 
@@ -990,11 +1001,15 @@
 
 		rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
 		addr = macb_get_addr(bp, desc);
-		ctrl = desc->ctrl;
 
 		if (!rxused)
 			break;
 
+		/* Ensure ctrl is at least as up-to-date as rxused */
+		dma_rmb();
+
+		ctrl = desc->ctrl;
+
 		queue->rx_tail++;
 		count++;
 
@@ -1169,11 +1184,14 @@
 		/* Make hw descriptor updates visible to CPU */
 		rmb();
 
-		ctrl = desc->ctrl;
-
 		if (!(desc->addr & MACB_BIT(RX_USED)))
 			break;
 
+		/* Ensure ctrl is at least as up-to-date as addr */
+		dma_rmb();
+
+		ctrl = desc->ctrl;
+
 		if (ctrl & MACB_BIT(RX_SOF)) {
 			if (first_frag != -1)
 				discard_partial_frame(queue, first_frag, tail);
@@ -1313,6 +1331,21 @@
 	netif_tx_start_all_queues(dev);
 }
 
+static void macb_tx_restart(struct macb_queue *queue)
+{
+	unsigned int head = queue->tx_head;
+	unsigned int tail = queue->tx_tail;
+	struct macb *bp = queue->bp;
+
+	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+		queue_writel(queue, ISR, MACB_BIT(TXUBR));
+
+	if (head == tail)
+		return;
+
+	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+}
+
 static irqreturn_t macb_interrupt(int irq, void *dev_id)
 {
 	struct macb_queue *queue = dev_id;
@@ -1370,6 +1403,9 @@
 		if (status & MACB_BIT(TCOMP))
 			macb_tx_interrupt(queue);
 
+		if (status & MACB_BIT(TXUBR))
+			macb_tx_restart(queue);
+
 		/* Link change detection isn't possible with RMII, so we'll
 		 * add that if/when we get our hands on a full-blown MII PHY.
 		 */
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index cd5296b8..a6dc47e 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -319,6 +319,8 @@
 	desc_ptp = macb_ptp_desc(queue->bp, desc);
 	tx_timestamp = &queue->tx_timestamps[head];
 	tx_timestamp->skb = skb;
+	/* ensure ts_1/ts_2 is loaded after ctrl (TX_USED check) */
+	dma_rmb();
 	tx_timestamp->desc_ptp.ts_1 = desc_ptp->ts_1;
 	tx_timestamp->desc_ptp.ts_2 = desc_ptp->ts_2;
 	/* move head */
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 22a817d..1e2b53a 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1888,6 +1888,8 @@
 	u16 i, j;
 	u8 __iomem *bd;
 
+	netdev_reset_queue(ugeth->ndev);
+
 	ug_info = ugeth->ug_info;
 	uf_info = &ug_info->uf_info;
 
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index b52029e..ad1779f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -379,6 +379,9 @@
 
 	hns_ae_ring_enable_all(handle, 0);
 
+	/* clean rx fbd. */
+	hns_rcb_wait_fbd_clean(handle->qs, handle->q_num, RCB_INT_FLAG_RX);
+
 	(void)hns_mac_vm_config_bc_en(mac_cb, 0, false);
 }
 
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 09e4061..aa2c25d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -67,11 +67,14 @@
 	struct mac_driver *drv = (struct mac_driver *)mac_drv;
 
 	/*enable GE rX/tX */
-	if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+	if (mode == MAC_COMM_MODE_TX || mode == MAC_COMM_MODE_RX_AND_TX)
 		dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 1);
 
-	if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+	if (mode == MAC_COMM_MODE_RX || mode == MAC_COMM_MODE_RX_AND_TX) {
+		/* enable rx pcs */
+		dsaf_set_dev_bit(drv, GMAC_PCS_RX_EN_REG, 0, 0);
 		dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 1);
+	}
 }
 
 static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode)
@@ -79,11 +82,14 @@
 	struct mac_driver *drv = (struct mac_driver *)mac_drv;
 
 	/*disable GE rX/tX */
-	if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+	if (mode == MAC_COMM_MODE_TX || mode == MAC_COMM_MODE_RX_AND_TX)
 		dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 0);
 
-	if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+	if (mode == MAC_COMM_MODE_RX || mode == MAC_COMM_MODE_RX_AND_TX) {
+		/* disable rx pcs */
+		dsaf_set_dev_bit(drv, GMAC_PCS_RX_EN_REG, 0, 1);
 		dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 0);
+	}
 }
 
 /* hns_gmac_get_en - get port enable
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 6ed6f14..cfdc92d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -778,6 +778,17 @@
 	return rc;
 }
 
+static void hns_mac_remove_phydev(struct hns_mac_cb *mac_cb)
+{
+	if (!to_acpi_device_node(mac_cb->fw_port) || !mac_cb->phy_dev)
+		return;
+
+	phy_device_remove(mac_cb->phy_dev);
+	phy_device_free(mac_cb->phy_dev);
+
+	mac_cb->phy_dev = NULL;
+}
+
 #define MAC_MEDIA_TYPE_MAX_LEN		16
 
 static const struct {
@@ -1117,7 +1128,11 @@
 	int max_port_num = hns_mac_get_max_port_num(dsaf_dev);
 
 	for (i = 0; i < max_port_num; i++) {
+		if (!dsaf_dev->mac_cb[i])
+			continue;
+
 		dsaf_dev->misc_op->cpld_reset_led(dsaf_dev->mac_cb[i]);
+		hns_mac_remove_phydev(dsaf_dev->mac_cb[i]);
 		dsaf_dev->mac_cb[i] = NULL;
 	}
 }
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index e557a4e..3b9e74b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -935,6 +935,62 @@
 }
 
 /**
+ * hns_dsaf_tcam_uc_cfg_vague - config a unicast tcam entry with a vague match
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: tcam entry index
+ * @tcam_data: tcam data to write
+ * @tcam_mask: tcam match mask
+ * @tcam_uc: unicast entry config
+ */
+static void hns_dsaf_tcam_uc_cfg_vague(struct dsaf_device *dsaf_dev,
+				       u32 address,
+				       struct dsaf_tbl_tcam_data *tcam_data,
+				       struct dsaf_tbl_tcam_data *tcam_mask,
+				       struct dsaf_tbl_tcam_ucast_cfg *tcam_uc)
+{
+	spin_lock_bh(&dsaf_dev->tcam_lock);
+	hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+	hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, tcam_data);
+	hns_dsaf_tbl_tcam_ucast_cfg(dsaf_dev, tcam_uc);
+	hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+	hns_dsaf_tbl_tcam_data_ucast_pul(dsaf_dev);
+
+	/*Restore Match Data*/
+	tcam_mask->tbl_tcam_data_high = 0xffffffff;
+	tcam_mask->tbl_tcam_data_low = 0xffffffff;
+	hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+
+	spin_unlock_bh(&dsaf_dev->tcam_lock);
+}
+
+/**
+ * hns_dsaf_tcam_mc_cfg_vague - config a multicast tcam entry with a vague match
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: tcam entry index
+ * @tcam_data: tcam data to write
+ * @tcam_mask: tcam match mask
+ * @tcam_mc: multicast entry config
+ */
+static void hns_dsaf_tcam_mc_cfg_vague(struct dsaf_device *dsaf_dev,
+				       u32 address,
+				       struct dsaf_tbl_tcam_data *tcam_data,
+				       struct dsaf_tbl_tcam_data *tcam_mask,
+				       struct dsaf_tbl_tcam_mcast_cfg *tcam_mc)
+{
+	spin_lock_bh(&dsaf_dev->tcam_lock);
+	hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+	hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, tcam_data);
+	hns_dsaf_tbl_tcam_mcast_cfg(dsaf_dev, tcam_mc);
+	hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+	hns_dsaf_tbl_tcam_data_mcast_pul(dsaf_dev);
+
+	/*Restore Match Data*/
+	tcam_mask->tbl_tcam_data_high = 0xffffffff;
+	tcam_mask->tbl_tcam_data_low = 0xffffffff;
+	hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+
+	spin_unlock_bh(&dsaf_dev->tcam_lock);
+}
+
+/**
  * hns_dsaf_tcam_mc_invld - INT
  * @dsaf_id: dsa fabric id
  * @address
@@ -1493,6 +1549,27 @@
 }
 
 /**
+ * hns_dsaf_find_empty_mac_entry_reverse - search the soft mac table for an
+ *	empty entry, scanning from the last index backwards
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static u16 hns_dsaf_find_empty_mac_entry_reverse(struct dsaf_device *dsaf_dev)
+{
+	struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
+	struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+	int i;
+
+	soft_mac_entry = priv->soft_mac_tbl + (DSAF_TCAM_SUM - 1);
+	for (i = (DSAF_TCAM_SUM - 1); i > 0; i--) {
+		/* search all entries from end to start. */
+		if (soft_mac_entry->index == DSAF_INVALID_ENTRY_IDX)
+			return i;
+		soft_mac_entry--;
+	}
+	return DSAF_INVALID_ENTRY_IDX;
+}
+
+/**
  * hns_dsaf_set_mac_key - set mac key
  * @dsaf_dev: dsa fabric device struct pointer
  * @mac_key: tcam key pointer
@@ -2166,9 +2243,9 @@
 		DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG + 0x80 * (u64)node_num);
 
 	hw_stats->vlan_drop += dsaf_read_dev(dsaf_dev,
-		DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 0x80 * (u64)node_num);
+		DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 4 * (u64)node_num);
 	hw_stats->stp_drop += dsaf_read_dev(dsaf_dev,
-		DSAF_INODE_IN_DATA_STP_DISC_0_REG + 0x80 * (u64)node_num);
+		DSAF_INODE_IN_DATA_STP_DISC_0_REG + 4 * (u64)node_num);
 
 	/* pfc pause frame statistics stored in dsaf inode*/
 	if ((node_num < DSAF_SERVICE_NW_NUM) && !is_ver1) {
@@ -2285,237 +2362,237 @@
 				DSAF_INODE_BD_ORDER_STATUS_0_REG + j * 4);
 		p[223 + i] = dsaf_read_dev(ddev,
 				DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + j * 4);
-		p[224 + i] = dsaf_read_dev(ddev,
+		p[226 + i] = dsaf_read_dev(ddev,
 				DSAF_INODE_IN_DATA_STP_DISC_0_REG + j * 4);
 	}
 
-	p[227] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4);
+	p[229] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4);
 
 	for (i = 0; i < DSAF_INODE_NUM / DSAF_COMM_CHN; i++) {
 		j = i * DSAF_COMM_CHN + port;
-		p[228 + i] = dsaf_read_dev(ddev,
+		p[230 + i] = dsaf_read_dev(ddev,
 				DSAF_INODE_VC0_IN_PKT_NUM_0_REG + j * 4);
 	}
 
-	p[231] = dsaf_read_dev(ddev,
-		DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 4);
+	p[233] = dsaf_read_dev(ddev,
+		DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 0x80);
 
 	/* dsaf inode registers */
 	for (i = 0; i < HNS_DSAF_SBM_NUM(ddev) / DSAF_COMM_CHN; i++) {
 		j = i * DSAF_COMM_CHN + port;
-		p[232 + i] = dsaf_read_dev(ddev,
+		p[234 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_CFG_REG_0_REG + j * 0x80);
-		p[235 + i] = dsaf_read_dev(ddev,
+		p[237 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + j * 0x80);
-		p[238 + i] = dsaf_read_dev(ddev,
+		p[240 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_BP_CFG_1_REG_0_REG + j * 0x80);
-		p[241 + i] = dsaf_read_dev(ddev,
+		p[243 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + j * 0x80);
-		p[244 + i] = dsaf_read_dev(ddev,
+		p[246 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_FREE_CNT_0_0_REG + j * 0x80);
-		p[245 + i] = dsaf_read_dev(ddev,
+		p[249 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_FREE_CNT_1_0_REG + j * 0x80);
-		p[248 + i] = dsaf_read_dev(ddev,
+		p[252 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_BP_CNT_0_0_REG + j * 0x80);
-		p[251 + i] = dsaf_read_dev(ddev,
+		p[255 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_BP_CNT_1_0_REG + j * 0x80);
-		p[254 + i] = dsaf_read_dev(ddev,
+		p[258 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_BP_CNT_2_0_REG + j * 0x80);
-		p[257 + i] = dsaf_read_dev(ddev,
+		p[261 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_BP_CNT_3_0_REG + j * 0x80);
-		p[260 + i] = dsaf_read_dev(ddev,
+		p[264 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_INER_ST_0_REG + j * 0x80);
-		p[263 + i] = dsaf_read_dev(ddev,
+		p[267 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_MIB_REQ_FAILED_TC_0_REG + j * 0x80);
-		p[266 + i] = dsaf_read_dev(ddev,
+		p[270 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_LNK_INPORT_CNT_0_REG + j * 0x80);
-		p[269 + i] = dsaf_read_dev(ddev,
+		p[273 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_LNK_DROP_CNT_0_REG + j * 0x80);
-		p[272 + i] = dsaf_read_dev(ddev,
+		p[276 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_INF_OUTPORT_CNT_0_REG + j * 0x80);
-		p[275 + i] = dsaf_read_dev(ddev,
+		p[279 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_LNK_INPORT_TC0_CNT_0_REG + j * 0x80);
-		p[278 + i] = dsaf_read_dev(ddev,
+		p[282 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_LNK_INPORT_TC1_CNT_0_REG + j * 0x80);
-		p[281 + i] = dsaf_read_dev(ddev,
+		p[285 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_LNK_INPORT_TC2_CNT_0_REG + j * 0x80);
-		p[284 + i] = dsaf_read_dev(ddev,
+		p[288 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_LNK_INPORT_TC3_CNT_0_REG + j * 0x80);
-		p[287 + i] = dsaf_read_dev(ddev,
+		p[291 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_LNK_INPORT_TC4_CNT_0_REG + j * 0x80);
-		p[290 + i] = dsaf_read_dev(ddev,
+		p[294 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_LNK_INPORT_TC5_CNT_0_REG + j * 0x80);
-		p[293 + i] = dsaf_read_dev(ddev,
+		p[297 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_LNK_INPORT_TC6_CNT_0_REG + j * 0x80);
-		p[296 + i] = dsaf_read_dev(ddev,
+		p[300 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_LNK_INPORT_TC7_CNT_0_REG + j * 0x80);
-		p[299 + i] = dsaf_read_dev(ddev,
+		p[303 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_LNK_REQ_CNT_0_REG + j * 0x80);
-		p[302 + i] = dsaf_read_dev(ddev,
+		p[306 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_LNK_RELS_CNT_0_REG + j * 0x80);
-		p[305 + i] = dsaf_read_dev(ddev,
+		p[309 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_BP_CFG_3_REG_0_REG + j * 0x80);
-		p[308 + i] = dsaf_read_dev(ddev,
+		p[312 + i] = dsaf_read_dev(ddev,
 				DSAF_SBM_BP_CFG_4_REG_0_REG + j * 0x80);
 	}
 
 	/* dsaf onode registers */
 	for (i = 0; i < DSAF_XOD_NUM; i++) {
-		p[311 + i] = dsaf_read_dev(ddev,
+		p[315 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + i * 0x90);
-		p[319 + i] = dsaf_read_dev(ddev,
+		p[323 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + i * 0x90);
-		p[327 + i] = dsaf_read_dev(ddev,
+		p[331 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + i * 0x90);
-		p[335 + i] = dsaf_read_dev(ddev,
+		p[339 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + i * 0x90);
-		p[343 + i] = dsaf_read_dev(ddev,
+		p[347 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + i * 0x90);
-		p[351 + i] = dsaf_read_dev(ddev,
+		p[355 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_ETS_TOKEN_CFG_0_REG + i * 0x90);
 	}
 
-	p[359] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
-	p[360] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90);
-	p[361] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90);
+	p[363] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
+	p[364] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90);
+	p[365] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90);
 
 	for (i = 0; i < DSAF_XOD_BIG_NUM / DSAF_COMM_CHN; i++) {
 		j = i * DSAF_COMM_CHN + port;
-		p[362 + i] = dsaf_read_dev(ddev,
+		p[366 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_GNT_L_0_REG + j * 0x90);
-		p[365 + i] = dsaf_read_dev(ddev,
+		p[369 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_GNT_H_0_REG + j * 0x90);
-		p[368 + i] = dsaf_read_dev(ddev,
+		p[372 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_CONNECT_STATE_0_REG + j * 0x90);
-		p[371 + i] = dsaf_read_dev(ddev,
+		p[375 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_RCVPKT_CNT_0_REG + j * 0x90);
-		p[374 + i] = dsaf_read_dev(ddev,
+		p[378 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_RCVTC0_CNT_0_REG + j * 0x90);
-		p[377 + i] = dsaf_read_dev(ddev,
+		p[381 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_RCVTC1_CNT_0_REG + j * 0x90);
-		p[380 + i] = dsaf_read_dev(ddev,
+		p[384 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_RCVTC2_CNT_0_REG + j * 0x90);
-		p[383 + i] = dsaf_read_dev(ddev,
+		p[387 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_RCVTC3_CNT_0_REG + j * 0x90);
-		p[386 + i] = dsaf_read_dev(ddev,
+		p[390 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_RCVVC0_CNT_0_REG + j * 0x90);
-		p[389 + i] = dsaf_read_dev(ddev,
+		p[393 + i] = dsaf_read_dev(ddev,
 				DSAF_XOD_RCVVC1_CNT_0_REG + j * 0x90);
 	}
 
-	p[392] = dsaf_read_dev(ddev,
-		DSAF_XOD_XGE_RCVIN0_CNT_0_REG + port * 0x90);
-	p[393] = dsaf_read_dev(ddev,
-		DSAF_XOD_XGE_RCVIN1_CNT_0_REG + port * 0x90);
-	p[394] = dsaf_read_dev(ddev,
-		DSAF_XOD_XGE_RCVIN2_CNT_0_REG + port * 0x90);
-	p[395] = dsaf_read_dev(ddev,
-		DSAF_XOD_XGE_RCVIN3_CNT_0_REG + port * 0x90);
 	p[396] = dsaf_read_dev(ddev,
-		DSAF_XOD_XGE_RCVIN4_CNT_0_REG + port * 0x90);
+		DSAF_XOD_XGE_RCVIN0_CNT_0_REG + port * 0x90);
 	p[397] = dsaf_read_dev(ddev,
-		DSAF_XOD_XGE_RCVIN5_CNT_0_REG + port * 0x90);
+		DSAF_XOD_XGE_RCVIN1_CNT_0_REG + port * 0x90);
 	p[398] = dsaf_read_dev(ddev,
-		DSAF_XOD_XGE_RCVIN6_CNT_0_REG + port * 0x90);
+		DSAF_XOD_XGE_RCVIN2_CNT_0_REG + port * 0x90);
 	p[399] = dsaf_read_dev(ddev,
-		DSAF_XOD_XGE_RCVIN7_CNT_0_REG + port * 0x90);
+		DSAF_XOD_XGE_RCVIN3_CNT_0_REG + port * 0x90);
 	p[400] = dsaf_read_dev(ddev,
-		DSAF_XOD_PPE_RCVIN0_CNT_0_REG + port * 0x90);
+		DSAF_XOD_XGE_RCVIN4_CNT_0_REG + port * 0x90);
 	p[401] = dsaf_read_dev(ddev,
-		DSAF_XOD_PPE_RCVIN1_CNT_0_REG + port * 0x90);
+		DSAF_XOD_XGE_RCVIN5_CNT_0_REG + port * 0x90);
 	p[402] = dsaf_read_dev(ddev,
-		DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG + port * 0x90);
+		DSAF_XOD_XGE_RCVIN6_CNT_0_REG + port * 0x90);
 	p[403] = dsaf_read_dev(ddev,
-		DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG + port * 0x90);
+		DSAF_XOD_XGE_RCVIN7_CNT_0_REG + port * 0x90);
 	p[404] = dsaf_read_dev(ddev,
+		DSAF_XOD_PPE_RCVIN0_CNT_0_REG + port * 0x90);
+	p[405] = dsaf_read_dev(ddev,
+		DSAF_XOD_PPE_RCVIN1_CNT_0_REG + port * 0x90);
+	p[406] = dsaf_read_dev(ddev,
+		DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG + port * 0x90);
+	p[407] = dsaf_read_dev(ddev,
+		DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG + port * 0x90);
+	p[408] = dsaf_read_dev(ddev,
 		DSAF_XOD_FIFO_STATUS_0_REG + port * 0x90);
 
 	/* dsaf voq registers */
 	for (i = 0; i < DSAF_VOQ_NUM / DSAF_COMM_CHN; i++) {
 		j = (i * DSAF_COMM_CHN + port) * 0x90;
-		p[405 + i] = dsaf_read_dev(ddev,
+		p[409 + i] = dsaf_read_dev(ddev,
 			DSAF_VOQ_ECC_INVERT_EN_0_REG + j);
-		p[408 + i] = dsaf_read_dev(ddev,
+		p[412 + i] = dsaf_read_dev(ddev,
 			DSAF_VOQ_SRAM_PKT_NUM_0_REG + j);
-		p[411 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j);
-		p[414 + i] = dsaf_read_dev(ddev,
+		p[415 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j);
+		p[418 + i] = dsaf_read_dev(ddev,
 			DSAF_VOQ_OUT_PKT_NUM_0_REG + j);
-		p[417 + i] = dsaf_read_dev(ddev,
+		p[421 + i] = dsaf_read_dev(ddev,
 			DSAF_VOQ_ECC_ERR_ADDR_0_REG + j);
-		p[420 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j);
-		p[423 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j);
-		p[426 + i] = dsaf_read_dev(ddev,
+		p[424 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j);
+		p[427 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j);
+		p[430 + i] = dsaf_read_dev(ddev,
 			DSAF_VOQ_XGE_XOD_REQ_0_0_REG + j);
-		p[429 + i] = dsaf_read_dev(ddev,
+		p[433 + i] = dsaf_read_dev(ddev,
 			DSAF_VOQ_XGE_XOD_REQ_1_0_REG + j);
-		p[432 + i] = dsaf_read_dev(ddev,
+		p[436 + i] = dsaf_read_dev(ddev,
 			DSAF_VOQ_PPE_XOD_REQ_0_REG + j);
-		p[435 + i] = dsaf_read_dev(ddev,
+		p[439 + i] = dsaf_read_dev(ddev,
 			DSAF_VOQ_ROCEE_XOD_REQ_0_REG + j);
-		p[438 + i] = dsaf_read_dev(ddev,
+		p[442 + i] = dsaf_read_dev(ddev,
 			DSAF_VOQ_BP_ALL_THRD_0_REG + j);
 	}
 
 	/* dsaf tbl registers */
-	p[441] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG);
-	p[442] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG);
-	p[443] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG);
-	p[444] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG);
-	p[445] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG);
-	p[446] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG);
-	p[447] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG);
-	p[448] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG);
-	p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
-	p[450] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG);
-	p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG);
-	p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG);
-	p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG);
-	p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
-	p[455] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG);
-	p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
-	p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
-	p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
-	p[459] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
-	p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
-	p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
-	p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
-	p[463] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG);
+	p[445] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG);
+	p[446] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG);
+	p[447] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG);
+	p[448] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG);
+	p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG);
+	p[450] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG);
+	p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG);
+	p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG);
+	p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
+	p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG);
+	p[455] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG);
+	p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG);
+	p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG);
+	p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
+	p[459] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG);
+	p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+	p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
+	p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
+	p[463] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
+	p[464] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
+	p[465] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
+	p[466] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
+	p[467] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG);
 
 	for (i = 0; i < DSAF_SW_PORT_NUM; i++) {
 		j = i * 0x8;
-		p[464 + 2 * i] = dsaf_read_dev(ddev,
+		p[468 + 2 * i] = dsaf_read_dev(ddev,
 			DSAF_TBL_DA0_MIS_INFO1_0_REG + j);
-		p[465 + 2 * i] = dsaf_read_dev(ddev,
+		p[469 + 2 * i] = dsaf_read_dev(ddev,
 			DSAF_TBL_DA0_MIS_INFO0_0_REG + j);
 	}
 
-	p[480] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG);
-	p[481] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG);
-	p[482] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG);
-	p[483] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG);
-	p[484] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG);
-	p[485] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG);
-	p[486] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG);
-	p[487] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG);
-	p[488] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG);
-	p[489] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG);
-	p[490] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG);
-	p[491] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG);
+	p[484] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG);
+	p[485] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG);
+	p[486] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG);
+	p[487] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG);
+	p[488] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG);
+	p[489] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG);
+	p[490] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG);
+	p[491] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG);
+	p[492] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG);
+	p[493] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG);
+	p[494] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG);
+	p[495] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG);
 
 	/* dsaf other registers */
-	p[492] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4);
-	p[493] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4);
-	p[494] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4);
-	p[495] = dsaf_read_dev(ddev,
+	p[496] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4);
+	p[497] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4);
+	p[498] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4);
+	p[499] = dsaf_read_dev(ddev,
 		DSAF_XGE_APP_RX_LINK_UP_0_REG + port * 0x4);
-	p[496] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
-	p[497] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
+	p[500] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
+	p[501] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
 
 	if (!is_ver1)
-		p[498] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4);
+		p[502] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4);
 
 	/* mark end of dsaf regs */
-	for (i = 499; i < 504; i++)
+	for (i = 503; i < 504; i++)
 		p[i] = 0xdddddddd;
 }
 
@@ -2673,58 +2750,156 @@
 	return DSAF_DUMP_REGS_NUM;
 }
 
+static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
+{
+	struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80};
+	struct dsaf_tbl_tcam_data tbl_tcam_data_mc = {0x01000000, port};
+	struct dsaf_tbl_tcam_data tbl_tcam_mask_uc = {0x01000000, 0xf};
+	struct dsaf_tbl_tcam_mcast_cfg tbl_tcam_mcast = {0, 0, {0} };
+	struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
+	struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, port};
+	struct dsaf_drv_mac_single_dest_entry mask_entry;
+	struct dsaf_drv_tbl_tcam_key temp_key, mask_key;
+	struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+	u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+	struct dsaf_drv_tbl_tcam_key mac_key;
+	struct hns_mac_cb *mac_cb;
+	u8 addr[ETH_ALEN] = {0};
+	u8 port_num;
+	u16 mskid;
+
+	/* promisc uses a vague tcam match with vlan id 0 and a zero mac addr */
+	hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
+	entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+	if (entry_index != DSAF_INVALID_ENTRY_IDX)
+		return;
+
+	/* put the promisc tcam entries at the end. */
+	/* 1. set promisc unicast vague tcam entry. */
+	entry_index = hns_dsaf_find_empty_mac_entry_reverse(dsaf_dev);
+	if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+		dev_err(dsaf_dev->dev,
+			"enable uc promisc failed (port:%#x)\n",
+			port);
+		return;
+	}
+
+	mac_cb = dsaf_dev->mac_cb[port];
+	(void)hns_mac_get_inner_port_num(mac_cb, 0, &port_num);
+	tbl_tcam_ucast.tbl_ucast_out_port = port_num;
+
+	/* config uc vague table */
+	hns_dsaf_tcam_uc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_uc,
+				   &tbl_tcam_mask_uc, &tbl_tcam_ucast);
+
+	/* update software entry */
+	soft_mac_entry = priv->soft_mac_tbl;
+	soft_mac_entry += entry_index;
+	soft_mac_entry->index = entry_index;
+	soft_mac_entry->tcam_key.high.val = mac_key.high.val;
+	soft_mac_entry->tcam_key.low.val = mac_key.low.val;
+	/* step back to the START for mc. */
+	soft_mac_entry = priv->soft_mac_tbl;
+
+	/* 2. set promisc multicast vague tcam entry. */
+	entry_index = hns_dsaf_find_empty_mac_entry_reverse(dsaf_dev);
+	if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+		dev_err(dsaf_dev->dev,
+			"enable mc promisc failed (port:%#x)\n",
+			port);
+		return;
+	}
+
+	memset(&mask_entry, 0x0, sizeof(mask_entry));
+	memset(&mask_key, 0x0, sizeof(mask_key));
+	memset(&temp_key, 0x0, sizeof(temp_key));
+	mask_entry.addr[0] = 0x01;
+	hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id,
+			     port, mask_entry.addr);
+	tbl_tcam_mcast.tbl_mcast_item_vld = 1;
+	tbl_tcam_mcast.tbl_mcast_old_en = 0;
+
+	if (port < DSAF_SERVICE_NW_NUM) {
+		mskid = port;
+	} else if (port >= DSAF_BASE_INNER_PORT_NUM) {
+		mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
+	} else {
+		dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n",
+			dsaf_dev->ae_dev.name, port,
+			mask_key.high.val, mask_key.low.val);
+		return;
+	}
+
+	dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
+		     mskid % 32, 1);
+	memcpy(&temp_key, &mask_key, sizeof(mask_key));
+	hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
+				   (struct dsaf_tbl_tcam_data *)(&mask_key),
+				   &tbl_tcam_mcast);
+
+	/* update software entry */
+	soft_mac_entry += entry_index;
+	soft_mac_entry->index = entry_index;
+	soft_mac_entry->tcam_key.high.val = temp_key.high.val;
+	soft_mac_entry->tcam_key.low.val = temp_key.low.val;
+}
+
+static void set_promisc_tcam_disable(struct dsaf_device *dsaf_dev, u32 port)
+{
+	struct dsaf_tbl_tcam_data tbl_tcam_data_mc = {0x01000000, port};
+	struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 0, 0, 0, 0};
+	struct dsaf_tbl_tcam_mcast_cfg tbl_tcam_mcast = {0, 0, {0} };
+	struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
+	struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, 0};
+	struct dsaf_tbl_tcam_data tbl_tcam_mask = {0, 0};
+	struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+	u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+	struct dsaf_drv_tbl_tcam_key mac_key;
+	u8 addr[ETH_ALEN] = {0};
+
+	/* 1. delete uc vague tcam entry. */
+	/* promisc uses a vague tcam match with vlan id 0 and a zero mac addr */
+	hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
+	entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+
+	if (entry_index == DSAF_INVALID_ENTRY_IDX)
+		return;
+
+	/* config uc vague table */
+	hns_dsaf_tcam_uc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_uc,
+				   &tbl_tcam_mask, &tbl_tcam_ucast);
+	/* update soft management table. */
+	soft_mac_entry = priv->soft_mac_tbl;
+	soft_mac_entry += entry_index;
+	soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
+	/* step back to the START for mc. */
+	soft_mac_entry = priv->soft_mac_tbl;
+
+	/* 2. delete mc vague tcam entry. */
+	addr[0] = 0x01;
+	memset(&mac_key, 0x0, sizeof(mac_key));
+	hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
+	entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+
+	if (entry_index == DSAF_INVALID_ENTRY_IDX)
+		return;
+
+	/* config mc vague table */
+	hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
+				   &tbl_tcam_mask, &tbl_tcam_mcast);
+	/* update soft management table. */
+	soft_mac_entry += entry_index;
+	soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
+}
+
 /* Reserve the last TCAM entry for promisc support */
-#define dsaf_promisc_tcam_entry(port) \
-	(DSAF_TCAM_SUM - DSAFV2_MAC_FUZZY_TCAM_NUM + (port))
 void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
 			       u32 port, bool enable)
 {
-	struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
-	struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
-	u16 entry_index;
-	struct dsaf_drv_tbl_tcam_key tbl_tcam_data, tbl_tcam_mask;
-	struct dsaf_tbl_tcam_mcast_cfg mac_data = {0};
-
-	if ((AE_IS_VER1(dsaf_dev->dsaf_ver)) || HNS_DSAF_IS_DEBUG(dsaf_dev))
-		return;
-
-	/* find the tcam entry index for promisc */
-	entry_index = dsaf_promisc_tcam_entry(port);
-
-	memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data));
-	memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask));
-
-	/* config key mask */
-	if (enable) {
-		dsaf_set_field(tbl_tcam_data.low.bits.port_vlan,
-			       DSAF_TBL_TCAM_KEY_PORT_M,
-			       DSAF_TBL_TCAM_KEY_PORT_S, port);
-		dsaf_set_field(tbl_tcam_mask.low.bits.port_vlan,
-			       DSAF_TBL_TCAM_KEY_PORT_M,
-			       DSAF_TBL_TCAM_KEY_PORT_S, 0xf);
-
-		/* SUB_QID */
-		dsaf_set_bit(mac_data.tbl_mcast_port_msk[0],
-			     DSAF_SERVICE_NW_NUM, true);
-		mac_data.tbl_mcast_item_vld = true;	/* item_vld bit */
-	} else {
-		mac_data.tbl_mcast_item_vld = false;	/* item_vld bit */
-	}
-
-	dev_dbg(dsaf_dev->dev,
-		"set_promisc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
-		dsaf_dev->ae_dev.name, tbl_tcam_data.high.val,
-		tbl_tcam_data.low.val, entry_index);
-
-	/* config promisc entry with mask */
-	hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index,
-			     (struct dsaf_tbl_tcam_data *)&tbl_tcam_data,
-			     (struct dsaf_tbl_tcam_data *)&tbl_tcam_mask,
-			     &mac_data);
-
-	/* config software entry */
-	soft_mac_entry += entry_index;
-	soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX;
+	if (enable)
+		set_promisc_tcam_enable(dsaf_dev, port);
+	else
+		set_promisc_tcam_disable(dsaf_dev, port);
 }
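The refactor keeps the external contract of hns_dsaf_set_promisc_tcam() unchanged; callers still toggle the vague-match entries per port. A usage sketch (the port value is illustrative):

/* Sketch: add, then remove, the uc + mc vague entries for port 0. */
hns_dsaf_set_promisc_tcam(dsaf_dev, 0, true);
hns_dsaf_set_promisc_tcam(dsaf_dev, 0, false);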
 
 int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 74d935d..b9733b0 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -176,7 +176,7 @@
 #define DSAF_INODE_IN_DATA_STP_DISC_0_REG	0x1A50
 #define DSAF_INODE_GE_FC_EN_0_REG		0x1B00
 #define DSAF_INODE_VC0_IN_PKT_NUM_0_REG		0x1B50
-#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG		0x1C00
+#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG		0x103C
 #define DSAF_INODE_IN_PRIO_PAUSE_BASE_REG	0x1C00
 #define DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET	0x100
 #define DSAF_INODE_IN_PRIO_PAUSE_OFFSET		0x50
@@ -404,11 +404,11 @@
 #define RCB_ECC_ERR_ADDR4_REG			0x460
 #define RCB_ECC_ERR_ADDR5_REG			0x464
 
-#define RCB_COM_SF_CFG_INTMASK_RING		0x480
-#define RCB_COM_SF_CFG_RING_STS			0x484
-#define RCB_COM_SF_CFG_RING			0x488
-#define RCB_COM_SF_CFG_INTMASK_BD		0x48C
-#define RCB_COM_SF_CFG_BD_RINT_STS		0x470
+#define RCB_COM_SF_CFG_INTMASK_RING		0x470
+#define RCB_COM_SF_CFG_RING_STS			0x474
+#define RCB_COM_SF_CFG_RING			0x478
+#define RCB_COM_SF_CFG_INTMASK_BD		0x47C
+#define RCB_COM_SF_CFG_BD_RINT_STS		0x480
 #define RCB_COM_RCB_RD_BD_BUSY			0x490
 #define RCB_COM_RCB_FBD_CRT_EN			0x494
 #define RCB_COM_AXI_WR_ERR_INTMASK		0x498
@@ -534,6 +534,7 @@
 #define GMAC_LD_LINK_COUNTER_REG		0x01D0UL
 #define GMAC_LOOP_REG				0x01DCUL
 #define GMAC_RECV_CONTROL_REG			0x01E0UL
+#define GMAC_PCS_RX_EN_REG			0x01E4UL
 #define GMAC_VLAN_CODE_REG			0x01E8UL
 #define GMAC_RX_OVERRUN_CNT_REG			0x01ECUL
 #define GMAC_RX_LENGTHFIELD_ERR_CNT_REG		0x01F4UL
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 28e9078..6242249 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1186,6 +1186,9 @@
 	if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
 		phy_dev->autoneg = false;
 
+	if (h->phy_if == PHY_INTERFACE_MODE_SGMII)
+		phy_stop(phy_dev);
+
 	return 0;
 }
 
@@ -1281,6 +1284,22 @@
 	return cpu;
 }
 
+static void hns_nic_free_irq(int q_num, struct hns_nic_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < q_num * 2; i++) {
+		if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
+			irq_set_affinity_hint(priv->ring_data[i].ring->irq,
+					      NULL);
+			free_irq(priv->ring_data[i].ring->irq,
+				 &priv->ring_data[i]);
+			priv->ring_data[i].ring->irq_init_flag =
+				RCB_IRQ_NOT_INITED;
+		}
+	}
+}
+
 static int hns_nic_init_irq(struct hns_nic_priv *priv)
 {
 	struct hnae_handle *h = priv->ae_handle;
@@ -1306,7 +1325,7 @@
 		if (ret) {
 			netdev_err(priv->netdev, "request irq(%d) fail\n",
 				   rd->ring->irq);
-			return ret;
+			goto out_free_irq;
 		}
 		disable_irq(rd->ring->irq);
 
@@ -1321,6 +1340,10 @@
 	}
 
 	return 0;
+
+out_free_irq:
+	hns_nic_free_irq(h->q_num, priv);
+	return ret;
 }
 
 static int hns_nic_net_up(struct net_device *ndev)
@@ -1330,6 +1353,9 @@
 	int i, j;
 	int ret;
 
+	if (!test_bit(NIC_STATE_DOWN, &priv->state))
+		return 0;
+
 	ret = hns_nic_init_irq(priv);
 	if (ret != 0) {
 		netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
@@ -1365,6 +1391,7 @@
 	for (j = i - 1; j >= 0; j--)
 		hns_nic_ring_close(ndev, j);
 
+	hns_nic_free_irq(h->q_num, priv);
 	set_bit(NIC_STATE_DOWN, &priv->state);
 
 	return ret;
@@ -1482,11 +1509,19 @@
 }
 
 static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
+#define HNS_TX_TIMEO_LIMIT (40 * HZ)
 static void hns_nic_net_timeout(struct net_device *ndev)
 {
 	struct hns_nic_priv *priv = netdev_priv(ndev);
 
-	hns_tx_timeout_reset(priv);
+	if (ndev->watchdog_timeo < HNS_TX_TIMEO_LIMIT) {
+		ndev->watchdog_timeo *= 2;
+		netdev_info(ndev, "watchdog_timo changed to %d.\n",
+			    ndev->watchdog_timeo);
+	} else {
+		ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
+		hns_tx_timeout_reset(priv);
+	}
 }
 
 static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
@@ -2049,11 +2084,11 @@
 		= container_of(work, struct hns_nic_priv, service_task);
 	struct hnae_handle *h = priv->ae_handle;
 
+	hns_nic_reset_subtask(priv);
 	hns_nic_update_link_status(priv->netdev);
 	h->dev->ops->update_led_status(h);
 	hns_nic_update_stats(priv->netdev);
 
-	hns_nic_reset_subtask(priv);
 	hns_nic_service_event_complete(priv);
 }
 
@@ -2339,7 +2374,7 @@
 	ndev->min_mtu = MAC_MIN_MTU;
 	switch (priv->enet_ver) {
 	case AE_VERSION_2:
-		ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+		ndev->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_NTUPLE;
 		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 			NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
 			NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 525d8b8..91f48c0 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1172,11 +1172,15 @@
 
 map_failed_frags:
 	last = i+1;
-	for (i = 0; i < last; i++)
+	for (i = 1; i < last; i++)
 		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
 			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
 			       DMA_TO_DEVICE);
 
+	dma_unmap_single(&adapter->vdev->dev,
+			 descs[0].fields.address,
+			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+			 DMA_TO_DEVICE);
 map_failed:
 	if (!firmware_has_feature(FW_FEATURE_CMO))
 		netdev_err(netdev, "tx: unable to map xmit buffer\n");
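The new loop start matters because descs[0] is created with dma_map_single() while descs[1..n] come from dma_map_page(), and every unmap must mirror its map. The matched pairs, as a sketch with hypothetical names:

/* Sketch: unmapping must mirror how each buffer was mapped. */
static void example_unmap_tx(struct device *dev,
			     dma_addr_t head_daddr, unsigned int head_len,
			     dma_addr_t frag_daddr, unsigned int frag_len)
{
	/* fragments were mapped with dma_map_page() */
	dma_unmap_page(dev, frag_daddr, frag_len, DMA_TO_DEVICE);
	/* the first descriptor was mapped with dma_map_single() */
	dma_unmap_single(dev, head_daddr, head_len, DMA_TO_DEVICE);
}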
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5ab21a1..c8704b1 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1939,8 +1939,9 @@
 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
 {
 	struct ibmvnic_rwi *rwi;
+	unsigned long flags;
 
-	mutex_lock(&adapter->rwi_lock);
+	spin_lock_irqsave(&adapter->rwi_lock, flags);
 
 	if (!list_empty(&adapter->rwi_list)) {
 		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
@@ -1950,7 +1951,7 @@
 		rwi = NULL;
 	}
 
-	mutex_unlock(&adapter->rwi_lock);
+	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
 	return rwi;
 }
 
@@ -2025,6 +2026,7 @@
 	struct list_head *entry, *tmp_entry;
 	struct ibmvnic_rwi *rwi, *tmp;
 	struct net_device *netdev = adapter->netdev;
+	unsigned long flags;
 	int ret;
 
 	if (adapter->state == VNIC_REMOVING ||
@@ -2041,21 +2043,21 @@
 		goto err;
 	}
 
-	mutex_lock(&adapter->rwi_lock);
+	spin_lock_irqsave(&adapter->rwi_lock, flags);
 
 	list_for_each(entry, &adapter->rwi_list) {
 		tmp = list_entry(entry, struct ibmvnic_rwi, list);
 		if (tmp->reset_reason == reason) {
 			netdev_dbg(netdev, "Skipping matching reset\n");
-			mutex_unlock(&adapter->rwi_lock);
+			spin_unlock_irqrestore(&adapter->rwi_lock, flags);
 			ret = EBUSY;
 			goto err;
 		}
 	}
 
-	rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
+	rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
 	if (!rwi) {
-		mutex_unlock(&adapter->rwi_lock);
+		spin_unlock_irqrestore(&adapter->rwi_lock, flags);
 		ibmvnic_close(netdev);
 		ret = ENOMEM;
 		goto err;
@@ -2069,7 +2071,7 @@
 	}
 	rwi->reset_reason = reason;
 	list_add_tail(&rwi->list, &adapter->rwi_list);
-	mutex_unlock(&adapter->rwi_lock);
+	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
 	adapter->resetting = true;
 	netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
 	schedule_work(&adapter->ibmvnic_reset);
@@ -4700,7 +4702,7 @@
 
 	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
 	INIT_LIST_HEAD(&adapter->rwi_list);
-	mutex_init(&adapter->rwi_lock);
+	spin_lock_init(&adapter->rwi_lock);
 	adapter->resetting = false;
 
 	adapter->mac_change_pending = false;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 735f481..0946539 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1068,7 +1068,7 @@
 	struct tasklet_struct tasklet;
 	enum vnic_state state;
 	enum ibmvnic_reset_reason reset_reason;
-	struct mutex rwi_lock;
+	spinlock_t rwi_lock;
 	struct list_head rwi_list;
 	struct work_struct ibmvnic_reset;
 	bool resetting;
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 37c7694..e1f821e 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -173,10 +173,14 @@
 	struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
 						     ptp_clock_info);
 	unsigned long flags;
-	u64 ns;
+	u64 cycles, ns;
 
 	spin_lock_irqsave(&adapter->systim_lock, flags);
-	ns = timecounter_read(&adapter->tc);
+
+	/* Use timecounter_cyc2time() to allow non-monotonic SYSTIM readings */
+	cycles = adapter->cc.read(&adapter->cc);
+	ns = timecounter_cyc2time(&adapter->tc, cycles);
+
 	spin_unlock_irqrestore(&adapter->systim_lock, flags);
 
 	*ts = ns_to_timespec64(ns);
@@ -232,9 +236,12 @@
 						     systim_overflow_work.work);
 	struct e1000_hw *hw = &adapter->hw;
 	struct timespec64 ts;
+	u64 ns;
 
-	adapter->ptp_clock_info.gettime64(&adapter->ptp_clock_info, &ts);
+	/* Update the timecounter */
+	ns = timecounter_read(&adapter->tc);
 
+	ts = ns_to_timespec64(ns);
 	e_dbg("SYSTIM overflow check at %lld.%09lu\n",
 	      (long long) ts.tv_sec, ts.tv_nsec);
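The split between the two helpers is deliberate: timecounter_read() advances the timecounter's cycle_last state and therefore has to run often enough (the overflow work above) to catch counter wrap, while timecounter_cyc2time() is a pure conversion that tolerates a SYSTIM sample taken slightly in the past, which is what a gettime path racing with Tx/Rx timestamp latching needs. The contrast, as a sketch over the fields used in these hunks:

/* Sketch: state-free conversion vs. state-advancing read. */
static u64 example_gettime_ns(struct e1000_adapter *adapter)
{
	u64 cycles = adapter->cc.read(&adapter->cc);

	/* converts without touching tc->cycle_last */
	return timecounter_cyc2time(&adapter->tc, cycles);
}

static u64 example_overflow_check_ns(struct e1000_adapter *adapter)
{
	/* updates tc->cycle_last; must run before the counter wraps */
	return timecounter_read(&adapter->tc);
}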
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 3c34270..ed9d3fc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1539,17 +1539,17 @@
 		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
 
 	/* Copy the address first, so that we avoid a possible race with
-	 * .set_rx_mode(). If we copy after changing the address in the filter
-	 * list, we might open ourselves to a narrow race window where
-	 * .set_rx_mode could delete our dev_addr filter and prevent traffic
-	 * from passing.
+	 * .set_rx_mode().
+	 * - Remove old address from MAC filter
+	 * - Copy new address
+	 * - Add new address to MAC filter
 	 */
-	ether_addr_copy(netdev->dev_addr, addr->sa_data);
-
 	spin_lock_bh(&vsi->mac_filter_hash_lock);
 	i40e_del_mac_filter(vsi, netdev->dev_addr);
-	i40e_add_mac_filter(vsi, addr->sa_data);
+	ether_addr_copy(netdev->dev_addr, addr->sa_data);
+	i40e_add_mac_filter(vsi, netdev->dev_addr);
 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
 	if (vsi->type == I40E_VSI_MAIN) {
 		i40e_status ret;
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index add124e..b27f7a9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -4,6 +4,7 @@
 #include "ixgbe.h"
 #include <net/xfrm.h>
 #include <crypto/aead.h>
+#include <linux/if_bridge.h>
 
 /**
  * ixgbe_ipsec_set_tx_sa - set the Tx SA registers
@@ -676,7 +677,8 @@
 	} else {
 		struct tx_sa tsa;
 
-		if (adapter->num_vfs)
+		if (adapter->num_vfs &&
+		    adapter->bridge_mode != BRIDGE_MODE_VEPA)
 			return -EOPNOTSUPP;
 
 		/* find the first unused index */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index eea63a9..f6ffd9f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -699,7 +699,6 @@
 	u8 num_tcs = adapter->hw_tcs;
 	u32 reg_val;
 	u32 queue;
-	u32 word;
 
 	/* remove VLAN filters belonging to this VF */
 	ixgbe_clear_vf_vlans(adapter, vf);
@@ -754,6 +753,14 @@
 		}
 	}
 
+	IXGBE_WRITE_FLUSH(hw);
+}
+
+static void ixgbe_vf_clear_mbx(struct ixgbe_adapter *adapter, u32 vf)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 word;
+
 	/* Clear VF's mailbox memory */
 	for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++)
 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0);
@@ -827,6 +834,8 @@
 	/* reset the filters for the device */
 	ixgbe_vf_reset_event(adapter, vf);
 
+	ixgbe_vf_clear_mbx(adapter, vf);
+
 	/* set vf mac address */
 	if (!is_zero_ether_addr(vf_mac))
 		ixgbe_set_vf_mac(adapter, vf, vf_mac);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index b4ed7d3..a78a392 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -406,7 +406,6 @@
 	struct mvneta_pcpu_stats __percpu	*stats;
 
 	int pkt_size;
-	unsigned int frag_size;
 	void __iomem *base;
 	struct mvneta_rx_queue *rxqs;
 	struct mvneta_tx_queue *txqs;
@@ -2905,7 +2904,9 @@
 	if (!pp->bm_priv) {
 		/* Set Offset */
 		mvneta_rxq_offset_set(pp, rxq, 0);
-		mvneta_rxq_buf_size_set(pp, rxq, pp->frag_size);
+		mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
+					PAGE_SIZE :
+					MVNETA_RX_BUF_SIZE(pp->pkt_size));
 		mvneta_rxq_bm_disable(pp, rxq);
 		mvneta_rxq_fill(pp, rxq, rxq->size);
 	} else {
@@ -3749,7 +3750,6 @@
 	int ret;
 
 	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
-	pp->frag_size = PAGE_SIZE;
 
 	ret = mvneta_setup_rxqs(pp);
 	if (ret)
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 6320e08..f8e4808 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -4292,12 +4292,15 @@
 	case PHY_INTERFACE_MODE_10GKR:
 	case PHY_INTERFACE_MODE_XAUI:
 	case PHY_INTERFACE_MODE_NA:
-		phylink_set(mask, 10000baseCR_Full);
-		phylink_set(mask, 10000baseSR_Full);
-		phylink_set(mask, 10000baseLR_Full);
-		phylink_set(mask, 10000baseLRM_Full);
-		phylink_set(mask, 10000baseER_Full);
-		phylink_set(mask, 10000baseKR_Full);
+		if (port->gop_id == 0) {
+			phylink_set(mask, 10000baseT_Full);
+			phylink_set(mask, 10000baseCR_Full);
+			phylink_set(mask, 10000baseSR_Full);
+			phylink_set(mask, 10000baseLR_Full);
+			phylink_set(mask, 10000baseLRM_Full);
+			phylink_set(mask, 10000baseER_Full);
+			phylink_set(mask, 10000baseKR_Full);
+		}
 		/* Fall-through */
 	case PHY_INTERFACE_MODE_RGMII:
 	case PHY_INTERFACE_MODE_RGMII_ID:
@@ -4308,7 +4311,6 @@
 		phylink_set(mask, 10baseT_Full);
 		phylink_set(mask, 100baseT_Half);
 		phylink_set(mask, 100baseT_Full);
-		phylink_set(mask, 10000baseT_Full);
 		/* Fall-through */
 	case PHY_INTERFACE_MODE_1000BASEX:
 	case PHY_INTERFACE_MODE_2500BASEX:
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index babcfd9..7521304 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -2064,9 +2064,11 @@
 {
 	struct mlx4_cmd_mailbox *mailbox;
 	__be32 *outbox;
+	u64 qword_field;
 	u32 dword_field;
-	int err;
+	u16 word_field;
 	u8 byte_field;
+	int err;
 	static const u8 a0_dmfs_query_hw_steering[] =  {
 		[0] = MLX4_STEERING_DMFS_A0_DEFAULT,
 		[1] = MLX4_STEERING_DMFS_A0_DYNAMIC,
@@ -2094,19 +2096,32 @@
 
 	/* QPC/EEC/CQC/EQC/RDMARC attributes */
 
-	MLX4_GET(param->qpc_base,      outbox, INIT_HCA_QPC_BASE_OFFSET);
-	MLX4_GET(param->log_num_qps,   outbox, INIT_HCA_LOG_QP_OFFSET);
-	MLX4_GET(param->srqc_base,     outbox, INIT_HCA_SRQC_BASE_OFFSET);
-	MLX4_GET(param->log_num_srqs,  outbox, INIT_HCA_LOG_SRQ_OFFSET);
-	MLX4_GET(param->cqc_base,      outbox, INIT_HCA_CQC_BASE_OFFSET);
-	MLX4_GET(param->log_num_cqs,   outbox, INIT_HCA_LOG_CQ_OFFSET);
-	MLX4_GET(param->altc_base,     outbox, INIT_HCA_ALTC_BASE_OFFSET);
-	MLX4_GET(param->auxc_base,     outbox, INIT_HCA_AUXC_BASE_OFFSET);
-	MLX4_GET(param->eqc_base,      outbox, INIT_HCA_EQC_BASE_OFFSET);
-	MLX4_GET(param->log_num_eqs,   outbox, INIT_HCA_LOG_EQ_OFFSET);
-	MLX4_GET(param->num_sys_eqs,   outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
-	MLX4_GET(param->rdmarc_base,   outbox, INIT_HCA_RDMARC_BASE_OFFSET);
-	MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
+	MLX4_GET(qword_field, outbox, INIT_HCA_QPC_BASE_OFFSET);
+	param->qpc_base = qword_field & ~((u64)0x1f);
+	MLX4_GET(byte_field, outbox, INIT_HCA_LOG_QP_OFFSET);
+	param->log_num_qps = byte_field & 0x1f;
+	MLX4_GET(qword_field, outbox, INIT_HCA_SRQC_BASE_OFFSET);
+	param->srqc_base = qword_field & ~((u64)0x1f);
+	MLX4_GET(byte_field, outbox, INIT_HCA_LOG_SRQ_OFFSET);
+	param->log_num_srqs = byte_field & 0x1f;
+	MLX4_GET(qword_field, outbox, INIT_HCA_CQC_BASE_OFFSET);
+	param->cqc_base = qword_field & ~((u64)0x1f);
+	MLX4_GET(byte_field, outbox, INIT_HCA_LOG_CQ_OFFSET);
+	param->log_num_cqs = byte_field & 0x1f;
+	MLX4_GET(qword_field, outbox, INIT_HCA_ALTC_BASE_OFFSET);
+	param->altc_base = qword_field;
+	MLX4_GET(qword_field, outbox, INIT_HCA_AUXC_BASE_OFFSET);
+	param->auxc_base = qword_field;
+	MLX4_GET(qword_field, outbox, INIT_HCA_EQC_BASE_OFFSET);
+	param->eqc_base = qword_field & ~((u64)0x1f);
+	MLX4_GET(byte_field, outbox, INIT_HCA_LOG_EQ_OFFSET);
+	param->log_num_eqs = byte_field & 0x1f;
+	MLX4_GET(word_field, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
+	param->num_sys_eqs = word_field & 0xfff;
+	MLX4_GET(qword_field, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
+	param->rdmarc_base = qword_field & ~((u64)0x1f);
+	MLX4_GET(byte_field, outbox, INIT_HCA_LOG_RD_OFFSET);
+	param->log_rd_per_qp = byte_field & 0x7;
 
 	MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
 	if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
@@ -2125,22 +2140,21 @@
 	/* steering attributes */
 	if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
 		MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
-		MLX4_GET(param->log_mc_entry_sz, outbox,
-			 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
-		MLX4_GET(param->log_mc_table_sz, outbox,
-			 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
-		MLX4_GET(byte_field, outbox,
-			 INIT_HCA_FS_A0_OFFSET);
+		MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
+		param->log_mc_entry_sz = byte_field & 0x1f;
+		MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
+		param->log_mc_table_sz = byte_field & 0x1f;
+		MLX4_GET(byte_field, outbox, INIT_HCA_FS_A0_OFFSET);
 		param->dmfs_high_steer_mode =
 			a0_dmfs_query_hw_steering[(byte_field >> 6) & 3];
 	} else {
 		MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
-		MLX4_GET(param->log_mc_entry_sz, outbox,
-			 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
-		MLX4_GET(param->log_mc_hash_sz,  outbox,
-			 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
-		MLX4_GET(param->log_mc_table_sz, outbox,
-			 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+		MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+		param->log_mc_entry_sz = byte_field & 0x1f;
+		MLX4_GET(byte_field,  outbox, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+		param->log_mc_hash_sz = byte_field & 0x1f;
+		MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+		param->log_mc_table_sz = byte_field & 0x1f;
 	}
 
 	/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
@@ -2164,15 +2178,18 @@
 	/* TPT attributes */
 
 	MLX4_GET(param->dmpt_base,  outbox, INIT_HCA_DMPT_BASE_OFFSET);
-	MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
-	MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
+	MLX4_GET(byte_field, outbox, INIT_HCA_TPT_MW_OFFSET);
+	param->mw_enabled = byte_field >> 7;
+	MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
+	param->log_mpt_sz = byte_field & 0x3f;
 	MLX4_GET(param->mtt_base,   outbox, INIT_HCA_MTT_BASE_OFFSET);
 	MLX4_GET(param->cmpt_base,  outbox, INIT_HCA_CMPT_BASE_OFFSET);
 
 	/* UAR attributes */
 
 	MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
-	MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+	MLX4_GET(byte_field, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+	param->log_uar_sz = byte_field & 0xf;
 
 	/* phv_check enable */
 	MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
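
The QUERY_HCA rework above stops MLX4_GET from writing device bytes straight into narrow struct members and instead reads each field into a scratch variable of the right width, then masks off the bits that are reserved or repurposed. The idiom in isolation, as a standalone sketch (the field layouts here are invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* 64-bit base address whose low 5 bits carry unrelated flags:
 * mask instead of trusting the device to keep them zero.
 */
static uint64_t parse_base(uint64_t raw)
{
	return raw & ~(uint64_t)0x1f;
}

/* log2 count packed into the low 5 bits of a byte */
static unsigned int parse_log_num(uint8_t raw)
{
	return raw & 0x1f;
}

int main(void)
{
	printf("base=%#llx log_num=%u\n",
	       (unsigned long long)parse_base(0x100000000abcULL),
	       parse_log_num(0xe5));	/* high bits ignored -> 5 */
	return 0;
}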
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 98dd3e0..5e54230 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1101,11 +1101,6 @@
 			      struct ethtool_ts_info *info)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
-	int ret;
-
-	ret = ethtool_op_get_ts_info(priv->netdev, info);
-	if (ret)
-		return ret;
 
 	info->phc_index = mlx5_clock_get_ptp_index(mdev);
 
@@ -1113,9 +1108,9 @@
 	    info->phc_index == -1)
 		return 0;
 
-	info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
-				 SOF_TIMESTAMPING_RX_HARDWARE |
-				 SOF_TIMESTAMPING_RAW_HARDWARE;
+	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+				SOF_TIMESTAMPING_RX_HARDWARE |
+				SOF_TIMESTAMPING_RAW_HARDWARE;
 
 	info->tx_types = BIT(HWTSTAMP_TX_OFF) |
 			 BIT(HWTSTAMP_TX_ON);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index faa84b4..7365899 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -128,6 +128,8 @@
 	return !params->lro_en && frag_sz <= PAGE_SIZE;
 }
 
+#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \
+					  MLX5_MPWQE_LOG_STRIDE_SZ_BASE)
 static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
 					 struct mlx5e_params *params)
 {
@@ -138,6 +140,9 @@
 	if (!mlx5e_rx_is_linear_skb(mdev, params))
 		return false;
 
+	if (order_base_2(frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ)
+		return false;
+
 	if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
 		return true;
 
@@ -1383,6 +1388,7 @@
 	struct mlx5_core_dev *mdev = c->mdev;
 	struct mlx5_rate_limit rl = {0};
 
+	cancel_work_sync(&sq->dim.work);
 	mlx5e_destroy_sq(mdev, sq->sqn);
 	if (sq->rate_limit) {
 		rl.rate = sq->rate_limit;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index d543a5c..8262f09 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1150,7 +1150,7 @@
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 {
 	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
-	struct mlx5e_xdpsq *xdpsq;
+	struct mlx5e_xdpsq *xdpsq = &rq->xdpsq;
 	struct mlx5_cqe64 *cqe;
 	int work_done = 0;
 
@@ -1161,10 +1161,11 @@
 		work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
 
 	cqe = mlx5_cqwq_get_cqe(&cq->wq);
-	if (!cqe)
+	if (!cqe) {
+		if (unlikely(work_done))
+			goto out;
 		return 0;
-
-	xdpsq = &rq->xdpsq;
+	}
 
 	do {
 		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
@@ -1179,6 +1180,7 @@
 		rq->handle_rx_cqe(rq, cqe);
 	} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
 
+out:
 	if (xdpsq->doorbell) {
 		mlx5e_xmit_xdp_doorbell(xdpsq);
 		xdpsq->doorbell = false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index d57d51c..7047cc2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -73,7 +73,6 @@
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_udp_seg_rem) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
@@ -194,7 +193,6 @@
 			s->tx_nop               += sq_stats->nop;
 			s->tx_queue_stopped	+= sq_stats->stopped;
 			s->tx_queue_wake	+= sq_stats->wake;
-			s->tx_udp_seg_rem	+= sq_stats->udp_seg_rem;
 			s->tx_queue_dropped	+= sq_stats->dropped;
 			s->tx_cqe_err		+= sq_stats->cqe_err;
 			s->tx_recover		+= sq_stats->recover;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index c1064af..0ad7a16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -86,7 +86,6 @@
 	u64 tx_recover;
 	u64 tx_cqes;
 	u64 tx_queue_wake;
-	u64 tx_udp_seg_rem;
 	u64 tx_cqe_err;
 	u64 tx_xdp_xmit;
 	u64 tx_xdp_full;
@@ -217,7 +216,6 @@
 	u64 csum_partial_inner;
 	u64 added_vlan_packets;
 	u64 nop;
-	u64 udp_seg_rem;
 #ifdef CONFIG_MLX5_EN_TLS
 	u64 tls_ooo;
 	u64 tls_resync_bytes;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index ea7dedc..d670647 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1133,13 +1133,6 @@
 	int err = 0;
 	u8 *smac_v;
 
-	if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) {
-		mlx5_core_warn(esw->dev,
-			       "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
-			       vport->vport);
-		return -EPERM;
-	}
-
 	esw_vport_cleanup_ingress_rules(esw, vport);
 
 	if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
@@ -1696,7 +1689,7 @@
 	int vport_num;
 	int err;
 
-	if (!MLX5_ESWITCH_MANAGER(dev))
+	if (!MLX5_VPORT_MANAGER(dev))
 		return 0;
 
 	esw_info(dev,
@@ -1765,7 +1758,7 @@
 
 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
 {
-	if (!esw || !MLX5_ESWITCH_MANAGER(esw->dev))
+	if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
 		return;
 
 	esw_info(esw->dev, "cleanup\n");
@@ -1812,13 +1805,10 @@
 	mutex_lock(&esw->state_lock);
 	evport = &esw->vports[vport];
 
-	if (evport->info.spoofchk && !is_valid_ether_addr(mac)) {
+	if (evport->info.spoofchk && !is_valid_ether_addr(mac))
 		mlx5_core_warn(esw->dev,
-			       "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
+			       "Set invalid MAC while spoofchk is on, vport(%d)\n",
 			       vport);
-		err = -EPERM;
-		goto unlock;
-	}
 
 	err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
 	if (err) {
@@ -1964,6 +1954,10 @@
 	evport = &esw->vports[vport];
 	pschk = evport->info.spoofchk;
 	evport->info.spoofchk = spoofchk;
+	if (pschk && !is_valid_ether_addr(evport->info.mac))
+		mlx5_core_warn(esw->dev,
+			       "Spoofchk is set while MAC is invalid, vport(%d)\n",
+			       evport->vport);
 	if (evport->enabled && esw->mode == SRIOV_LEGACY)
 		err = esw_vport_ingress_config(esw, evport);
 	if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 37d114c..d181645 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -432,7 +432,7 @@
 
 	if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
 	    --fte->dests_size) {
-		modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST),
+		modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
 		update_fte = true;
 	}
 out:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 30f751e..f7154f35 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -81,6 +81,7 @@
 	struct mlxsw_core_port *ports;
 	unsigned int max_ports;
 	bool reload_fail;
+	bool fw_flash_in_progress;
 	unsigned long driver_priv[0];
 	/* driver_priv has to be always the last item */
 };
@@ -428,12 +429,16 @@
 	struct rcu_head rcu;
 };
 
-#define MLXSW_EMAD_TIMEOUT_MS 200
+#define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS	3000
+#define MLXSW_EMAD_TIMEOUT_MS			200
 
 static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
 {
 	unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
 
+	if (trans->core->fw_flash_in_progress)
+		timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);
+
 	queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
 }
 
@@ -1854,6 +1859,18 @@
 }
 EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
 
+void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core)
+{
+	mlxsw_core->fw_flash_in_progress = true;
+}
+EXPORT_SYMBOL(mlxsw_core_fw_flash_start);
+
+void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core)
+{
+	mlxsw_core->fw_flash_in_progress = false;
+}
+EXPORT_SYMBOL(mlxsw_core_fw_flash_end);
+
 static int __init mlxsw_core_module_init(void)
 {
 	int err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index c35be47..c4e4971 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -292,6 +292,9 @@
 			     u64 *p_single_size, u64 *p_double_size,
 			     u64 *p_linear_size);
 
+void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core);
+void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core);
+
 bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
 			  enum mlxsw_res_id res_id);
 
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 5890fdf..c7901a3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -604,29 +604,31 @@
 		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
 		u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
 		u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
+		char ncqe[MLXSW_PCI_CQE_SIZE_MAX];
+
+		memcpy(ncqe, cqe, q->elem_size);
+		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
 
 		if (sendq) {
 			struct mlxsw_pci_queue *sdq;
 
 			sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
 			mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
-						 wqe_counter, cqe);
+						 wqe_counter, ncqe);
 			q->u.cq.comp_sdq_count++;
 		} else {
 			struct mlxsw_pci_queue *rdq;
 
 			rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
 			mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
-						 wqe_counter, q->u.cq.v, cqe);
+						 wqe_counter, q->u.cq.v, ncqe);
 			q->u.cq.comp_rdq_count++;
 		}
 		if (++items == credits)
 			break;
 	}
-	if (items) {
-		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+	if (items)
 		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
-	}
 }
 
 static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
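
Copying each CQE into an on-stack buffer sized for the largest CQE format and ringing the consumer doorbell before handling it lets the device reuse that ring entry immediately; the handler must then only ever touch the snapshot. The shape of the change, sketched with stand-in queue helpers (only the copy-then-release ordering is the point):

#include <linux/string.h>
#include <linux/types.h>

#define CQE_SIZE_MAX	32	/* covers every CQE format, like MLXSW_PCI_CQE_SIZE_MAX */

struct cq;			/* opaque stand-in for the driver's queue */
char *next_valid_cqe(struct cq *q);		/* hypothetical */
size_t cqe_elem_size(struct cq *q);		/* hypothetical */
void ring_consumer_doorbell(struct cq *q);	/* hypothetical */
void process_cqe(struct cq *q, const char *cqe);/* hypothetical */

static void handle_completions(struct cq *q)
{
	char *cqe;

	while ((cqe = next_valid_cqe(q)) != NULL) {
		char copy[CQE_SIZE_MAX];

		memcpy(copy, cqe, cqe_elem_size(q));	/* snapshot first */
		ring_consumer_doorbell(q);	/* HW may reuse the entry now */
		process_cqe(q, copy);		/* work only on the snapshot */
	}
}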
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index 83f452b..72cdaa0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -27,7 +27,7 @@
 
 #define MLXSW_PCI_SW_RESET			0xF0010
 #define MLXSW_PCI_SW_RESET_RST_BIT		BIT(0)
-#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS	5000
+#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS	13000
 #define MLXSW_PCI_SW_RESET_WAIT_MSECS		100
 #define MLXSW_PCI_FW_READY			0xA1844
 #define MLXSW_PCI_FW_READY_MASK			0xFFFF
@@ -53,6 +53,7 @@
 #define MLXSW_PCI_WQE_SIZE	32 /* 32 bytes per element */
 #define MLXSW_PCI_CQE01_SIZE	16 /* 16 bytes per element */
 #define MLXSW_PCI_CQE2_SIZE	32 /* 32 bytes per element */
+#define MLXSW_PCI_CQE_SIZE_MAX	MLXSW_PCI_CQE2_SIZE
 #define MLXSW_PCI_EQE_SIZE	16 /* 16 bytes per element */
 #define MLXSW_PCI_WQE_COUNT	(MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
 #define MLXSW_PCI_CQE01_COUNT	(MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index ada644d..de821a9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -308,8 +308,13 @@
 		},
 		.mlxsw_sp = mlxsw_sp
 	};
+	int err;
 
-	return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
+	mlxsw_core_fw_flash_start(mlxsw_sp->core);
+	err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
+	mlxsw_core_fw_flash_end(mlxsw_sp->core);
+
+	return err;
 }
 
 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
@@ -4630,12 +4635,15 @@
 							   lower_dev,
 							   upper_dev);
 		} else if (netif_is_lag_master(upper_dev)) {
-			if (info->linking)
+			if (info->linking) {
 				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
 							     upper_dev);
-			else
+			} else {
+				mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port,
+							    false);
 				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
 							upper_dev);
+			}
 		} else if (netif_is_ovs_master(upper_dev)) {
 			if (info->linking)
 				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index 715d24f..562c442 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -696,8 +696,8 @@
 static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = {
 	.type			= MLXSW_SP_FID_TYPE_DUMMY,
 	.fid_size		= sizeof(struct mlxsw_sp_fid),
-	.start_index		= MLXSW_SP_RFID_BASE - 1,
-	.end_index		= MLXSW_SP_RFID_BASE - 1,
+	.start_index		= VLAN_N_VID - 1,
+	.end_index		= VLAN_N_VID - 1,
 	.ops			= &mlxsw_sp_fid_dummy_ops,
 };
 
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 4eb64cb..0d9ea37 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1761,7 +1761,7 @@
 mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
 			      struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
 {
-	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid;
+	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid;
 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
 
 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index aaedf10..42f5bfa 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -802,14 +802,8 @@
 	u32 mac_addr_hi = 0;
 	u32 mac_addr_lo = 0;
 	u32 data;
-	int ret;
 
 	netdev = adapter->netdev;
-	lan743x_csr_write(adapter, MAC_CR, MAC_CR_RST_);
-	ret = lan743x_csr_wait_for_bit(adapter, MAC_CR, MAC_CR_RST_,
-				       0, 1000, 20000, 100);
-	if (ret)
-		return ret;
 
 	/* setup auto duplex, and speed detection */
 	data = lan743x_csr_read(adapter, MAC_CR);
@@ -968,13 +962,10 @@
 
 		memset(&ksettings, 0, sizeof(ksettings));
 		phy_ethtool_get_link_ksettings(netdev, &ksettings);
-		local_advertisement = phy_read(phydev, MII_ADVERTISE);
-		if (local_advertisement < 0)
-			return;
-
-		remote_advertisement = phy_read(phydev, MII_LPA);
-		if (remote_advertisement < 0)
-			return;
+		local_advertisement =
+			ethtool_adv_to_mii_adv_t(phydev->advertising);
+		remote_advertisement =
+			ethtool_adv_to_mii_adv_t(phydev->lp_advertising);
 
 		lan743x_phy_update_flowcontrol(adapter,
 					       ksettings.base.duplex,
@@ -2722,8 +2713,9 @@
 	snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE,
 		 "pci-%s", pci_name(adapter->pdev));
 
-	/* set to internal PHY id */
-	adapter->mdiobus->phy_mask = ~(u32)BIT(1);
+	if ((adapter->csr.id_rev & ID_REV_ID_MASK_) == ID_REV_ID_LAN7430_)
+		/* LAN7430 uses internal phy at address 1 */
+		adapter->mdiobus->phy_mask = ~(u32)BIT(1);
 
 	/* register mdiobus */
 	ret = mdiobus_register(adapter->mdiobus);
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index ed4e298..0bdd3c4 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -733,7 +733,7 @@
 	}
 
 	return ocelot_mact_learn(ocelot, port->chip_port, addr, vid,
-				 ENTRYTYPE_NORMAL);
+				 ENTRYTYPE_LOCKED);
 }
 
 static int ocelot_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 398011c..bf4302e 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -807,7 +807,7 @@
 	struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
 	struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
 	struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
-	u64 data0, data1 = 0, steer_ctrl = 0;
+	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
 	enum vxge_hw_status status;
 
 	status = vxge_hw_vpath_fw_api(vpath,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index bd19624..90148db 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -375,13 +375,29 @@
 		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
 			return -EOPNOTSUPP;
 
-		/* We need to store TCP flags in the IPv4 key space, thus
-		 * we need to ensure we include a IPv4 key layer if we have
-		 * not done so already.
+		/* We need to store TCP flags in either the IPv4 or IPv6 key
+		 * space, thus we need to ensure we include an IPv4/IPv6 key
+		 * layer if we have not done so already.
 		 */
-		if (!(key_layer & NFP_FLOWER_LAYER_IPV4)) {
-			key_layer |= NFP_FLOWER_LAYER_IPV4;
-			key_size += sizeof(struct nfp_flower_ipv4);
+		if (!key_basic)
+			return -EOPNOTSUPP;
+
+		if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
+		    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
+			switch (key_basic->n_proto) {
+			case cpu_to_be16(ETH_P_IP):
+				key_layer |= NFP_FLOWER_LAYER_IPV4;
+				key_size += sizeof(struct nfp_flower_ipv4);
+				break;
+
+			case cpu_to_be16(ETH_P_IPV6):
+				key_layer |= NFP_FLOWER_LAYER_IPV6;
+				key_size += sizeof(struct nfp_flower_ipv6);
+				break;
+
+			default:
+				return -EOPNOTSUPP;
+			}
 		}
 	}
 
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 052b3d2..c662c6f 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -912,7 +912,7 @@
 	.ndo_validate_addr	= eth_validate_addr,
 };
 
-static void __init get_mac_address(struct net_device *dev)
+static void get_mac_address(struct net_device *dev)
 {
 	struct w90p910_ether *ether = netdev_priv(dev);
 	struct platform_device *pdev;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 0ea141e..6547a9d 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -1125,7 +1125,8 @@
 		return -EINVAL;
 	}
 	val = nx_get_bios_version(adapter);
-	netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios);
+	if (netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios))
+		return -EIO;
 	if ((__force u32)val != bios) {
 		dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
 				fw_name[fw_type]);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index a713826..bed8f48 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -12669,8 +12669,9 @@
 	MFW_DRV_MSG_BW_UPDATE10,
 	MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
 	MFW_DRV_MSG_BW_UPDATE11,
-	MFW_DRV_MSG_OEM_CFG_UPDATE,
+	MFW_DRV_MSG_RESERVED,
 	MFW_DRV_MSG_GET_TLV_REQ,
+	MFW_DRV_MSG_OEM_CFG_UPDATE,
 	MFW_DRV_MSG_MAX
 };
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 14ac9ca..2fa1c05 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -2485,6 +2485,7 @@
 		if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
 			DP_NOTICE(cdev,
 				  "Unable to map frag - dropping packet\n");
+			rc = -ENOMEM;
 			goto err;
 		}
 
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 4bbefd9..3017057 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -327,8 +327,10 @@
 
 	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
 		packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
-	else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5)
-		packet_len += sizeof(struct rmnet_map_v5_csum_header);
+	else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5) {
+		if (!maph->cd_bit)
+			packet_len += sizeof(struct rmnet_map_v5_csum_header);
+	}
 
 	if (((int)skb->len - (int)packet_len) < 0)
 		return NULL;
@@ -779,9 +781,30 @@
 	if (iph->version == 4) {
 		protocol = iph->protocol;
 		ip_len = iph->ihl * 4;
+
+		/* Don't allow coalescing of any packets with IP options */
+		if (iph->ihl != 5)
+			gro = false;
 	} else if (iph->version == 6) {
+		__be16 frag_off;
+
 		protocol = ((struct ipv6hdr *)iph)->nexthdr;
-		ip_len = sizeof(struct ipv6hdr);
+		ip_len = ipv6_skip_exthdr(coal_skb, sizeof(struct ipv6hdr),
+					  &protocol, &frag_off);
+
+		/* If we run into a problem, or this has a fragment header
+		 * (which should technically not be possible if the HW
+		 * works as intended...), bail.
+		 */
+		if (ip_len < 0 || frag_off) {
+			priv->stats.coal.coal_ip_invalid++;
+			return;
+		} else if (ip_len > sizeof(struct ipv6hdr)) {
+			/* Don't allow coalescing of any packets with IPv6
+			 * extension headers.
+			 */
+			gro = false;
+		}
 	} else {
 		priv->stats.coal.coal_ip_invalid++;
 		return;
@@ -818,6 +841,7 @@
 						return;
 
 					__skb_queue_tail(list, new_skb);
+					start += pkt_len * gro_count;
 					gro_count = 0;
 				}
 
@@ -858,7 +882,7 @@
 
 			__skb_queue_tail(list, new_skb);
 
-			start += pkt_len;
+			start += pkt_len * gro_count;
 			start_pkt_num = total_pkt + 1;
 			gro_count = 0;
 		}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 4930e03..5f45ffe 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -214,6 +214,8 @@
 };
 
 static const struct pci_device_id rtl8169_pci_tbl[] = {
+	{ PCI_VDEVICE(REALTEK,	0x2502), RTL_CFG_1 },
+	{ PCI_VDEVICE(REALTEK,	0x2600), RTL_CFG_1 },
 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8129), 0, 0, RTL_CFG_0 },
 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8136), 0, 0, RTL_CFG_2 },
 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8161), 0, 0, RTL_CFG_1 },
@@ -717,6 +719,7 @@
 MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
 module_param_named(debug, debug.msg_enable, int, 0);
 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
+MODULE_SOFTDEP("pre: realtek");
 MODULE_LICENSE("GPL");
 MODULE_FIRMWARE(FIRMWARE_8168D_1);
 MODULE_FIRMWARE(FIRMWARE_8168D_2);
@@ -1528,6 +1531,8 @@
 	}
 
 	RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+
+	device_set_wakeup_enable(tp_to_dev(tp), wolopts);
 }
 
 static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -1549,8 +1554,6 @@
 
 	rtl_unlock_work(tp);
 
-	device_set_wakeup_enable(d, tp->saved_wolopts);
-
 	pm_runtime_put_noidle(d);
 
 	return 0;
@@ -1730,11 +1733,13 @@
 
 static bool rtl8169_update_counters(struct rtl8169_private *tp)
 {
+	u8 val = RTL_R8(tp, ChipCmd);
+
 	/*
 	 * Some chips are unable to dump tally counters when the receiver
-	 * is disabled.
+	 * is disabled. If 0xff, the chip may be in a PCI power-save state.
 	 */
-	if ((RTL_R8(tp, ChipCmd) & CmdRxEnb) == 0)
+	if (!(val & CmdRxEnb) || val == 0xff)
 		return true;
 
 	return rtl8169_do_counters(tp, CounterDump);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index d6f7539..8441c86 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -344,7 +344,7 @@
 	int i;
 
 	priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) +
-		ETH_HLEN + VLAN_HLEN;
+		ETH_HLEN + VLAN_HLEN + sizeof(__sum16);
 
 	/* Allocate RX and TX skb rings */
 	priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
@@ -525,13 +525,15 @@
 {
 	u8 *hw_csum;
 
-	/* The hardware checksum is 2 bytes appended to packet data */
-	if (unlikely(skb->len < 2))
+	/* The hardware checksum is contained in sizeof(__sum16) (2) bytes
+	 * appended to packet data
+	 */
+	if (unlikely(skb->len < sizeof(__sum16)))
 		return;
-	hw_csum = skb_tail_pointer(skb) - 2;
+	hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
 	skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
 	skb->ip_summed = CHECKSUM_COMPLETE;
-	skb_trim(skb, skb->len - 2);
+	skb_trim(skb, skb->len - sizeof(__sum16));
 }
 
 /* Packet receive function for Ethernet AVB */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 99ea5c4..2103b86 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -4247,6 +4247,7 @@
 	priv->wq = create_singlethread_workqueue("stmmac_wq");
 	if (!priv->wq) {
 		dev_err(priv->device, "failed to create workqueue\n");
+		ret = -ENOMEM;
 		goto error_wq;
 	}
 
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 9020b08..7ec4eb7 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -1,22 +1,9 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
 /* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
  *
  * Copyright (C) 2004 Sun Microsystems Inc.
  * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
  * This driver uses the sungem driver (c) David Miller
  * (davem@redhat.com) as its basis.
  *
diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h
index 13f3860..ae5f05f 100644
--- a/drivers/net/ethernet/sun/cassini.h
+++ b/drivers/net/ethernet/sun/cassini.h
@@ -1,23 +1,10 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0+ */
 /* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $
  * cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver.
  *
  * Copyright (C) 2004 Sun Microsystems Inc.
  * Copyright (c) 2003 Adrian Sun (asun@darksunrising.com)
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
  * vendor id: 0x108E (Sun Microsystems, Inc.)
  * device id: 0xabba (Cassini)
  * revision ids: 0x01 = Cassini
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index d79a69d..54e63ec 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -524,10 +524,7 @@
 
 
 	/* Start resync timer again -- the TNC might be still absent */
-
-	del_timer(&sp->resync_t);
-	sp->resync_t.expires	= jiffies + SIXP_RESYNC_TIMEOUT;
-	add_timer(&sp->resync_t);
+	mod_timer(&sp->resync_t, jiffies + SIXP_RESYNC_TIMEOUT);
 }
 
 static inline int tnc_init(struct sixpack *sp)
@@ -538,9 +535,7 @@
 
 	sp->tty->ops->write(sp->tty, &inbyte, 1);
 
-	del_timer(&sp->resync_t);
-	sp->resync_t.expires = jiffies + SIXP_RESYNC_TIMEOUT;
-	add_timer(&sp->resync_t);
+	mod_timer(&sp->resync_t, jiffies + SIXP_RESYNC_TIMEOUT);
 
 	return 0;
 }
@@ -918,11 +913,8 @@
         /* if the state byte has been received, the TNC is present,
            so the resync timer can be reset. */
 
-	if (sp->tnc_state == TNC_IN_SYNC) {
-		del_timer(&sp->resync_t);
-		sp->resync_t.expires	= jiffies + SIXP_INIT_RESYNC_TIMEOUT;
-		add_timer(&sp->resync_t);
-	}
+	if (sp->tnc_state == TNC_IN_SYNC)
+		mod_timer(&sp->resync_t, jiffies + SIXP_INIT_RESYNC_TIMEOUT);
 
 	sp->status1 = cmd & SIXP_PRIO_DATA_MASK;
 }
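
mod_timer() is the race-free equivalent of the removed del_timer()/expires/add_timer() sequences: it atomically (re)arms the timer whether or not it is currently pending, so the timer cannot fire between the delete and the re-add. Roughly, with illustrative names:

#include <linux/jiffies.h>
#include <linux/timer.h>

static struct timer_list resync_timer;

static void resync_cb(struct timer_list *t)
{
	/* resync handling would go here */
}

static void init_resync(void)
{
	timer_setup(&resync_timer, resync_cb, 0);
}

static void arm_resync(unsigned long delay)
{
	/* one call replaces del_timer() + .expires + add_timer() */
	mod_timer(&resync_timer, jiffies + delay);
}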
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 0ff5a40..b2ff903 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -721,7 +721,7 @@
 static void ca8210_rx_done(struct cas_control *cas_ctl)
 {
 	u8 *buf;
-	u8 len;
+	unsigned int len;
 	struct work_priv_container *mlme_reset_wpc;
 	struct ca8210_priv *priv = cas_ctl->priv;
 
@@ -730,7 +730,7 @@
 	if (len > CA8210_SPI_BUF_SIZE) {
 		dev_crit(
 			&priv->spi->dev,
-			"Received packet len (%d) erroneously long\n",
+			"Received packet len (%u) erroneously long\n",
 			len
 		);
 		goto finish;
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index bf70ab8..624bff4 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -500,7 +500,7 @@
 	    !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
 		return -EINVAL;
 
-	if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX + 1,
+	if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX,
 			     info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE],
 			     hwsim_edge_policy, NULL))
 		return -EINVAL;
@@ -550,7 +550,7 @@
 	    !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
 		return -EINVAL;
 
-	if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX + 1,
+	if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX,
 			     info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE],
 			     hwsim_edge_policy, NULL))
 		return -EINVAL;
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 4a94956..5fb5418 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -97,12 +97,12 @@
 			err = ipvlan_register_nf_hook(read_pnet(&port->pnet));
 			if (!err) {
 				mdev->l3mdev_ops = &ipvl_l3mdev_ops;
-				mdev->priv_flags |= IFF_L3MDEV_MASTER;
+				mdev->priv_flags |= IFF_L3MDEV_RX_HANDLER;
 			} else
 				goto fail;
 		} else if (port->mode == IPVLAN_MODE_L3S) {
 			/* Old mode was L3S */
-			mdev->priv_flags &= ~IFF_L3MDEV_MASTER;
+			mdev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
 			ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
 			mdev->l3mdev_ops = NULL;
 		}
@@ -162,7 +162,7 @@
 	struct sk_buff *skb;
 
 	if (port->mode == IPVLAN_MODE_L3S) {
-		dev->priv_flags &= ~IFF_L3MDEV_MASTER;
+		dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
 		ipvlan_unregister_nf_hook(dev_net(dev));
 		dev->l3mdev_ops = NULL;
 	}
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index f7c69ca..d71be15 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1063,6 +1063,39 @@
 	return 0;
 }
 
+/* The VOD can be out of specification on link up. Poke an
+ * undocumented register, in an undocumented page, with a magic value
+ * to fix this.
+ */
+static int m88e6390_errata(struct phy_device *phydev)
+{
+	int err;
+
+	err = phy_write(phydev, MII_BMCR,
+			BMCR_ANENABLE | BMCR_SPEED1000 | BMCR_FULLDPLX);
+	if (err)
+		return err;
+
+	usleep_range(300, 400);
+
+	err = phy_write_paged(phydev, 0xf8, 0x08, 0x36);
+	if (err)
+		return err;
+
+	return genphy_soft_reset(phydev);
+}
+
+static int m88e6390_config_aneg(struct phy_device *phydev)
+{
+	int err;
+
+	err = m88e6390_errata(phydev);
+	if (err)
+		return err;
+
+	return m88e1510_config_aneg(phydev);
+}
+
 /**
  * fiber_lpa_to_ethtool_lpa_t
  * @lpa: value of the MII_LPA register for fiber link
@@ -1418,7 +1451,7 @@
 		 * before enabling it if !phy_interrupt_is_valid()
 		 */
 		if (!phy_interrupt_is_valid(phydev))
-			phy_read(phydev, MII_M1011_IEVENT);
+			__phy_read(phydev, MII_M1011_IEVENT);
 
 		/* Enable the WOL interrupt */
 		err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0,
@@ -2313,7 +2346,7 @@
 		.flags = PHY_HAS_INTERRUPT,
 		.probe = m88e6390_probe,
 		.config_init = &marvell_config_init,
-		.config_aneg = &m88e1510_config_aneg,
+		.config_aneg = &m88e6390_config_aneg,
 		.read_status = &marvell_read_status,
 		.ack_interrupt = &marvell_ack_interrupt,
 		.config_intr = &marvell_config_intr,
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 98f4b1f..15c5586 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -391,6 +391,7 @@
 	if (IS_ERR(gpiod)) {
 		dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n",
 			bus->id);
+		device_del(&bus->dev);
 		return PTR_ERR(gpiod);
 	} else	if (gpiod) {
 		bus->reset_gpiod = gpiod;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 20d1be2..2c32c79 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -164,11 +164,8 @@
 	if (ret < 0)
 		return ret;
 
-	/* The PHY needs to renegotiate. */
-	phydev->link = 0;
-	phydev->state = PHY_UP;
-
-	phy_start_machine(phydev);
+	if (phydev->attached_dev && phydev->adjust_link)
+		phy_start_machine(phydev);
 
 	return 0;
 }
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index bdc4d23..7eae088 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -770,7 +770,7 @@
 {
 	struct sk_buff *skb;
 	unsigned char *p;
-	unsigned int len, fcs, proto;
+	unsigned int len, fcs;
 
 	skb = ap->rpkt;
 	if (ap->state & (SC_TOSS | SC_ESCAPE))
@@ -799,14 +799,14 @@
 			goto err;
 		p = skb_pull(skb, 2);
 	}
-	proto = p[0];
-	if (proto & 1) {
-		/* protocol is compressed */
-		*(u8 *)skb_push(skb, 1) = 0;
-	} else {
+
+	/* If protocol field is not compressed, it can be an LCP packet */
+	if (!(p[0] & 0x01)) {
+		unsigned int proto;
+
 		if (skb->len < 2)
 			goto err;
-		proto = (proto << 8) + p[1];
+		proto = (p[0] << 8) + p[1];
 		if (proto == PPP_LCP)
 			async_lcp_peek(ap, p, skb->len, 1);
 	}
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 02ad03a..8b1ef1b 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1965,6 +1965,46 @@
 	ppp_recv_unlock(ppp);
 }
 
+/**
+ * __ppp_decompress_proto - Decompress protocol field, slim version.
+ * @skb: Socket buffer where protocol field should be decompressed. It must have
+ *	 at least 1 byte of head room and 1 byte of linear data. First byte of
+ *	 data must be a protocol field byte.
+ *
+ * Decompress protocol field in PPP header if it's compressed, e.g. when
+ * Protocol-Field-Compression (PFC) was negotiated. No checks w.r.t. skb data
+ * length are done in this function.
+ */
+static void __ppp_decompress_proto(struct sk_buff *skb)
+{
+	if (skb->data[0] & 0x01)
+		*(u8 *)skb_push(skb, 1) = 0x00;
+}
+
+/**
+ * ppp_decompress_proto - Check skb data room and decompress protocol field.
+ * @skb: Socket buffer where protocol field should be decompressed. First byte
+ *	 of data must be a protocol field byte.
+ *
+ * Decompress protocol field in PPP header if it's compressed, e.g. when
+ * Protocol-Field-Compression (PFC) was negotiated. This function also makes
+ * sure that skb data room is sufficient for Protocol field, before and after
+ * decompression.
+ *
+ * Return: true - decompressed successfully, false - not enough room in skb.
+ */
+static bool ppp_decompress_proto(struct sk_buff *skb)
+{
+	/* At least one byte should be present (if protocol is compressed) */
+	if (!pskb_may_pull(skb, 1))
+		return false;
+
+	__ppp_decompress_proto(skb);
+
+	/* Protocol field should occupy 2 bytes when not compressed */
+	return pskb_may_pull(skb, 2);
+}
+
 void
 ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
 {
@@ -1977,7 +2017,7 @@
 	}
 
 	read_lock_bh(&pch->upl);
-	if (!pskb_may_pull(skb, 2)) {
+	if (!ppp_decompress_proto(skb)) {
 		kfree_skb(skb);
 		if (pch->ppp) {
 			++pch->ppp->dev->stats.rx_length_errors;
@@ -2074,6 +2114,9 @@
 	if (ppp->flags & SC_MUST_COMP && ppp->rstate & SC_DC_FERROR)
 		goto err;
 
+	/* At this point the "Protocol" field MUST be decompressed, either in
+	 * ppp_input(), ppp_decompress_frame() or in ppp_receive_mp_frame().
+	 */
 	proto = PPP_PROTO(skb);
 	switch (proto) {
 	case PPP_VJC_COMP:
@@ -2245,6 +2288,9 @@
 		skb_put(skb, len);
 		skb_pull(skb, 2);	/* pull off the A/C bytes */
 
+		/* Don't call __ppp_decompress_proto() here, but instead rely on
+		 * corresponding algo (mppe/bsd/deflate) to decompress it.
+		 */
 	} else {
 		/* Uncompressed frame - pass to decompressor so it
 		   can update its dictionary if necessary. */
@@ -2290,9 +2336,11 @@
 
 	/*
 	 * Do protocol ID decompression on the first fragment of each packet.
+	 * We have to do that here, because ppp_receive_nonmp_frame() expects
+	 * decompressed protocol field.
 	 */
-	if ((PPP_MP_CB(skb)->BEbits & B) && (skb->data[0] & 1))
-		*(u8 *)skb_push(skb, 1) = 0;
+	if (PPP_MP_CB(skb)->BEbits & B)
+		__ppp_decompress_proto(skb);
 
 	/*
 	 * Expand sequence number to 32 bits, making it as close
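
Protocol-Field-Compression drops the leading 0x00 of PPP protocol numbers whose low byte is odd, so a receiver recognizes a compressed field by bit 0 of the first byte and reinserts the 0x00, which is exactly what __ppp_decompress_proto() does with skb_push(). A self-contained demonstration of the byte-level rule, independent of the skb helpers above:

#include <stdint.h>
#include <stdio.h>

/* Return the 16-bit PPP protocol and how many header bytes it used. */
static uint16_t ppp_proto(const uint8_t *p, int *consumed)
{
	if (p[0] & 0x01) {	/* PFC: single odd byte, 0x00 implied */
		*consumed = 1;
		return p[0];
	}
	*consumed = 2;		/* uncompressed: two bytes */
	return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
	const uint8_t compressed[] = { 0x21 };		/* IPv4, PFC */
	const uint8_t plain[] = { 0xc0, 0x21 };		/* LCP, never compressed */
	int n;

	printf("0x%04x (%d byte)\n", ppp_proto(compressed, &n), n);
	printf("0x%04x (%d bytes)\n", ppp_proto(plain, &n), n);
	return 0;
}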
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index 047f6c6..d02ba24 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -709,11 +709,10 @@
 		p = skb_pull(skb, 2);
 	}
 
-	/* decompress protocol field if compressed */
-	if (p[0] & 1) {
-		/* protocol is compressed */
-		*(u8 *)skb_push(skb, 1) = 0;
-	} else if (skb->len < 2)
+	/* PPP packet length should be >= 2 bytes when protocol field is not
+	 * compressed.
+	 */
+	if (!(p[0] & 0x01) && skb->len < 2)
 		goto err;
 
 	/* queue the frame to be processed */
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 62dc564..f22639f 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -445,6 +445,7 @@
 	if (pskb_trim_rcsum(skb, len))
 		goto drop;
 
+	ph = pppoe_hdr(skb);
 	pn = pppoe_pernet(dev_net(dev));
 
 	/* Note that get_item does a sock_hold(), so sk_pppox(po)
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 67ffe74..8f09edd 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -325,11 +325,6 @@
 			skb_pull(skb, 2);
 		}
 
-		if ((*skb->data) & 1) {
-			/* protocol is compressed */
-			*(u8 *)skb_push(skb, 1) = 0;
-		}
-
 		skb->ip_summed = CHECKSUM_NONE;
 		skb_set_network_header(skb, skb->head-skb->data);
 		ppp_input(&po->chan, skb);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 1cd8728..0baade2 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -859,10 +859,6 @@
 		err = 0;
 	}
 
-	rcu_assign_pointer(tfile->tun, tun);
-	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
-	tun->numqueues++;
-
 	if (tfile->detached) {
 		tun_enable_queue(tfile);
 	} else {
@@ -870,12 +866,18 @@
 		tun_napi_init(tun, tfile, napi, napi_frags);
 	}
 
-	tun_set_real_num_queues(tun);
-
 	/* device is allowed to go away first, so no need to hold extra
 	 * refcnt.
 	 */
 
+	/* Publish tfile->tun and tun->tfiles only after we've fully
+	 * initialized tfile; otherwise we risk using a half-initialized
+	 * object.
+	 */
+	rcu_assign_pointer(tfile->tun, tun);
+	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
+	tun->numqueues++;
+	tun_set_real_num_queues(tun);
 out:
 	return err;
 }
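
The tun fix works because rcu_assign_pointer() carries the release barrier that orders every earlier initialization store before the pointer becomes visible; publishing first, as the old code did, let readers find a half-built tfile. The publish-after-init pattern in general form, as a sketch:

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct conn {
	int state;
	void *buf;
};

static struct conn __rcu *active_conn;

static int publish_conn(void)
{
	struct conn *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return -ENOMEM;

	/* fully initialize first ... */
	c->state = 1;
	c->buf = NULL;

	/* ... then publish; rcu_dereference() readers never see
	 * a half-initialized object.
	 */
	rcu_assign_pointer(active_conn, c);

	return 0;
}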
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 184c24b..d6916f7 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2807,6 +2807,12 @@
 		return -EIO;
 	}
 
+	/* check if we have a valid interface */
+	if (if_num > 16) {
+		kfree(config_data);
+		return -EINVAL;
+	}
+
 	switch (config_data[if_num]) {
 	case 0x0:
 		result = 0;
@@ -2877,10 +2883,18 @@
 
 	/* Get the interface/port specification from either driver_info or from
 	 * the device itself */
-	if (id->driver_info)
+	if (id->driver_info) {
+		/* if_num is controlled by the device, driver_info is a
+		 * 0-terminated array. Make sure the access is in bounds! */
+		for (i = 0; i <= if_num; ++i)
+			if (((u32 *)(id->driver_info))[i] == 0)
+				goto exit;
 		port_spec = ((u32 *)(id->driver_info))[if_num];
-	else
+	} else {
 		port_spec = hso_get_config_data(interface);
+		if (port_spec < 0)
+			goto exit;
+	}
 
 	/* Check if we need to switch to alt interfaces prior to port
 	 * configuration */
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index c3c9ba4..d2f94ea 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1,18 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Copyright (C) 2015 Microchip Technology
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 #include <linux/version.h>
 #include <linux/module.h>
@@ -948,11 +936,9 @@
 	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
 
 	if (ret == 0) {
-		if (sig == OTP_INDICATOR_1)
-			offset = offset;
-		else if (sig == OTP_INDICATOR_2)
+		if (sig == OTP_INDICATOR_2)
 			offset += 0x100;
-		else
+		else if (sig != OTP_INDICATOR_1)
 			ret = -EINVAL;
 		if (!ret)
 			ret = lan78xx_read_raw_otp(dev, offset, length, data);
@@ -1027,7 +1013,7 @@
 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
 				    int index, u8 addr[ETH_ALEN])
 {
-	u32	temp;
+	u32 temp;
 
 	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
 		temp = addr[3];
@@ -1838,8 +1824,7 @@
 
 	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
 	ret = of_mdiobus_register(dev->mdiobus, node);
-	if (node)
-		of_node_put(node);
+	of_node_put(node);
 	if (ret) {
 		netdev_err(dev->net, "can't register MDIO bus\n");
 		goto exit1;
@@ -2335,6 +2320,10 @@
 	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
 	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
 
+	/* Added to support MAC address changes */
+	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+
 	return 0;
 }
 
@@ -2693,7 +2682,7 @@
 
 static int lan78xx_stop(struct net_device *net)
 {
-	struct lan78xx_net		*dev = netdev_priv(net);
+	struct lan78xx_net *dev = netdev_priv(net);
 
 	if (timer_pending(&dev->stat_monitor))
 		del_timer_sync(&dev->stat_monitor);
@@ -2943,6 +2932,11 @@
 	int i;
 
 	ret = lan78xx_get_endpoints(dev, intf);
+	if (ret) {
+		netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n",
+			    ret);
+		return ret;
+	}
 
 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
 
@@ -3071,7 +3065,7 @@
 
 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
 {
-	int		status;
+	int status;
 
 	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
 		skb_queue_tail(&dev->rxq_pause, skb);
@@ -3338,9 +3332,9 @@
 	count = 0;
 	length = 0;
 	spin_lock_irqsave(&tqp->lock, flags);
-	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
+	skb_queue_walk(tqp, skb) {
 		if (skb_is_gso(skb)) {
-			if (pkt_cnt) {
+			if (!skb_queue_is_first(tqp, skb)) {
 				/* handle previous packets first */
 				break;
 			}
@@ -3631,10 +3625,10 @@
 
 static void lan78xx_disconnect(struct usb_interface *intf)
 {
-	struct lan78xx_net		*dev;
-	struct usb_device		*udev;
-	struct net_device		*net;
-	struct phy_device		*phydev;
+	struct lan78xx_net *dev;
+	struct usb_device *udev;
+	struct net_device *net;
+	struct phy_device *phydev;
 
 	dev = usb_get_intfdata(intf);
 	usb_set_intfdata(intf, NULL);
@@ -3752,7 +3746,6 @@
 	ret = lan78xx_bind(dev, intf);
 	if (ret < 0)
 		goto out2;
-	strcpy(netdev->name, "eth%d");
 
 	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
 		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 72a55b6..735ad83 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -123,6 +123,7 @@
 	dev->addr_len        = 0;
 	dev->flags           = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
 	dev->netdev_ops      = &qmimux_netdev_ops;
+	dev->mtu             = 1500;
 	dev->needs_free_netdev = true;
 }
 
@@ -151,17 +152,18 @@
 
 static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 {
-	unsigned int len, offset = sizeof(struct qmimux_hdr);
+	unsigned int len, offset = 0;
 	struct qmimux_hdr *hdr;
 	struct net_device *net;
 	struct sk_buff *skbn;
+	u8 qmimux_hdr_sz = sizeof(*hdr);
 
-	while (offset < skb->len) {
-		hdr = (struct qmimux_hdr *)skb->data;
+	while (offset + qmimux_hdr_sz < skb->len) {
+		hdr = (struct qmimux_hdr *)(skb->data + offset);
 		len = be16_to_cpu(hdr->pkt_len);
 
 		/* drop the packet, bogus length */
-		if (offset + len > skb->len)
+		if (offset + len + qmimux_hdr_sz > skb->len)
 			return 0;
 
 		/* control packet, we do not know what to do */
@@ -176,7 +178,7 @@
 			return 0;
 		skbn->dev = net;
 
-		switch (skb->data[offset] & 0xf0) {
+		switch (skb->data[offset + qmimux_hdr_sz] & 0xf0) {
 		case 0x40:
 			skbn->protocol = htons(ETH_P_IP);
 			break;
@@ -188,12 +190,12 @@
 			goto skip;
 		}
 
-		skb_put_data(skbn, skb->data + offset, len);
+		skb_put_data(skbn, skb->data + offset + qmimux_hdr_sz, len);
 		if (netif_rx(skbn) != NET_RX_SUCCESS)
 			return 0;
 
 skip:
-		offset += len + sizeof(struct qmimux_hdr);
+		offset += len + qmimux_hdr_sz;
 	}
 	return 1;
 }
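
The qmimux_rx_fixup() change walks the aggregate with an explicit offset and verifies that both the mux header and the advertised payload fit inside the buffer before touching either, instead of trusting hdr->pkt_len. The same loop shape in standalone form, with an invented 4-byte header holding a big-endian length at bytes 2..3:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define HDR_SZ 4	/* illustrative mux header size */

static void walk(const uint8_t *buf, size_t buf_len)
{
	size_t off = 0;

	while (off + HDR_SZ < buf_len) {
		size_t len = ((size_t)buf[off + 2] << 8) | buf[off + 3];

		if (off + HDR_SZ + len > buf_len)	/* bogus length: stop */
			return;

		printf("frame at %zu, %zu payload bytes\n", off + HDR_SZ, len);
		off += HDR_SZ + len;
	}
}

int main(void)
{
	const uint8_t agg[] = { 0, 1, 0, 2, 0x45, 0x00,	/* 2-byte frame */
				0, 1, 0, 1, 0xff };	/* 1-byte frame */

	walk(agg, sizeof(agg));
	return 0;
}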
@@ -1117,6 +1119,7 @@
 	{QMI_FIXED_INTF(0x1435, 0xd181, 4)},	/* Wistron NeWeb D18Q1 */
 	{QMI_FIXED_INTF(0x1435, 0xd181, 5)},	/* Wistron NeWeb D18Q1 */
 	{QMI_FIXED_INTF(0x1435, 0xd191, 4)},	/* Wistron NeWeb D19Q1 */
+	{QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)},	/* Fibocom NL668 series */
 	{QMI_FIXED_INTF(0x16d8, 0x6003, 0)},	/* CMOTech 6003 */
 	{QMI_FIXED_INTF(0x16d8, 0x6007, 0)},	/* CMOTech CHE-628S */
 	{QMI_FIXED_INTF(0x16d8, 0x6008, 0)},	/* CMOTech CMU-301 */
@@ -1229,6 +1232,7 @@
 	{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)},	/* Telit ME910 dual modem */
 	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */
 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)},	/* Telit LE920, LE920A4 */
+	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)},	/* Telit LN940 series */
 	{QMI_FIXED_INTF(0x1c9e, 0x9801, 3)},	/* Telewell TW-3G HSPA+ */
 	{QMI_FIXED_INTF(0x1c9e, 0x9803, 4)},	/* Telewell TW-3G HSPA+ */
 	{QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)},	/* XS Stick W100-2 from 4G Systems */
@@ -1263,6 +1267,7 @@
 	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)},	/* Quectel EC21 Mini PCIe */
 	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)},	/* Quectel EG91 */
 	{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)},	/* Quectel BG96 */
+	{QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)},	/* Fibocom NL678 series */
 
 	/* 4. Gobi 1000 devices */
 	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */
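
The qmimux_rx_fixup() change above fixes two off-by-header bugs: the
loop must stop while a full header still fits in the buffer, and the
length check must account for the header it just parsed. A simplified
sketch of the corrected bounds logic (read_len() and deliver() are
hypothetical stand-ins for the be16 pkt_len read and the netif_rx()
hand-off):

	static int walk_mux_records(const u8 *data, unsigned int total,
				    unsigned int hdr_sz)
	{
		unsigned int offset = 0, len;

		while (offset + hdr_sz < total) {
			len = read_len(data + offset);
			/* record must fit entirely inside the buffer */
			if (offset + hdr_sz + len > total)
				return 0;	/* bogus length: drop it all */
			deliver(data + offset + hdr_sz, len);
			offset += hdr_sz + len;
		}
		return 1;
	}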
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index ad14fbf..42feaa4 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -57,6 +57,8 @@
 #define VIRTIO_XDP_TX		BIT(0)
 #define VIRTIO_XDP_REDIR	BIT(1)
 
+#define VIRTIO_XDP_FLAG	BIT(0)
+
 /* RX packet size EWMA. The average packet size is used to determine the packet
  * buffer size when refilling RX rings. As the entire RX ring may be refilled
  * at once, the weight is chosen so that the EWMA will be insensitive to short-
@@ -251,6 +253,21 @@
 	char padding[4];
 };
 
+static bool is_xdp_frame(void *ptr)
+{
+	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
+}
+
+static void *xdp_to_ptr(struct xdp_frame *ptr)
+{
+	return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
+}
+
+static struct xdp_frame *ptr_to_xdp(void *ptr)
+{
+	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
+}
+
 /* Converting between virtqueue no. and kernel tx/rx queue no.
  * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
  */
@@ -461,7 +478,8 @@
 
 	sg_init_one(sq->sg, xdpf->data, xdpf->len);
 
-	err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdpf, GFP_ATOMIC);
+	err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
+				   GFP_ATOMIC);
 	if (unlikely(err))
 		return -ENOSPC; /* Caller handle free/refcnt */
 
@@ -481,15 +499,22 @@
 {
 	struct virtnet_info *vi = netdev_priv(dev);
 	struct receive_queue *rq = vi->rq;
-	struct xdp_frame *xdpf_sent;
 	struct bpf_prog *xdp_prog;
 	struct send_queue *sq;
 	unsigned int len;
 	int drops = 0;
 	int kicks = 0;
 	int ret, err;
+	void *ptr;
 	int i;
 
+	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
+	 * indicates that XDP resources have been successfully allocated.
+	 */
+	xdp_prog = rcu_dereference(rq->xdp_prog);
+	if (!xdp_prog)
+		return -ENXIO;
+
 	sq = virtnet_xdp_sq(vi);
 
 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
@@ -498,19 +523,13 @@
 		goto out;
 	}
 
-	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
-	 * indicate XDP resources have been successfully allocated.
-	 */
-	xdp_prog = rcu_dereference(rq->xdp_prog);
-	if (!xdp_prog) {
-		ret = -ENXIO;
-		drops = n;
-		goto out;
-	}
-
 	/* Free up any pending old buffers before queueing new ones. */
-	while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
-		xdp_return_frame(xdpf_sent);
+	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+		if (likely(is_xdp_frame(ptr)))
+			xdp_return_frame(ptr_to_xdp(ptr));
+		else
+			napi_consume_skb(ptr, false);
+	}
 
 	for (i = 0; i < n; i++) {
 		struct xdp_frame *xdpf = frames[i];
@@ -1329,20 +1348,28 @@
 	return stats.packets;
 }
 
-static void free_old_xmit_skbs(struct send_queue *sq)
+static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
 {
-	struct sk_buff *skb;
 	unsigned int len;
 	unsigned int packets = 0;
 	unsigned int bytes = 0;
+	void *ptr;
 
-	while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
-		pr_debug("Sent skb %p\n", skb);
+	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+		if (likely(!is_xdp_frame(ptr))) {
+			struct sk_buff *skb = ptr;
 
-		bytes += skb->len;
+			pr_debug("Sent skb %p\n", skb);
+
+			bytes += skb->len;
+			napi_consume_skb(skb, in_napi);
+		} else {
+			struct xdp_frame *frame = ptr_to_xdp(ptr);
+
+			bytes += frame->len;
+			xdp_return_frame(frame);
+		}
 		packets++;
-
-		dev_consume_skb_any(skb);
 	}
 
 	/* Avoid overhead when no packets have been processed
@@ -1357,6 +1384,16 @@
 	u64_stats_update_end(&sq->stats.syncp);
 }
 
+static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
+{
+	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
+		return false;
+	else if (q < vi->curr_queue_pairs)
+		return true;
+	else
+		return false;
+}
+
 static void virtnet_poll_cleantx(struct receive_queue *rq)
 {
 	struct virtnet_info *vi = rq->vq->vdev->priv;
@@ -1364,11 +1401,11 @@
 	struct send_queue *sq = &vi->sq[index];
 	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
 
-	if (!sq->napi.weight)
+	if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
 		return;
 
 	if (__netif_tx_trylock(txq)) {
-		free_old_xmit_skbs(sq);
+		free_old_xmit_skbs(sq, true);
 		__netif_tx_unlock(txq);
 	}
 
@@ -1441,10 +1478,18 @@
 {
 	struct send_queue *sq = container_of(napi, struct send_queue, napi);
 	struct virtnet_info *vi = sq->vq->vdev->priv;
-	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
+	unsigned int index = vq2txq(sq->vq);
+	struct netdev_queue *txq;
 
+	if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
+		/* We don't need to enable cb for XDP */
+		napi_complete_done(napi, 0);
+		return 0;
+	}
+
+	txq = netdev_get_tx_queue(vi->dev, index);
 	__netif_tx_lock(txq, raw_smp_processor_id());
-	free_old_xmit_skbs(sq);
+	free_old_xmit_skbs(sq, true);
 	__netif_tx_unlock(txq);
 
 	virtqueue_napi_complete(napi, sq->vq, 0);
@@ -1513,7 +1558,7 @@
 	bool use_napi = sq->napi.weight;
 
 	/* Free up any pending old buffers before queueing new ones. */
-	free_old_xmit_skbs(sq);
+	free_old_xmit_skbs(sq, false);
 
 	if (use_napi && kick)
 		virtqueue_enable_cb_delayed(sq->vq);
@@ -1556,7 +1601,7 @@
 		if (!use_napi &&
 		    unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
 			/* More just got used, free them then recheck. */
-			free_old_xmit_skbs(sq);
+			free_old_xmit_skbs(sq, false);
 			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
 				netif_start_subqueue(dev, qnum);
 				virtqueue_disable_cb(sq->vq);
@@ -2345,6 +2390,10 @@
 		return -ENOMEM;
 	}
 
+	old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
+	if (!prog && !old_prog)
+		return 0;
+
 	if (prog) {
 		prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
 		if (IS_ERR(prog))
@@ -2352,36 +2401,62 @@
 	}
 
 	/* Make sure NAPI is not using any XDP TX queues for RX. */
-	if (netif_running(dev))
-		for (i = 0; i < vi->max_queue_pairs; i++)
+	if (netif_running(dev)) {
+		for (i = 0; i < vi->max_queue_pairs; i++) {
 			napi_disable(&vi->rq[i].napi);
+			virtnet_napi_tx_disable(&vi->sq[i].napi);
+		}
+	}
 
-	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
+	if (!prog) {
+		for (i = 0; i < vi->max_queue_pairs; i++) {
+			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
+			if (i == 0)
+				virtnet_restore_guest_offloads(vi);
+		}
+		synchronize_net();
+	}
+
 	err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
 	if (err)
 		goto err;
+	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
 	vi->xdp_queue_pairs = xdp_qp;
 
-	for (i = 0; i < vi->max_queue_pairs; i++) {
-		old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
-		rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
-		if (i == 0) {
-			if (!old_prog)
+	if (prog) {
+		for (i = 0; i < vi->max_queue_pairs; i++) {
+			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
+			if (i == 0 && !old_prog)
 				virtnet_clear_guest_offloads(vi);
-			if (!prog)
-				virtnet_restore_guest_offloads(vi);
 		}
+	}
+
+	for (i = 0; i < vi->max_queue_pairs; i++) {
 		if (old_prog)
 			bpf_prog_put(old_prog);
-		if (netif_running(dev))
+		if (netif_running(dev)) {
 			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
+					       &vi->sq[i].napi);
+		}
 	}
 
 	return 0;
 
 err:
-	for (i = 0; i < vi->max_queue_pairs; i++)
-		virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+	if (!prog) {
+		virtnet_clear_guest_offloads(vi);
+		for (i = 0; i < vi->max_queue_pairs; i++)
+			rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
+	}
+
+	if (netif_running(dev)) {
+		for (i = 0; i < vi->max_queue_pairs; i++) {
+			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
+					       &vi->sq[i].napi);
+		}
+	}
 	if (prog)
 		bpf_prog_sub(prog, vi->max_queue_pairs - 1);
 	return err;
@@ -2537,16 +2612,6 @@
 			put_page(vi->rq[i].alloc_frag.page);
 }
 
-static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
-{
-	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
-		return false;
-	else if (q < vi->curr_queue_pairs)
-		return true;
-	else
-		return false;
-}
-
 static void free_unused_bufs(struct virtnet_info *vi)
 {
 	void *buf;
@@ -2555,10 +2620,10 @@
 	for (i = 0; i < vi->max_queue_pairs; i++) {
 		struct virtqueue *vq = vi->sq[i].vq;
 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
-			if (!is_xdp_raw_buffer_queue(vi, i))
+			if (!is_xdp_frame(buf))
 				dev_kfree_skb(buf);
 			else
-				put_page(virt_to_head_page(buf));
+				xdp_return_frame(ptr_to_xdp(buf));
 		}
 	}
 
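The virtio_net changes above let skbs and XDP frames share one TX
virtqueue, so each completion must be classified from a bare void *.
Because both object types are at least word aligned, bit 0 of the
pointer is free to carry the type, which is what the
is_xdp_frame()/xdp_to_ptr()/ptr_to_xdp() helpers encode. A generic
sketch of the tagged-pointer technique (TAG_BIT and the helper names
are illustrative, not the driver's):

	#define TAG_BIT	1UL	/* free because allocations are word aligned */

	static inline void *tag_ptr(void *p)
	{
		return (void *)((unsigned long)p | TAG_BIT);
	}

	static inline bool ptr_is_tagged(void *p)
	{
		return (unsigned long)p & TAG_BIT;
	}

	static inline void *untag_ptr(void *p)
	{
		return (void *)((unsigned long)p & ~TAG_BIT);
	}
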
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 74c06a5..4f25c2d 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -486,8 +486,10 @@
 
 	/* Cleanup */
 	kfree(sl->xbuff);
+	sl->xbuff = NULL;
 noxbuff:
 	kfree(sl->rbuff);
+	sl->rbuff = NULL;
 norbuff:
 	return -ENOMEM;
 }
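
Clearing sl->xbuff and sl->rbuff after kfree() above matters because
this is an error path: a later teardown of the same structure would
otherwise free the stale pointers a second time. Since kfree(NULL) is a
no-op, the reset makes repeated cleanup idempotent; an illustrative
helper showing the idiom:

	static void x25_release_bufs(struct x25_asy *sl)
	{
		kfree(sl->xbuff);
		sl->xbuff = NULL;	/* safe to free again later */
		kfree(sl->rbuff);
		sl->rbuff = NULL;
	}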
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
index a63c97e..6f10331 100644
--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -71,7 +71,7 @@
 	spin_lock_bh(&ar->data_lock);
 
 	peer = ath10k_peer_find_by_id(ar, peer_id);
-	if (!peer)
+	if (!peer || !peer->sta)
 		goto out;
 
 	arsta = (struct ath10k_sta *)peer->sta->drv_priv;
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 4d1cd90..03d4cc6 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -2589,7 +2589,7 @@
 	rcu_read_lock();
 	spin_lock_bh(&ar->data_lock);
 	peer = ath10k_peer_find_by_id(ar, peer_id);
-	if (!peer) {
+	if (!peer || !peer->sta) {
 		ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
 			    peer_id);
 		goto out;
@@ -2642,7 +2642,7 @@
 	rcu_read_lock();
 	spin_lock_bh(&ar->data_lock);
 	peer = ath10k_peer_find_by_id(ar, peer_id);
-	if (!peer) {
+	if (!peer || !peer->sta) {
 		ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
 			    peer_id);
 		goto out;
diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig
index 7d72b64..760992f 100644
--- a/drivers/net/wireless/ath/wil6210/Kconfig
+++ b/drivers/net/wireless/ath/wil6210/Kconfig
@@ -64,3 +64,14 @@
 	  option if you are interested in debugging the driver.
 
 	  If unsure, say Y to make it easier to debug problems.
+
+config WIL6210_WRITE_IOCTL
+	bool "wil6210 write ioctl to the device"
+	depends on WIL6210
+	default y
+	help
+	  Say Y here to allow write-access from user-space to
+	  the device memory through ioctl. This is useful for
+	  debugging purposes only.
+
+	  If unsure, say N.
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index 9dcdded..2bca87b 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -13,6 +13,7 @@
 wil6210-y += txrx_edma.o
 wil6210-y += debug.o
 wil6210-y += rx_reorder.o
+wil6210-y += ioctl.o
 wil6210-y += fw.o
 wil6210-y += pm.o
 wil6210-y += pmc.o
diff --git a/drivers/net/wireless/ath/wil6210/ioctl.c b/drivers/net/wireless/ath/wil6210/ioctl.c
new file mode 100644
index 0000000..6b65e46
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/ioctl.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/uaccess.h>
+
+#include "wil6210.h"
+#include <uapi/linux/wil6210_uapi.h>
+
+#define wil_hex_dump_ioctl(prefix_str, buf, len) \
+	print_hex_dump_debug("DBG[IOC ]" prefix_str, \
+			     DUMP_PREFIX_OFFSET, 16, 1, buf, len, true)
+#define wil_dbg_ioctl(wil, fmt, arg...) wil_dbg(wil, "DBG[IOC ]" fmt, ##arg)
+
+#define WIL_PRIV_DATA_MAX_LEN	8192
+#define CMD_SET_AP_WPS_P2P_IE	"SET_AP_WPS_P2P_IE"
+
+struct wil_android_priv_data {
+	char *buf;
+	int used_len;
+	int total_len;
+};
+
+static void __iomem *wil_ioc_addr(struct wil6210_priv *wil, u32 addr,
+				  u32 size, u32 op)
+{
+	void __iomem *a;
+	u32 off;
+
+	switch (op & WIL_MMIO_ADDR_MASK) {
+	case WIL_MMIO_ADDR_LINKER:
+		a = wmi_buffer(wil, cpu_to_le32(addr));
+		break;
+	case WIL_MMIO_ADDR_AHB:
+		a = wmi_addr(wil, addr);
+		break;
+	case WIL_MMIO_ADDR_BAR:
+		a = wmi_addr(wil, addr + WIL6210_FW_HOST_OFF);
+		break;
+	default:
+		wil_err(wil, "Unsupported address mode, op = 0x%08x\n", op);
+		return NULL;
+	}
+
+	off = a - wil->csr;
+	if (size > wil->bar_size - off) {
+		wil_err(wil,
+			"Invalid requested block: off(0x%08x) size(0x%08x)\n",
+			off, size);
+		return NULL;
+	}
+
+	return a;
+}
+
+static int wil_ioc_memio_dword(struct wil6210_priv *wil, void __user *data)
+{
+	struct wil_memio io;
+	void __iomem *a;
+	bool need_copy = false;
+
+	if (copy_from_user(&io, data, sizeof(io)))
+		return -EFAULT;
+
+	wil_dbg_ioctl(wil, "IO: addr = 0x%08x val = 0x%08x op = 0x%08x\n",
+		      io.addr, io.val, io.op);
+
+	a = wil_ioc_addr(wil, io.addr, sizeof(u32), io.op);
+	if (!a) {
+		wil_err(wil, "invalid address 0x%08x, op = 0x%08x\n", io.addr,
+			io.op);
+		return -EINVAL;
+	}
+	/* operation */
+	switch (io.op & WIL_MMIO_OP_MASK) {
+	case WIL_MMIO_READ:
+		io.val = readl_relaxed(a);
+		need_copy = true;
+		break;
+#if defined(CONFIG_WIL6210_WRITE_IOCTL)
+	case WIL_MMIO_WRITE:
+		writel_relaxed(io.val, a);
+		wmb(); /* make sure the write propagates to HW */
+		break;
+#endif
+	default:
+		wil_err(wil, "Unsupported operation, op = 0x%08x\n", io.op);
+		return -EINVAL;
+	}
+
+	if (need_copy) {
+		wil_dbg_ioctl(wil,
+			      "IO done: addr(0x%08x) val(0x%08x) op(0x%08x)\n",
+			      io.addr, io.val, io.op);
+		if (copy_to_user(data, &io, sizeof(io)))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int wil_ioc_memio_block(struct wil6210_priv *wil, void __user *data)
+{
+	struct wil_memio_block io;
+	void *block;
+	void __iomem *a;
+	int rc = 0;
+
+	if (copy_from_user(&io, data, sizeof(io)))
+		return -EFAULT;
+
+	wil_dbg_ioctl(wil, "IO: addr = 0x%08x size = 0x%08x op = 0x%08x\n",
+		      io.addr, io.size, io.op);
+
+	/* size */
+	if (io.size > WIL6210_MAX_MEM_SIZE) {
+		wil_err(wil, "size is too large:  0x%08x\n", io.size);
+		return -EINVAL;
+	}
+	if (io.size % 4) {
+		wil_err(wil, "size is not multiple of 4:  0x%08x\n", io.size);
+		return -EINVAL;
+	}
+
+	a = wil_ioc_addr(wil, io.addr, io.size, io.op);
+	if (!a) {
+		wil_err(wil, "invalid address 0x%08x, op = 0x%08x\n", io.addr,
+			io.op);
+		return -EINVAL;
+	}
+
+	block = kmalloc(io.size, GFP_USER);
+	if (!block)
+		return -ENOMEM;
+
+	/* operation */
+	switch (io.op & WIL_MMIO_OP_MASK) {
+	case WIL_MMIO_READ:
+		wil_memcpy_fromio_32(block, a, io.size);
+		wil_hex_dump_ioctl("Read  ", block, io.size);
+		if (copy_to_user((void __user *)(uintptr_t)io.block,
+				 block, io.size)) {
+			rc = -EFAULT;
+			goto out_free;
+		}
+		break;
+#if defined(CONFIG_WIL6210_WRITE_IOCTL)
+	case WIL_MMIO_WRITE:
+		if (copy_from_user(block, (void __user *)(uintptr_t)io.block,
+				   io.size)) {
+			rc = -EFAULT;
+			goto out_free;
+		}
+		wil_memcpy_toio_32(a, block, io.size);
+		wmb(); /* make sure the write propagates to HW */
+		wil_hex_dump_ioctl("Write ", block, io.size);
+		break;
+#endif
+	default:
+		wil_err(wil, "Unsupported operation, op = 0x%08x\n", io.op);
+		rc = -EINVAL;
+		break;
+	}
+
+out_free:
+	kfree(block);
+	return rc;
+}
+
+static int wil_ioc_android(struct wil6210_priv *wil, void __user *data)
+{
+	int rc = 0;
+	char *command;
+	struct wil_android_priv_data priv_data;
+
+	wil_dbg_ioctl(wil, "ioc_android\n");
+
+	if (copy_from_user(&priv_data, data, sizeof(priv_data)))
+		return -EFAULT;
+
+	if (priv_data.total_len <= 0 ||
+	    priv_data.total_len >= WIL_PRIV_DATA_MAX_LEN) {
+		wil_err(wil, "invalid data len %d\n", priv_data.total_len);
+		return -EINVAL;
+	}
+
+	command = kmalloc(priv_data.total_len + 1, GFP_KERNEL);
+	if (!command)
+		return -ENOMEM;
+
+	if (copy_from_user(command, priv_data.buf, priv_data.total_len)) {
+		rc = -EFAULT;
+		goto out_free;
+	}
+
+	/* Make sure the command is NUL-terminated */
+	command[priv_data.total_len] = '\0';
+
+	wil_dbg_ioctl(wil, "ioc_android: command = %s\n", command);
+
+	/* P2P not supported, but WPS is (in AP mode).
+	 * Ignore those in order not to block WPS functionality
+	 * in non-P2P mode.
+	 */
+	if (strncasecmp(command, CMD_SET_AP_WPS_P2P_IE,
+			strlen(CMD_SET_AP_WPS_P2P_IE)) == 0)
+		rc = 0;
+	else
+		rc = -ENOIOCTLCMD;
+
+out_free:
+	kfree(command);
+	return rc;
+}
+
+int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd)
+{
+	int ret;
+
+	ret = wil_pm_runtime_get(wil);
+	if (ret < 0)
+		return ret;
+
+	switch (cmd) {
+	case WIL_IOCTL_MEMIO:
+		ret = wil_ioc_memio_dword(wil, data);
+		break;
+	case WIL_IOCTL_MEMIO_BLOCK:
+		ret = wil_ioc_memio_block(wil, data);
+		break;
+	case (SIOCDEVPRIVATE + 1):
+		ret = wil_ioc_android(wil, data);
+		break;
+	default:
+		wil_dbg_ioctl(wil, "Unsupported IOCTL 0x%04x\n", cmd);
+		wil_pm_runtime_put(wil);
+		return -ENOIOCTLCMD;
+	}
+
+	wil_pm_runtime_put(wil);
+
+	wil_dbg_ioctl(wil, "ioctl(0x%04x) -> %d\n", cmd, ret);
+	return ret;
+}
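
Note the shape of wil_ioctl() above: every command is bracketed by
wil_pm_runtime_get()/wil_pm_runtime_put() so the device cannot
runtime-suspend while an MMIO access is in flight, and the unsupported
default case drops the reference before returning. A condensed sketch
of that bracket (do_hw_access() is a hypothetical dispatch):

	ret = wil_pm_runtime_get(wil);
	if (ret < 0)
		return ret;	/* device could not be resumed */

	ret = do_hw_access(wil, data, cmd);

	wil_pm_runtime_put(wil);	/* dropped on every exit path */
	return ret;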
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 4a424c4..098caeb 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -85,12 +85,20 @@
 	return rc;
 }
 
+static int wil_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+	struct wil6210_priv *wil = ndev_to_wil(ndev);
+
+	return wil_ioctl(wil, ifr->ifr_data, cmd);
+}
+
 static const struct net_device_ops wil_netdev_ops = {
 	.ndo_open		= wil_open,
 	.ndo_stop		= wil_stop,
 	.ndo_start_xmit		= wil_start_xmit,
 	.ndo_set_mac_address	= eth_mac_addr,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_do_ioctl		= wil_do_ioctl,
 };
 
 static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index d224ea5..eaf5a11 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -1372,6 +1372,7 @@
 
 int wil_iftype_nl2wmi(enum nl80211_iftype type);
 
+int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd);
 int wil_request_firmware(struct wil6210_priv *wil, const char *name,
 			 bool load);
 int wil_request_board(struct wil6210_priv *wil, const char *name);
diff --git a/drivers/net/wireless/broadcom/b43/phy_common.c b/drivers/net/wireless/broadcom/b43/phy_common.c
index 85f2ca9..ef3ffa5 100644
--- a/drivers/net/wireless/broadcom/b43/phy_common.c
+++ b/drivers/net/wireless/broadcom/b43/phy_common.c
@@ -616,7 +616,7 @@
 	u8 i;
 	s32 tmp;
 	s8 signx = 1;
-	u32 angle = 0;
+	s32 angle = 0;
 	struct b43_c32 ret = { .i = 39797, .q = 0, };
 
 	while (theta > (180 << 16))
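
The one-character b43 fix above (u32 to s32) matters because the CORDIC
loop repeatedly subtracts from angle: with an unsigned type a negative
intermediate wraps around, and every later comparison or table index is
computed from a huge bogus value. A two-line illustration of the
failure mode:

	u32 u = 0;
	s32 s = 0;

	u -= 1;	/* wraps to 0xffffffff; "u > limit" now misfires */
	s -= 1;	/* stays -1 and compares as intended */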
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 64a794b..6f3faaf 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -5188,10 +5188,17 @@
 	.del_pmk = brcmf_cfg80211_del_pmk,
 };
 
-struct cfg80211_ops *brcmf_cfg80211_get_ops(void)
+struct cfg80211_ops *brcmf_cfg80211_get_ops(struct brcmf_mp_device *settings)
 {
-	return kmemdup(&brcmf_cfg80211_ops, sizeof(brcmf_cfg80211_ops),
+	struct cfg80211_ops *ops;
+
+	ops = kmemdup(&brcmf_cfg80211_ops, sizeof(brcmf_cfg80211_ops),
 		       GFP_KERNEL);
+
+	if (ops && settings->roamoff)
+		ops->update_connect_params = NULL;
+
+	return ops;
 }
 
 struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index a4aec00..9a6287f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -404,7 +404,7 @@
 void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg);
 s32 brcmf_cfg80211_up(struct net_device *ndev);
 s32 brcmf_cfg80211_down(struct net_device *ndev);
-struct cfg80211_ops *brcmf_cfg80211_get_ops(void);
+struct cfg80211_ops *brcmf_cfg80211_get_ops(struct brcmf_mp_device *settings);
 enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp);
 
 struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index b1f702f..860a437 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -1130,7 +1130,7 @@
 
 	brcmf_dbg(TRACE, "Enter\n");
 
-	ops = brcmf_cfg80211_get_ops();
+	ops = brcmf_cfg80211_get_ops(settings);
 	if (!ops)
 		return -ENOMEM;
 
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index 9095b83..9927079 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -641,8 +641,9 @@
 	struct brcmf_fw_request *fwreq;
 	char chipname[12];
 	const char *mp_path;
+	size_t mp_path_len;
 	u32 i, j;
-	char end;
+	char end = '\0';
 	size_t reqsz;
 
 	for (i = 0; i < table_size; i++) {
@@ -667,7 +668,10 @@
 		   mapping_table[i].fw_base, chipname);
 
 	mp_path = brcmf_mp_global.firmware_path;
-	end = mp_path[strlen(mp_path) - 1];
+	mp_path_len = strnlen(mp_path, BRCMF_FW_ALTPATH_LEN);
+	if (mp_path_len)
+		end = mp_path[mp_path_len - 1];
+
 	fwreq->n_items = n_fwnames;
 
 	for (j = 0; j < n_fwnames; j++) {
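
The brcmfmac change above guards the last-character probe: the old
mp_path[strlen(mp_path) - 1] indexes byte -1 when the firmware_path
module parameter is empty, and strnlen() also bounds the scan to the
fixed-size parameter buffer. A minimal sketch of the safe read:

	char end = '\0';
	size_t n = strnlen(mp_path, BRCMF_FW_ALTPATH_LEN);

	if (n)			/* empty string keeps the '\0' default */
		end = mp_path[n - 1];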
diff --git a/drivers/net/wireless/cnss2/Makefile b/drivers/net/wireless/cnss2/Makefile
index 3aeb9b7..4859eb2 100644
--- a/drivers/net/wireless/cnss2/Makefile
+++ b/drivers/net/wireless/cnss2/Makefile
@@ -7,4 +7,4 @@
 cnss2-y += debug.o
 cnss2-y += pci.o
 cnss2-y += power.o
-cnss2-$(CONFIG_CNSS2_QMI) += qmi.o wlan_firmware_service_v01.o
+cnss2-$(CONFIG_CNSS2_QMI) += qmi.o wlan_firmware_service_v01.o coexistence_service_v01.o
diff --git a/drivers/net/wireless/cnss2/coexistence_service_v01.c b/drivers/net/wireless/cnss2/coexistence_service_v01.c
new file mode 100644
index 0000000..1016226
--- /dev/null
+++ b/drivers/net/wireless/cnss2/coexistence_service_v01.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved. */
+
+#include <linux/soc/qcom/qmi.h>
+
+#include "coexistence_service_v01.h"
+
+struct qmi_elem_info coex_antenna_switch_to_wlan_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u64),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct
+				coex_antenna_switch_to_wlan_req_msg_v01,
+				antenna),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info coex_antenna_switch_to_wlan_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+				coex_antenna_switch_to_wlan_resp_msg_v01,
+				resp),
+		.ei_array      = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+				coex_antenna_switch_to_wlan_resp_msg_v01,
+				grant_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u64),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+				coex_antenna_switch_to_wlan_resp_msg_v01,
+				grant),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info coex_antenna_switch_to_mdm_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u64),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct
+				coex_antenna_switch_to_mdm_req_msg_v01,
+				antenna),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info coex_antenna_switch_to_mdm_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+				coex_antenna_switch_to_mdm_resp_msg_v01,
+				resp),
+		.ei_array      = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
diff --git a/drivers/net/wireless/cnss2/coexistence_service_v01.h b/drivers/net/wireless/cnss2/coexistence_service_v01.h
new file mode 100644
index 0000000..03ee7e9
--- /dev/null
+++ b/drivers/net/wireless/cnss2/coexistence_service_v01.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved. */
+
+#ifndef COEXISTENCE_SERVICE_V01_H
+#define COEXISTENCE_SERVICE_V01_H
+
+#define COEX_SERVICE_ID_V01 0x22
+#define COEX_SERVICE_VERS_V01 0x01
+
+#define COEX_SERVICE_MAX_MSG_LEN 8204
+
+#define QMI_COEX_SWITCH_ANTENNA_TO_WLAN_RESP_V01 0x0042
+#define QMI_COEX_SWITCH_ANTENNA_TO_WLAN_REQ_V01 0x0042
+#define QMI_COEX_SWITCH_ANTENNA_TO_MDM_RESP_V01 0x0042
+#define QMI_COEX_SWITCH_ANTENNA_TO_MDM_REQ_V01 0x0042
+
+#define COEX_ANTENNA_BAND_2GHZ_CHAIN0_V01 ((u64)0x0000000000000001ULL)
+#define COEX_ANTENNA_BAND_2GHZ_CHAIN1_V01 ((u64)0x0000000000000002ULL)
+#define COEX_ANTENNA_BAND_5GHZ_CHAIN0_V01 ((u64)0x0000000000000004ULL)
+#define COEX_ANTENNA_BAND_5GHZ_CHAIN1_V01 ((u64)0x0000000000000008ULL)
+
+struct coex_antenna_switch_to_wlan_req_msg_v01 {
+	u64 antenna;
+};
+
+#define COEX_ANTENNA_SWITCH_TO_WLAN_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct qmi_elem_info coex_antenna_switch_to_wlan_req_msg_v01_ei[];
+
+struct coex_antenna_switch_to_wlan_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	u8 grant_valid;
+	u64 grant;
+};
+
+#define COEX_ANTENNA_SWITCH_TO_WLAN_RESP_MSG_V01_MAX_MSG_LEN 18
+extern struct qmi_elem_info coex_antenna_switch_to_wlan_resp_msg_v01_ei[];
+
+struct coex_antenna_switch_to_mdm_req_msg_v01 {
+	u64 antenna;
+};
+
+#define COEX_ANTENNA_SWITCH_TO_MDM_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct qmi_elem_info coex_antenna_switch_to_mdm_req_msg_v01_ei[];
+
+struct coex_antenna_switch_to_mdm_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define COEX_ANTENNA_SWITCH_TO_MDM_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info coex_antenna_switch_to_mdm_resp_msg_v01_ei[];
+
+#endif
diff --git a/drivers/net/wireless/cnss2/debug.c b/drivers/net/wireless/cnss2/debug.c
index 4c58919..546aefd 100644
--- a/drivers/net/wireless/cnss2/debug.c
+++ b/drivers/net/wireless/cnss2/debug.c
@@ -91,6 +91,9 @@
 		case CNSS_DRIVER_DEBUG:
 			seq_puts(s, "DRIVER_DEBUG");
 			continue;
+		case CNSS_COEX_CONNECTED:
+			seq_puts(s, "COEX_CONNECTED");
+			continue;
 		}
 
 		seq_printf(s, "UNKNOWN-%d", i);
diff --git a/drivers/net/wireless/cnss2/debug.h b/drivers/net/wireless/cnss2/debug.h
index 33a4a7e..51c74c5 100644
--- a/drivers/net/wireless/cnss2/debug.h
+++ b/drivers/net/wireless/cnss2/debug.h
@@ -54,6 +54,9 @@
 	} while (0)
 #endif
 
+#define cnss_fatal_err(_fmt, ...)					\
+	cnss_pr_err("fatal: " _fmt, ##__VA_ARGS__)
+
 int cnss_debug_init(void);
 void cnss_debug_deinit(void);
 int cnss_debugfs_create(struct cnss_plat_data *plat_priv);
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index a204e08..b4ad97a 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -356,7 +356,10 @@
 	if (ret)
 		goto out;
 
-	ret = cnss_wlfw_bdf_dnld_send_sync(plat_priv);
+	cnss_wlfw_bdf_dnld_send_sync(plat_priv, CNSS_BDF_REGDB);
+
+	ret = cnss_wlfw_bdf_dnld_send_sync(plat_priv,
+					   plat_priv->ctrl_params.bdf_type);
 	if (ret)
 		goto out;
 
@@ -373,6 +376,38 @@
 	return ret;
 }
 
+static int cnss_request_antenna_sharing(struct cnss_plat_data *plat_priv)
+{
+	int ret = 0;
+
+	if (!plat_priv->antenna) {
+		ret = cnss_wlfw_antenna_switch_send_sync(plat_priv);
+		if (ret)
+			goto out;
+	}
+
+	if (test_bit(CNSS_COEX_CONNECTED, &plat_priv->driver_state)) {
+		ret = coex_antenna_switch_to_wlan_send_sync_msg(plat_priv);
+		if (ret)
+			goto out;
+	}
+
+	ret = cnss_wlfw_antenna_grant_send_sync(plat_priv);
+	if (ret)
+		goto out;
+
+	return 0;
+
+out:
+	return ret;
+}
+
+static void cnss_release_antenna_sharing(struct cnss_plat_data *plat_priv)
+{
+	if (test_bit(CNSS_COEX_CONNECTED, &plat_priv->driver_state))
+		coex_antenna_switch_to_mdm_send_sync_msg(plat_priv);
+}
+
 static int cnss_fw_ready_hdlr(struct cnss_plat_data *plat_priv)
 {
 	int ret = 0;
@@ -393,6 +428,7 @@
 		ret = cnss_wlfw_wlan_mode_send_sync(plat_priv,
 						    CNSS_WALTEST);
 	} else if (test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state)) {
+		cnss_request_antenna_sharing(plat_priv);
 		ret = cnss_wlfw_wlan_mode_send_sync(plat_priv,
 						    CNSS_CALIBRATION);
 	} else if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
@@ -993,6 +1029,11 @@
 		return -EOPNOTSUPP;
 	}
 
+	if (cnss_pci_is_device_down(dev)) {
+		cnss_pr_info("Device is already in a bad state, ignore forced FW assert\n");
+		return 0;
+	}
+
 	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
 		cnss_pr_info("Recovery is already in progress, ignore forced FW assert\n");
 		return 0;
@@ -1021,14 +1062,19 @@
 		return -EOPNOTSUPP;
 	}
 
+	if (cnss_pci_is_device_down(dev)) {
+		cnss_pr_info("Device is already in a bad state, ignore forced collect rddm\n");
+		return 0;
+	}
+
 	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
 		cnss_pr_info("Recovery is already in progress, ignore forced collect rddm\n");
 		return 0;
 	}
 
-	cnss_driver_event_post(plat_priv,
-			       CNSS_DRIVER_EVENT_FORCE_FW_ASSERT,
-			       0, NULL);
+	ret = cnss_bus_force_fw_assert_hdlr(plat_priv);
+	if (ret)
+		return ret;
 
 	reinit_completion(&plat_priv->rddm_complete);
 	ret = wait_for_completion_timeout
@@ -1071,6 +1117,7 @@
 
 	plat_priv->cal_done = true;
 	cnss_wlfw_wlan_mode_send_sync(plat_priv, CNSS_OFF);
+	cnss_release_antenna_sharing(plat_priv);
 	cnss_bus_dev_shutdown(plat_priv);
 	complete(&plat_priv->cal_complete);
 	clear_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
@@ -1696,6 +1743,8 @@
 	if (ret)
 		goto destroy_debugfs;
 
+	cnss_register_coex_service(plat_priv);
+
 	cnss_pr_info("Platform driver probed successfully.\n");
 
 	return 0;
@@ -1731,6 +1780,7 @@
 {
 	struct cnss_plat_data *plat_priv = platform_get_drvdata(plat_dev);
 
+	cnss_unregister_coex_service(plat_priv);
 	cnss_misc_deinit(plat_priv);
 	cnss_debugfs_destroy(plat_priv);
 	cnss_qmi_deinit(plat_priv);
diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h
index d8f2df9..c88684f 100644
--- a/drivers/net/wireless/cnss2/main.h
+++ b/drivers/net/wireless/cnss2/main.h
@@ -166,6 +166,7 @@
 	CNSS_FW_BOOT_RECOVERY,
 	CNSS_DEV_ERR_NOTIFY,
 	CNSS_DRIVER_DEBUG,
+	CNSS_COEX_CONNECTED,
 };
 
 struct cnss_recovery_data {
@@ -206,6 +207,7 @@
 enum cnss_bdf_type {
 	CNSS_BDF_BIN,
 	CNSS_BDF_ELF,
+	CNSS_BDF_REGDB = 4,
 	CNSS_BDF_DUMMY = 255,
 };
 
@@ -281,6 +283,7 @@
 	struct cnss_control_params ctrl_params;
 	u64 antenna;
 	u64 grant;
+	struct qmi_handle coex_qmi;
 };
 
 struct cnss_plat_data *cnss_get_plat_priv(struct platform_device *plat_dev);
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 3688a70..9fedf65 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -335,8 +335,8 @@
 
 	pci_read_config_word(pci_priv->pci_dev, PCI_DEVICE_ID, &device_id);
 	if (device_id != pci_priv->device_id)  {
-		cnss_pr_err("PCI device ID mismatch, link possibly down, current read ID: 0x%x, record ID: 0x%x\n",
-			    device_id, pci_priv->device_id);
+		cnss_fatal_err("PCI device ID mismatch, link possibly down, current read ID: 0x%x, record ID: 0x%x\n",
+			       device_id, pci_priv->device_id);
 		return -EIO;
 	}
 
@@ -683,11 +683,6 @@
 	cnss_pr_dbg("Crash shutdown with driver_state 0x%lx\n",
 		    plat_priv->driver_state);
 
-	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
-		cnss_pr_dbg("Ignore crash shutdown\n");
-		return;
-	}
-
 	cnss_pci_collect_dump_info(pci_priv, true);
 }
 
@@ -1066,7 +1061,7 @@
 		pci_priv->pci_link_down_ind = true;
 		spin_unlock_irqrestore(&pci_link_down_lock, flags);
 
-		cnss_pr_err("PCI link down, schedule recovery!\n");
+		cnss_fatal_err("PCI link down, schedule recovery!\n");
 		if (pci_dev->device == QCA6174_DEVICE_ID)
 			disable_irq(pci_dev->irq);
 		cnss_schedule_recovery(&pci_dev->dev, CNSS_REASON_LINK_DOWN);
@@ -1215,6 +1210,9 @@
 	if (driver_ops && driver_ops->suspend_noirq)
 		ret = driver_ops->suspend_noirq(pci_dev);
 
+	if (pci_priv->disable_pc && !pci_dev->state_saved)
+		pci_save_state(pci_dev);
+
 out:
 	return ret;
 }
@@ -1677,17 +1675,12 @@
 	if (!plat_priv)
 		return -ENODEV;
 
-	if (cnss_pci_is_device_down(&pci_priv->pci_dev->dev)) {
-		cnss_pr_info("Device is already in bad state, ignore force assert\n");
-		return 0;
-	}
-
 	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_TRIGGER_RDDM);
 	if (ret) {
 		cnss_pr_err("Failed to trigger RDDM, err = %d\n", ret);
 		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
 				       CNSS_REASON_DEFAULT);
-		return 0;
+		return ret;
 	}
 
 	if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
@@ -1703,7 +1696,7 @@
 	if (!pci_priv)
 		return;
 
-	cnss_pr_err("Timeout waiting for FW ready indication\n");
+	cnss_fatal_err("Timeout waiting for FW ready indication\n");
 
 	cnss_schedule_recovery(&pci_priv->pci_dev->dev,
 			       CNSS_REASON_TIMEOUT);
@@ -2234,7 +2227,7 @@
 	if (!pci_priv)
 		return;
 
-	cnss_pr_err("Timeout waiting for RDDM notification\n");
+	cnss_fatal_err("Timeout waiting for RDDM notification\n");
 
 	cnss_schedule_recovery(&pci_priv->pci_dev->dev, CNSS_REASON_TIMEOUT);
 }
diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c
index 0a13eed..dc60e9e 100644
--- a/drivers/net/wireless/cnss2/qmi.c
+++ b/drivers/net/wireless/cnss2/qmi.c
@@ -17,10 +17,12 @@
 #define ELF_BDF_FILE_NAME_PREFIX	"bdwlan.e"
 #define BIN_BDF_FILE_NAME		"bdwlan.bin"
 #define BIN_BDF_FILE_NAME_PREFIX	"bdwlan.b"
+#define REGDB_FILE_NAME			"regdb.bin"
 #define DUMMY_BDF_FILE_NAME		"bdwlan.dmy"
 
 #define QMI_WLFW_TIMEOUT_MS		(plat_priv->ctrl_params.qmi_timeout)
 #define QMI_WLFW_TIMEOUT_JF		msecs_to_jiffies(QMI_WLFW_TIMEOUT_MS)
+#define COEX_TIMEOUT			QMI_WLFW_TIMEOUT_JF
 
 #define QMI_WLFW_MAX_RECV_BUF_SIZE	SZ_8K
 
@@ -399,7 +401,8 @@
 	return ret;
 }
 
-int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv)
+int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv,
+				 u32 bdf_type)
 {
 	struct wlfw_bdf_download_req_msg_v01 *req;
 	struct wlfw_bdf_download_resp_msg_v01 *resp;
@@ -410,8 +413,8 @@
 	unsigned int remaining;
 	int ret = 0;
 
-	cnss_pr_dbg("Sending BDF download message, state: 0x%lx\n",
-		    plat_priv->driver_state);
+	cnss_pr_dbg("Sending BDF download message, state: 0x%lx, type: %d\n",
+		    plat_priv->driver_state, bdf_type);
 
 	req = kzalloc(sizeof(*req), GFP_KERNEL);
 	if (!req)
@@ -423,7 +426,7 @@
 		return -ENOMEM;
 	}
 
-	switch (plat_priv->ctrl_params.bdf_type) {
+	switch (bdf_type) {
 	case CNSS_BDF_ELF:
 		if (plat_priv->board_info.board_id == 0xFF)
 			snprintf(filename, sizeof(filename), ELF_BDF_FILE_NAME);
@@ -448,6 +451,9 @@
 				 BIN_BDF_FILE_NAME_PREFIX "%04x",
 				 plat_priv->board_info.board_id);
 		break;
+	case CNSS_BDF_REGDB:
+		snprintf(filename, sizeof(filename), REGDB_FILE_NAME);
+		break;
 	case CNSS_BDF_DUMMY:
 		cnss_pr_dbg("CNSS_BDF_DUMMY is set, sending dummy BDF\n");
 		snprintf(filename, sizeof(filename), DUMMY_BDF_FILE_NAME);
@@ -541,7 +547,8 @@
 	if (plat_priv->ctrl_params.bdf_type != CNSS_BDF_DUMMY)
 		release_firmware(fw_entry);
 err_req_fw:
-	CNSS_ASSERT(0);
+	if (bdf_type != CNSS_BDF_REGDB)
+		CNSS_ASSERT(0);
 	kfree(req);
 	kfree(resp);
 	return ret;
@@ -1515,3 +1522,201 @@
 {
 	qmi_handle_release(&plat_priv->qmi_wlfw);
 }
+
+int coex_antenna_switch_to_wlan_send_sync_msg(struct cnss_plat_data *plat_priv)
+{
+	int ret;
+	struct coex_antenna_switch_to_wlan_req_msg_v01 *req;
+	struct coex_antenna_switch_to_wlan_resp_msg_v01 *resp;
+	struct qmi_txn txn;
+
+	if (!plat_priv)
+		return -ENODEV;
+
+	cnss_pr_dbg("Sending coex antenna switch_to_wlan\n");
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		kfree(req);
+		return -ENOMEM;
+	}
+
+	req->antenna = plat_priv->antenna;
+
+	ret = qmi_txn_init(&plat_priv->coex_qmi, &txn,
+			   coex_antenna_switch_to_wlan_resp_msg_v01_ei, resp);
+	if (ret < 0) {
+		cnss_pr_err("Fail to init txn for coex antenna switch_to_wlan resp %d\n",
+			    ret);
+		goto out;
+	}
+
+	ret = qmi_send_request
+		(&plat_priv->coex_qmi, NULL, &txn,
+		 QMI_COEX_SWITCH_ANTENNA_TO_WLAN_REQ_V01,
+		 COEX_ANTENNA_SWITCH_TO_WLAN_REQ_MSG_V01_MAX_MSG_LEN,
+		 coex_antenna_switch_to_wlan_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		cnss_pr_err("Fail to send coex antenna switch_to_wlan req %d\n",
+			    ret);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, COEX_TIMEOUT);
+	if (ret < 0) {
+		cnss_pr_err("Coex antenna switch_to_wlan resp wait failed with ret %d\n",
+			    ret);
+		goto out;
+	} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		cnss_pr_err("Coex antenna switch_to_wlan request rejected, result:%d error:%d\n",
+			    resp->resp.result, resp->resp.error);
+		ret = -resp->resp.result;
+		goto out;
+	}
+
+	if (resp->grant_valid)
+		plat_priv->grant = resp->grant;
+
+	cnss_pr_dbg("Coex antenna grant: 0x%llx\n", resp->grant);
+
+	kfree(resp);
+	kfree(req);
+	return 0;
+
+out:
+	kfree(resp);
+	kfree(req);
+	return ret;
+}
+
+int coex_antenna_switch_to_mdm_send_sync_msg(struct cnss_plat_data *plat_priv)
+{
+	int ret;
+	struct coex_antenna_switch_to_mdm_req_msg_v01 *req;
+	struct coex_antenna_switch_to_mdm_resp_msg_v01 *resp;
+	struct qmi_txn txn;
+
+	if (!plat_priv)
+		return -ENODEV;
+
+	cnss_pr_dbg("Sending coex antenna switch_to_mdm\n");
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		kfree(req);
+		return -ENOMEM;
+	}
+
+	req->antenna = plat_priv->antenna;
+
+	ret = qmi_txn_init(&plat_priv->coex_qmi, &txn,
+			   coex_antenna_switch_to_mdm_resp_msg_v01_ei, resp);
+	if (ret < 0) {
+		cnss_pr_err("Fail to init txn for coex antenna switch_to_mdm resp %d\n",
+			    ret);
+		goto out;
+	}
+
+	ret = qmi_send_request
+		(&plat_priv->coex_qmi, NULL, &txn,
+		 QMI_COEX_SWITCH_ANTENNA_TO_MDM_REQ_V01,
+		 COEX_ANTENNA_SWITCH_TO_MDM_REQ_MSG_V01_MAX_MSG_LEN,
+		 coex_antenna_switch_to_mdm_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		cnss_pr_err("Fail to send coex antenna switch_to_mdm req %d\n",
+			    ret);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, COEX_TIMEOUT);
+	if (ret < 0) {
+		cnss_pr_err("Coex antenna switch_to_mdm resp wait failed with ret %d\n",
+			    ret);
+		goto out;
+	} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		cnss_pr_err("Coex antenna switch_to_mdm request rejected, result:%d error:%d\n",
+			    resp->resp.result, resp->resp.error);
+		ret = -resp->resp.result;
+		goto out;
+	}
+
+	kfree(resp);
+	kfree(req);
+	return 0;
+
+out:
+	kfree(resp);
+	kfree(req);
+	return ret;
+}
+
+static int coex_new_server(struct qmi_handle *qmi,
+			   struct qmi_service *service)
+{
+	struct cnss_plat_data *plat_priv =
+		container_of(qmi, struct cnss_plat_data, coex_qmi);
+	struct sockaddr_qrtr sq = { 0 };
+	int ret = 0;
+
+	cnss_pr_dbg("COEX server arrived: node %u port %u\n",
+		    service->node, service->port);
+
+	sq.sq_family = AF_QIPCRTR;
+	sq.sq_node = service->node;
+	sq.sq_port = service->port;
+	ret = kernel_connect(qmi->sock, (struct sockaddr *)&sq, sizeof(sq), 0);
+	if (ret < 0) {
+		cnss_pr_err("Fail to connect to remote service port\n");
+		return ret;
+	}
+
+	set_bit(CNSS_COEX_CONNECTED, &plat_priv->driver_state);
+	cnss_pr_dbg("COEX server connected: 0x%lx\n",
+		    plat_priv->driver_state);
+	return 0;
+}
+
+static void coex_del_server(struct qmi_handle *qmi,
+			    struct qmi_service *service)
+{
+	struct cnss_plat_data *plat_priv =
+		container_of(qmi, struct cnss_plat_data, coex_qmi);
+
+	cnss_pr_dbg("COEX server exit\n");
+
+	clear_bit(CNSS_COEX_CONNECTED, &plat_priv->driver_state);
+}
+
+static struct qmi_ops coex_qmi_ops = {
+	.new_server = coex_new_server,
+	.del_server = coex_del_server,
+};
+
+int cnss_register_coex_service(struct cnss_plat_data *plat_priv)
+{
+	int ret;
+
+	ret = qmi_handle_init(&plat_priv->coex_qmi,
+			      COEX_SERVICE_MAX_MSG_LEN,
+			      &coex_qmi_ops, NULL);
+	if (ret < 0)
+		return ret;
+
+	ret = qmi_add_lookup(&plat_priv->coex_qmi, COEX_SERVICE_ID_V01,
+			     COEX_SERVICE_VERS_V01, 0);
+	return ret;
+}
+
+void cnss_unregister_coex_service(struct cnss_plat_data *plat_priv)
+{
+	qmi_handle_release(&plat_priv->coex_qmi);
+}
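
Both coex calls above follow the standard kernel QMI client shape:
initialize a transaction bound to the response buffer, send the
request, then block on the reply with a timeout, cancelling the
transaction if the send itself fails. A condensed sketch of that round
trip (the req/resp/ei names stand in for the coex message definitions):

	ret = qmi_txn_init(&plat_priv->coex_qmi, &txn, resp_ei, resp);
	if (ret < 0)
		goto out;		/* no transaction to clean up */

	ret = qmi_send_request(&plat_priv->coex_qmi, NULL, &txn,
			       msg_id, max_msg_len, req_ei, req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);	/* nothing will complete it */
		goto out;
	}

	ret = qmi_txn_wait(&txn, COEX_TIMEOUT);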
diff --git a/drivers/net/wireless/cnss2/qmi.h b/drivers/net/wireless/cnss2/qmi.h
index ad3c7d2..137d549 100644
--- a/drivers/net/wireless/cnss2/qmi.h
+++ b/drivers/net/wireless/cnss2/qmi.h
@@ -10,6 +10,7 @@
 
 #ifdef CONFIG_CNSS2_QMI
 #include "wlan_firmware_service_v01.h"
+#include "coexistence_service_v01.h"
 
 struct cnss_qmi_event_server_arrive_data {
 	unsigned int node;
@@ -23,7 +24,8 @@
 int cnss_wlfw_server_exit(struct cnss_plat_data *plat_priv);
 int cnss_wlfw_respond_mem_send_sync(struct cnss_plat_data *plat_priv);
 int cnss_wlfw_tgt_cap_send_sync(struct cnss_plat_data *plat_priv);
-int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv,
+				 u32 bdf_type);
 int cnss_wlfw_m3_dnld_send_sync(struct cnss_plat_data *plat_priv);
 int cnss_wlfw_wlan_mode_send_sync(struct cnss_plat_data *plat_priv,
 				  enum cnss_driver_mode mode);
@@ -40,6 +42,11 @@
 			    u8 fw_log_mode);
 int cnss_wlfw_antenna_switch_send_sync(struct cnss_plat_data *plat_priv);
 int cnss_wlfw_antenna_grant_send_sync(struct cnss_plat_data *plat_priv);
+int cnss_register_coex_service(struct cnss_plat_data *plat_priv);
+void cnss_unregister_coex_service(struct cnss_plat_data *plat_priv);
+int coex_antenna_switch_to_wlan_send_sync_msg(struct cnss_plat_data *plat_priv);
+int coex_antenna_switch_to_mdm_send_sync_msg(struct cnss_plat_data *plat_priv);
+
 #else
 #define QMI_WLFW_TIMEOUT_MS		10000
 
@@ -80,7 +87,8 @@
 	return 0;
 }
 
-static inline int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv)
+static inline int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv,
+					       u32 bdf_type)
 {
 	return 0;
 }
@@ -139,6 +147,27 @@
 {
 	return 0;
 }
+
+static inline
+int cnss_register_coex_service(struct cnss_plat_data *plat_priv)
+{
+	return 0;
+}
+
+static inline
+void cnss_unregister_coex_service(struct cnss_plat_data *plat_priv) {}
+
+static inline
+int coex_antenna_switch_to_wlan_send_sync_msg(struct cnss_plat_data *plat_priv)
+{
+	return 0;
+}
+
+static inline
+int coex_antenna_switch_to_mdm_send_sync_msg(struct cnss_plat_data *plat_priv)
+{
+	return 0;
+}
 #endif /* CONFIG_CNSS2_QMI */
 
 #endif /* _CNSS_QMI_H */
diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
index d06cb95..d65447e 100644
--- a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
+++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
@@ -1221,6 +1221,24 @@
 					   num_macs),
 	},
 	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   voltage_mv_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(struct wlfw_cap_resp_msg_v01,
+					   voltage_mv),
+	},
+	{
 		.data_type      = QMI_EOTI,
 		.array_type       = NO_ARRAY,
 		.tlv_type       = QMI_COMMON_TLV_TYPE,
diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
index a5346c8..cc41f83 100644
--- a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
+++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
@@ -380,9 +380,11 @@
 	char fw_build_id[QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1];
 	u8 num_macs_valid;
 	u8 num_macs;
+	u8 voltage_mv_valid;
+	u32 voltage_mv;
 };
 
-#define WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN 207
+#define WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN 214
 extern struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[];
 
 struct wlfw_bdf_download_req_msg_v01 {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 4d49a1a..16c6c7f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -868,6 +868,15 @@
 	int ret, i, j;
 	u16 cmd_wide_id =  WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
 
+	/*
+	 * This command is not supported on earlier firmware versions.
+	 * Unfortunately, we don't have a TLV API flag to rely on, so
+	 * we fall back on the major version, which is in the first
+	 * byte of ucode_ver.
+	 */
+	if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41)
+		return 0;
+
 	ret = iwl_mvm_sar_get_wgds_table(mvm);
 	if (ret < 0) {
 		IWL_DEBUG_RADIO(mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index afed549..9a764af 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -2938,7 +2938,8 @@
 			iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
 		}
 
-		iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band);
+		iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
+				     false);
 		ret = iwl_mvm_update_sta(mvm, vif, sta);
 	} else if (old_state == IEEE80211_STA_ASSOC &&
 		   new_state == IEEE80211_STA_AUTHORIZED) {
@@ -2954,7 +2955,8 @@
 		/* enable beacon filtering */
 		WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
 
-		iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band);
+		iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
+				     true);
 
 		ret = 0;
 	} else if (old_state == IEEE80211_STA_AUTHORIZED &&
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index b3987a0..6b65ad6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1685,7 +1685,7 @@
 #endif /* CONFIG_IWLWIFI_DEBUGFS */
 
 /* rate scaling */
-int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init);
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync);
 void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg);
 int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate);
 void rs_update_last_rssi(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index f2830b5..6b9c670 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -1280,7 +1280,7 @@
 		       (unsigned long)(lq_sta->last_tx +
 				       (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) {
 		IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
-		iwl_mvm_rs_rate_init(mvm, sta, info->band);
+		iwl_mvm_rs_rate_init(mvm, sta, info->band, true);
 		return;
 	}
 	lq_sta->last_tx = jiffies;
@@ -2870,9 +2870,8 @@
 static void rs_initialize_lq(struct iwl_mvm *mvm,
 			     struct ieee80211_sta *sta,
 			     struct iwl_lq_sta *lq_sta,
-			     enum nl80211_band band)
+			     enum nl80211_band band, bool update)
 {
-	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_scale_tbl_info *tbl;
 	struct rs_rate *rate;
 	u8 active_tbl = 0;
@@ -2901,8 +2900,7 @@
 	rs_set_expected_tpt_table(lq_sta, tbl);
 	rs_fill_lq_cmd(mvm, sta, lq_sta, rate);
 	/* TODO restore station should remember the lq cmd */
-	iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq,
-			    mvmsta->sta_state < IEEE80211_STA_AUTHORIZED);
+	iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, !update);
 }
 
 static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
@@ -3155,7 +3153,7 @@
  * Called after adding a new station to initialize rate scaling
  */
 static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			     enum nl80211_band band)
+			     enum nl80211_band band, bool update)
 {
 	int i, j;
 	struct ieee80211_hw *hw = mvm->hw;
@@ -3235,7 +3233,7 @@
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 	iwl_mvm_reset_frame_stats(mvm);
 #endif
-	rs_initialize_lq(mvm, sta, lq_sta, band);
+	rs_initialize_lq(mvm, sta, lq_sta, band, update);
 }
 
 static void rs_drv_rate_update(void *mvm_r,
@@ -3255,7 +3253,7 @@
 	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
 		ieee80211_stop_tx_ba_session(sta, tid);
 
-	iwl_mvm_rs_rate_init(mvm, sta, sband->band);
+	iwl_mvm_rs_rate_init(mvm, sta, sband->band, true);
 }
 
 #ifdef CONFIG_MAC80211_DEBUGFS
@@ -4112,12 +4110,12 @@
 };
 
 void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			  enum nl80211_band band)
+			  enum nl80211_band band, bool update)
 {
 	if (iwl_mvm_has_tlc_offload(mvm))
 		rs_fw_rate_init(mvm, sta, band);
 	else
-		rs_drv_rate_init(mvm, sta, band);
+		rs_drv_rate_init(mvm, sta, band, update);
 }
 
 int iwl_mvm_rate_control_register(void)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index d2cf484..8e7f993 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -420,7 +420,7 @@
 
 /* Initialize station's rate scaling information after adding station */
 void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			  enum nl80211_band band);
+			  enum nl80211_band band, bool init);
 
 /* Notify RS about Tx status */
 void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index b002a7a..6a53494 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -900,20 +900,19 @@
 
 /**
  * iwl_mvm_send_lq_cmd() - Send link quality command
- * @init: This command is sent as part of station initialization right
- *        after station has been added.
+ * @sync: This command can be sent synchronously.
  *
  * The link quality command is sent as the last step of station creation.
- * This is the special case in which init is set and we call a callback in
+ * This is the special case in which sync is set and we call a callback in
  * this case to clear the state indicating that station creation is in
  * progress.
  */
-int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync)
 {
 	struct iwl_host_cmd cmd = {
 		.id = LQ_CMD,
 		.len = { sizeof(struct iwl_lq_cmd), },
-		.flags = init ? 0 : CMD_ASYNC,
+		.flags = sync ? 0 : CMD_ASYNC,
 		.data = { lq, },
 	};
 
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index b150da4..5d65500 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -518,6 +518,56 @@
 	{IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
 
 /* 9000 Series */
+	{IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x00A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0230, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_cfg)},
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index e2addd8..5d75c97 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -696,11 +696,10 @@
 				"Send delba to tid=%d, %pM\n",
 				tid, rx_reor_tbl_ptr->ta);
 			mwifiex_send_delba(priv, tid, rx_reor_tbl_ptr->ta, 0);
-			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-					       flags);
-			return;
+			goto exit;
 		}
 	}
+exit:
 	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 }
 
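The mwifiex change above replaces an unlock-and-return inside the loop
with a jump to a single exit label, so the spinlock is released in
exactly one place. The idiom, in brief (match() is hypothetical):

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	list_for_each_entry(ptr, &priv->rx_reorder_tbl_ptr, list) {
		if (match(ptr))
			goto exit;	/* fall through to the one unlock */
	}
exit:
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
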
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 8e63d14..5380fba 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -103,8 +103,6 @@
  * There could be holes in the buffer, which are skipped by the function.
  * Since the buffer is linear, the function uses rotation to simulate
  * circular buffer.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static void
 mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
@@ -113,21 +111,25 @@
 {
 	int pkt_to_send, i;
 	void *rx_tmp_ptr;
+	unsigned long flags;
 
 	pkt_to_send = (start_win > tbl->start_win) ?
 		      min((start_win - tbl->start_win), tbl->win_size) :
 		      tbl->win_size;
 
 	for (i = 0; i < pkt_to_send; ++i) {
+		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 		rx_tmp_ptr = NULL;
 		if (tbl->rx_reorder_ptr[i]) {
 			rx_tmp_ptr = tbl->rx_reorder_ptr[i];
 			tbl->rx_reorder_ptr[i] = NULL;
 		}
+		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		if (rx_tmp_ptr)
 			mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
 	}
 
+	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	/*
 	 * We don't have a circular buffer, hence use rotation to simulate
 	 * circular buffer
@@ -138,6 +140,7 @@
 	}
 
 	tbl->start_win = start_win;
+	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 }
 
 /*
@@ -147,8 +150,6 @@
  * The start window is adjusted automatically when a hole is located.
  * Since the buffer is linear, the function uses rotation to simulate
  * circular buffer.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static void
 mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
@@ -156,15 +157,22 @@
 {
 	int i, j, xchg;
 	void *rx_tmp_ptr;
+	unsigned long flags;
 
 	for (i = 0; i < tbl->win_size; ++i) {
-		if (!tbl->rx_reorder_ptr[i])
+		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+		if (!tbl->rx_reorder_ptr[i]) {
+			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+					       flags);
 			break;
+		}
 		rx_tmp_ptr = tbl->rx_reorder_ptr[i];
 		tbl->rx_reorder_ptr[i] = NULL;
+		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
 	}
 
+	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	/*
 	 * We don't have a circular buffer, hence use rotation to simulate
 	 * circular buffer
@@ -177,6 +185,7 @@
 		}
 	}
 	tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);
+	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 }
 
 /*
@@ -184,8 +193,6 @@
  *
  * The function stops the associated timer and dispatches all the
  * pending packets in the Rx reorder table before deletion.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static void
 mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
@@ -211,7 +218,11 @@
 
 	del_timer_sync(&tbl->timer_context.timer);
 	tbl->timer_context.timer_is_set = false;
+
+	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	list_del(&tbl->list);
+	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+
 	kfree(tbl->rx_reorder_ptr);
 	kfree(tbl);
 
@@ -224,17 +235,22 @@
 /*
  * This function returns the pointer to an entry in Rx reordering
  * table which matches the given TA/TID pair.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 struct mwifiex_rx_reorder_tbl *
 mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
 {
 	struct mwifiex_rx_reorder_tbl *tbl;
+	unsigned long flags;
 
-	list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
-		if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid)
+	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+	list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
+		if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
+			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+					       flags);
 			return tbl;
+		}
+	}
+	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 	return NULL;
 }
@@ -251,9 +267,14 @@
 		return;
 
 	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
-	list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list)
-		if (!memcmp(tbl->ta, ta, ETH_ALEN))
+	list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
+		if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
+			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+					       flags);
 			mwifiex_del_rx_reorder_entry(priv, tbl);
+			spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+		}
+	}
 	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 	return;
@@ -262,18 +283,24 @@
 /*
  * This function finds the last sequence number used in the packets
  * buffered in Rx reordering table.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static int
 mwifiex_11n_find_last_seq_num(struct reorder_tmr_cnxt *ctx)
 {
 	struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr;
+	struct mwifiex_private *priv = ctx->priv;
+	unsigned long flags;
 	int i;
 
-	for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i)
-		if (rx_reorder_tbl_ptr->rx_reorder_ptr[i])
+	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+	for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) {
+		if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) {
+			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+					       flags);
 			return i;
+		}
+	}
+	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 	return -1;
 }
@@ -291,22 +318,17 @@
 	struct reorder_tmr_cnxt *ctx =
 		from_timer(ctx, t, timer);
 	int start_win, seq_num;
-	unsigned long flags;
 
 	ctx->timer_is_set = false;
-	spin_lock_irqsave(&ctx->priv->rx_reorder_tbl_lock, flags);
 	seq_num = mwifiex_11n_find_last_seq_num(ctx);
 
-	if (seq_num < 0) {
-		spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
+	if (seq_num < 0)
 		return;
-	}
 
 	mwifiex_dbg(ctx->priv->adapter, INFO, "info: flush data %d\n", seq_num);
 	start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1);
 	mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr,
 						 start_win);
-	spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
 }
 
 /*
@@ -333,14 +355,11 @@
 	 * If we get a TID, ta pair which is already present, dispatch all the
 	 * packets and move the window size until the ssn
 	 */
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
 	if (tbl) {
 		mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num);
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		return;
 	}
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 	/* if !tbl then create one */
 	new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL);
 	if (!new_node)
@@ -551,20 +570,16 @@
 	int prev_start_win, start_win, end_win, win_size;
 	u16 pkt_index;
 	bool init_window_shift = false;
-	unsigned long flags;
 	int ret = 0;
 
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
 	if (!tbl) {
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		if (pkt_type != PKT_TYPE_BAR)
 			mwifiex_11n_dispatch_pkt(priv, payload);
 		return ret;
 	}
 
 	if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		mwifiex_11n_dispatch_pkt(priv, payload);
 		return ret;
 	}
@@ -651,8 +666,6 @@
 	if (!tbl->timer_context.timer_is_set ||
 	    prev_start_win != tbl->start_win)
 		mwifiex_11n_rxreorder_timer_restart(tbl);
-
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 	return ret;
 }
 
@@ -681,18 +694,14 @@
 		    peer_mac, tid, initiator);
 
 	if (cleanup_rx_reorder_tbl) {
-		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 		tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
 								 peer_mac);
 		if (!tbl) {
-			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-					       flags);
 			mwifiex_dbg(priv->adapter, EVENT,
 				    "event: TID, TA not found in table\n");
 			return;
 		}
 		mwifiex_del_rx_reorder_entry(priv, tbl);
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 	} else {
 		ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac);
 		if (!ptx_tbl) {
@@ -726,7 +735,6 @@
 	int tid, win_size;
 	struct mwifiex_rx_reorder_tbl *tbl;
 	uint16_t block_ack_param_set;
-	unsigned long flags;
 
 	block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
 
@@ -740,20 +748,17 @@
 		mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d\n",
 			    add_ba_rsp->peer_mac_addr, tid);
 
-		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 		tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
 						     add_ba_rsp->peer_mac_addr);
 		if (tbl)
 			mwifiex_del_rx_reorder_entry(priv, tbl);
 
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		return 0;
 	}
 
 	win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
 		    >> BLOCKACKPARAM_WINSIZE_POS;
 
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
 					     add_ba_rsp->peer_mac_addr);
 	if (tbl) {
@@ -764,7 +769,6 @@
 		else
 			tbl->amsdu = false;
 	}
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 	mwifiex_dbg(priv->adapter, CMD,
 		    "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
@@ -804,8 +808,11 @@
 
 	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	list_for_each_entry_safe(del_tbl_ptr, tmp_node,
-				 &priv->rx_reorder_tbl_ptr, list)
+				 &priv->rx_reorder_tbl_ptr, list) {
+		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
+		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+	}
 	INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
 	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
@@ -929,7 +936,6 @@
 	int tlv_buf_left = len;
 	int ret;
 	u8 *tmp;
-	unsigned long flags;
 
 	mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:",
 			 event_buf, len);
@@ -949,18 +955,14 @@
 			    tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
 			    tlv_bitmap_len);
 
-		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 		rx_reor_tbl_ptr =
 			mwifiex_11n_get_rx_reorder_tbl(priv, tlv_rxba->tid,
 						       tlv_rxba->mac);
 		if (!rx_reor_tbl_ptr) {
-			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-					       flags);
 			mwifiex_dbg(priv->adapter, ERROR,
 				    "Can not find rx_reorder_tbl!");
 			return;
 		}
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 		for (i = 0; i < tlv_bitmap_len; i++) {
 			for (j = 0 ; j < 8; j++) {
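
The mwifiex hunks above invert the locking contract: rather than requiring callers to hold rx_reorder_tbl_lock across whole operations, the helpers now take the lock themselves, only around table manipulation, and drop it before dispatching a packet, since dispatch may re-enter reorder-table code. A minimal standalone sketch of that pattern, with a pthread mutex standing in for the spinlock and every name illustrative rather than mwifiex's:

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

static pthread_mutex_t tbl_lock = PTHREAD_MUTEX_INITIALIZER;
static void *reorder_slot[8];

/* May block, or indirectly take tbl_lock again; must be called unlocked. */
static void dispatch_pkt(void *pkt)
{
	printf("dispatching %p\n", pkt);
}

static void scan_and_dispatch(size_t win_size)
{
	for (size_t i = 0; i < win_size; i++) {
		pthread_mutex_lock(&tbl_lock);
		void *pkt = reorder_slot[i];	/* detach the slot under the lock */
		reorder_slot[i] = NULL;
		pthread_mutex_unlock(&tbl_lock);

		if (pkt)
			dispatch_pkt(pkt);	/* lock dropped: safe to re-enter */
	}
}

int main(void)
{
	int a, b;

	reorder_slot[0] = &a;
	reorder_slot[3] = &b;
	scan_and_dispatch(8);
	return 0;
}
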
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index a83c5af..5ce85d5 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -421,15 +421,12 @@
 		spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
 	}
 
-	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
 	if (!priv->ap_11n_enabled ||
 	    (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
 	    (le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
 		ret = mwifiex_handle_uap_rx_forward(priv, skb);
-		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 		return ret;
 	}
-	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
 	/* Reorder and send to kernel */
 	pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
index 7cdb3e7..0a3e046 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -681,6 +681,7 @@
 	ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
 	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
 	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+	ieee80211_hw_set(hw, MFP_CAPABLE);
 	hw->max_rates = 1;
 	hw->max_report_rates = 7;
 	hw->max_rate_tries = 1;
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index af48d43..20447fd 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -385,7 +385,12 @@
 
 	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
 		struct ieee80211_txq *txq = sta->txq[i];
-		struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
+		struct mt76_txq *mtxq;
+
+		if (!txq)
+			continue;
+
+		mtxq = (struct mt76_txq *)txq->drv_priv;
 
 		spin_lock_bh(&mtxq->hwq->lock);
 		mtxq->send_bar = mtxq->aggr && send_bar;
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index f4122c8..ef9b502 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -2289,6 +2289,7 @@
 
 	if (rtl_c2h_fast_cmd(hw, skb)) {
 		rtl_c2h_content_parsing(hw, skb);
+		kfree_skb(skb);
 		return;
 	}
 
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index f17f602..5b97cc9 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -905,7 +905,7 @@
 		if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
 			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
-			BUG_ON(pull_to <= skb_headlen(skb));
+			BUG_ON(pull_to < skb_headlen(skb));
 			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 		}
 		if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 2082ae0..1d432c5 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -309,8 +309,11 @@
 	blk_cleanup_queue(q);
 }
 
-static void pmem_freeze_queue(void *q)
+static void pmem_freeze_queue(struct percpu_ref *ref)
 {
+	struct request_queue *q;
+
+	q = container_of(ref, typeof(*q), q_usage_counter);
 	blk_freeze_queue_start(q);
 }
 
@@ -402,6 +405,7 @@
 
 	pmem->pfn_flags = PFN_DEV;
 	pmem->pgmap.ref = &q->q_usage_counter;
+	pmem->pgmap.kill = pmem_freeze_queue;
 	if (is_nd_pfn(dev)) {
 		if (setup_pagemap_fsdax(dev, &pmem->pgmap))
 			return -ENOMEM;
@@ -427,13 +431,6 @@
 		memcpy(&bb_res, &nsio->res, sizeof(bb_res));
 	}
 
-	/*
-	 * At release time the queue must be frozen before
-	 * devm_memremap_pages is unwound
-	 */
-	if (devm_add_action_or_reset(dev, pmem_freeze_queue, q))
-		return -ENOMEM;
-
 	if (IS_ERR(addr))
 		return PTR_ERR(addr);
 	pmem->virt_addr = addr;
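
The pmem hunk above changes the freeze callback to take the embedded percpu_ref and recover the enclosing request_queue via container_of(). A self-contained sketch of that recovery idiom, using illustrative structure names rather than the kernel's:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct usage_counter { long refs; };

struct queue {
	int id;
	struct usage_counter usage;	/* embedded member, like q_usage_counter */
};

static void freeze(struct usage_counter *ref)
{
	/* The callback only receives the embedded member; walk back to the
	 * containing structure by subtracting the member's offset. */
	struct queue *q = container_of(ref, struct queue, usage);

	printf("freezing queue %d\n", q->id);
}

int main(void)
{
	struct queue q = { .id = 7 };

	freeze(&q.usage);
	return 0;
}
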
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index e57f390..08f997a 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -139,6 +139,10 @@
 static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
 static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
+static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
+				struct nvmet_rdma_rsp *r);
+static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
+				struct nvmet_rdma_rsp *r);
 
 static const struct nvmet_fabrics_ops nvmet_rdma_ops;
 
@@ -182,9 +186,17 @@
 	spin_unlock_irqrestore(&queue->rsps_lock, flags);
 
 	if (unlikely(!rsp)) {
-		rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
+		int ret;
+
+		rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
 		if (unlikely(!rsp))
 			return NULL;
+		ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
+		if (unlikely(ret)) {
+			kfree(rsp);
+			return NULL;
+		}
+
 		rsp->allocated = true;
 	}
 
@@ -196,7 +208,8 @@
 {
 	unsigned long flags;
 
-	if (rsp->allocated) {
+	if (unlikely(rsp->allocated)) {
+		nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
 		kfree(rsp);
 		return;
 	}
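
The nvmet-rdma fix above completes the fallback path: a response taken from the preallocated pool needs no setup, but one allocated on demand must be fully initialized on acquisition and fully torn down on release, with a flag recording its origin. A hedged userspace sketch of the pattern (all names illustrative):

#include <stdbool.h>
#include <stdlib.h>

struct rsp {
	bool allocated;			/* true: malloc'd fallback, not pool */
};

#define POOL_SIZE 4
static struct rsp pool_objs[POOL_SIZE];
static struct rsp *free_list[POOL_SIZE];
static int free_top;

static void pool_init(void)
{
	for (int i = 0; i < POOL_SIZE; i++)
		free_list[free_top++] = &pool_objs[i];
}

static struct rsp *get_rsp(void)
{
	if (free_top > 0)
		return free_list[--free_top];

	/* Pool empty: allocate on demand and initialize it completely
	 * (the bug fixed above: the fallback skipped part of this setup). */
	struct rsp *r = calloc(1, sizeof(*r));
	if (!r)
		return NULL;
	r->allocated = true;
	return r;
}

static void put_rsp(struct rsp *r)
{
	if (r->allocated) {		/* tear down, then free the fallback */
		free(r);
		return;
	}
	free_list[free_top++] = r;	/* pool object: just return the slot */
}

int main(void)
{
	struct rsp *r[POOL_SIZE + 2];

	pool_init();
	for (int i = 0; i < POOL_SIZE + 2; i++)
		r[i] = get_rsp();	/* last two come from the fallback */
	for (int i = 0; i < POOL_SIZE + 2; i++)
		if (r[i])
			put_rsp(r[i]);
	return 0;
}
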
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 70f5fd0..3f21ea6 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -115,9 +115,6 @@
 }
 #endif
 
-static struct device_node **phandle_cache;
-static u32 phandle_cache_mask;
-
 /*
  * Assumptions behind phandle_cache implementation:
  *   - phandle property values are in a contiguous range of 1..n
@@ -126,6 +123,66 @@
  *   - the phandle lookup overhead reduction provided by the cache
  *     will likely be less
  */
+
+static struct device_node **phandle_cache;
+static u32 phandle_cache_mask;
+
+/*
+ * Caller must hold devtree_lock.
+ */
+static void __of_free_phandle_cache(void)
+{
+	u32 cache_entries = phandle_cache_mask + 1;
+	u32 k;
+
+	if (!phandle_cache)
+		return;
+
+	for (k = 0; k < cache_entries; k++)
+		of_node_put(phandle_cache[k]);
+
+	kfree(phandle_cache);
+	phandle_cache = NULL;
+}
+
+int of_free_phandle_cache(void)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&devtree_lock, flags);
+
+	__of_free_phandle_cache();
+
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+
+	return 0;
+}
+#if !defined(CONFIG_MODULES)
+late_initcall_sync(of_free_phandle_cache);
+#endif
+
+/*
+ * Caller must hold devtree_lock.
+ */
+void __of_free_phandle_cache_entry(phandle handle)
+{
+	phandle masked_handle;
+	struct device_node *np;
+
+	if (!handle)
+		return;
+
+	masked_handle = handle & phandle_cache_mask;
+
+	if (phandle_cache) {
+		np = phandle_cache[masked_handle];
+		if (np && handle == np->phandle) {
+			of_node_put(np);
+			phandle_cache[masked_handle] = NULL;
+		}
+	}
+}
+
 void of_populate_phandle_cache(void)
 {
 	unsigned long flags;
@@ -135,8 +192,7 @@
 
 	raw_spin_lock_irqsave(&devtree_lock, flags);
 
-	kfree(phandle_cache);
-	phandle_cache = NULL;
+	__of_free_phandle_cache();
 
 	for_each_of_allnodes(np)
 		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
@@ -154,30 +210,15 @@
 		goto out;
 
 	for_each_of_allnodes(np)
-		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
+		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) {
+			of_node_get(np);
 			phandle_cache[np->phandle & phandle_cache_mask] = np;
+		}
 
 out:
 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 }
 
-int of_free_phandle_cache(void)
-{
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&devtree_lock, flags);
-
-	kfree(phandle_cache);
-	phandle_cache = NULL;
-
-	raw_spin_unlock_irqrestore(&devtree_lock, flags);
-
-	return 0;
-}
-#if !defined(CONFIG_MODULES)
-late_initcall_sync(of_free_phandle_cache);
-#endif
-
 void __init of_core_init(void)
 {
 	struct device_node *np;
@@ -1150,13 +1191,23 @@
 		if (phandle_cache[masked_handle] &&
 		    handle == phandle_cache[masked_handle]->phandle)
 			np = phandle_cache[masked_handle];
+		if (np && of_node_check_flag(np, OF_DETACHED)) {
+			WARN_ON(1); /* did not uncache np on node removal */
+			of_node_put(np);
+			phandle_cache[masked_handle] = NULL;
+			np = NULL;
+		}
 	}
 
 	if (!np) {
 		for_each_of_allnodes(np)
-			if (np->phandle == handle) {
-				if (phandle_cache)
+			if (np->phandle == handle &&
+			    !of_node_check_flag(np, OF_DETACHED)) {
+				if (phandle_cache) {
+					/* will put when removed from cache */
+					of_node_get(np);
 					phandle_cache[masked_handle] = np;
+				}
 				break;
 			}
 	}
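
The of/base.c rework above closes a use-after-free window by making the phandle cache hold its own reference on every node it stores: of_node_get() on insert, of_node_put() on eviction, node detach, or cache teardown. The rule generalizes to any cache of refcounted objects; a small self-contained sketch with an explicit refcount (illustrative names, not the kernel API):

#include <assert.h>
#include <stddef.h>

struct node { int refs; };

static void node_get(struct node *n) { if (n) n->refs++; }
static void node_put(struct node *n) { if (n) { n->refs--; assert(n->refs >= 0); } }

#define CACHE_SIZE 8
static struct node *cache[CACHE_SIZE];

static void cache_store(unsigned int slot, struct node *n)
{
	node_put(cache[slot]);	/* drop the ref on whatever is evicted */
	node_get(n);		/* the cache now holds its own reference */
	cache[slot] = n;
}

static void cache_free(void)
{
	for (unsigned int i = 0; i < CACHE_SIZE; i++) {
		node_put(cache[i]);
		cache[i] = NULL;
	}
}

int main(void)
{
	struct node a = { .refs = 1 };	/* caller's reference */

	cache_store(3, &a);
	assert(a.refs == 2);
	cache_free();
	assert(a.refs == 1);
	return 0;
}
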
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 40b9051..2587428 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -221,7 +221,8 @@
 		return -ENODEV;
 
 	/* Name & Type */
-	csize = snprintf(str, len, "of:N%sT%s", dev->of_node->name,
+	/* %p eats all alphanum characters, so %c must be used here */
+	csize = snprintf(str, len, "of:N%pOFn%c%s", dev->of_node, 'T',
 			 dev->of_node->type);
 	tsize = csize;
 	len -= csize;
@@ -300,7 +301,7 @@
 	if ((!dev) || (!dev->of_node))
 		return;
 
-	add_uevent_var(env, "OF_NAME=%s", dev->of_node->name);
+	add_uevent_var(env, "OF_NAME=%pOFn", dev->of_node);
 	add_uevent_var(env, "OF_FULLNAME=%pOF", dev->of_node);
 	if (dev->of_node->type && strcmp("<NULL>", dev->of_node->type) != 0)
 		add_uevent_var(env, "OF_TYPE=%s", dev->of_node->type);
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index f4f8ed9..45c0b1f 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -268,13 +268,13 @@
 	}
 
 	of_node_set_flag(np, OF_DETACHED);
+
+	/* race with of_find_node_by_phandle() prevented by devtree_lock */
+	__of_free_phandle_cache_entry(np->phandle);
 }
 
 /**
  * of_detach_node() - "Unplug" a node from the device tree.
- *
- * The caller must hold a reference to the node.  The memory associated with
- * the node is not freed until its refcount goes to zero.
  */
 int of_detach_node(struct device_node *np)
 {
@@ -330,6 +330,25 @@
 	if (!of_node_check_flag(node, OF_DYNAMIC))
 		return;
 
+	if (of_node_check_flag(node, OF_OVERLAY)) {
+
+		if (!of_node_check_flag(node, OF_OVERLAY_FREE_CSET)) {
+			/* premature refcount of zero, do not free memory */
+			pr_err("ERROR: memory leak before free overlay changeset,  %pOF\n",
+			       node);
+			return;
+		}
+
+		/*
+		 * If node->properties is non-empty, then properties were added
+		 * to this node either by a different overlay that has not
+		 * yet been removed, or by a non-overlay mechanism.
+		 */
+		if (node->properties)
+			pr_err("ERROR: %s(), unexpected properties in %pOF\n",
+			       __func__, node);
+	}
+
 	property_list_free(node->properties);
 	property_list_free(node->deadprops);
 
@@ -434,6 +453,16 @@
 
 static void __of_changeset_entry_destroy(struct of_changeset_entry *ce)
 {
+	if (ce->action == OF_RECONFIG_ATTACH_NODE &&
+	    of_node_check_flag(ce->np, OF_OVERLAY)) {
+		if (kref_read(&ce->np->kobj.kref) > 1) {
+			pr_err("ERROR: memory leak, expected refcount 1 instead of %d, of_node_get()/of_node_put() unbalanced - destroy cset entry: attach overlay node %pOF\n",
+			       kref_read(&ce->np->kobj.kref), ce->np);
+		} else {
+			of_node_set_flag(ce->np, OF_OVERLAY_FREE_CSET);
+		}
+	}
+
 	of_node_put(ce->np);
 	list_del(&ce->node);
 	kfree(ce);
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 02ad93a..f091258 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -350,6 +350,8 @@
 int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
 {
 	int irq = of_irq_get(dev, index);
+	u32 trigger_type;
+	struct of_phandle_args oirq;
 
 	if (irq < 0)
 		return irq;
@@ -367,8 +369,17 @@
 		of_property_read_string_index(dev, "interrupt-names", index,
 					      &name);
 
+		trigger_type = irqd_get_trigger_type(irq_get_irq_data(irq));
+
+		/* Only consult the interrupt specifier if parsing succeeds */
+		if (!trigger_type &&
+		    !of_irq_parse_one(dev, index, &oirq) &&
+		    of_device_is_compatible(oirq.np, "arm,gic-v3"))
+			pr_err("IRQ TYPE should not be NONE for %s\n",
+			       dev->full_name);
+
 		r->start = r->end = irq;
-		r->flags = IORESOURCE_IRQ | irqd_get_trigger_type(irq_get_irq_data(irq));
+		r->flags = IORESOURCE_IRQ | trigger_type;
 		r->name = name ? name : of_node_full_name(dev);
 	}
 
diff --git a/drivers/of/kobj.c b/drivers/of/kobj.c
index 7a0a189..c72eef9 100644
--- a/drivers/of/kobj.c
+++ b/drivers/of/kobj.c
@@ -133,6 +133,9 @@
 	}
 	if (!name)
 		return -ENOMEM;
+
+	of_node_get(np);
+
 	rc = kobject_add(&np->kobj, parent, "%s", name);
 	kfree(name);
 	if (rc)
@@ -159,6 +162,5 @@
 		kobject_del(&np->kobj);
 	}
 
-	/* finally remove the kobj_init ref */
 	of_node_put(np);
 }
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index e92391d..5ad1342 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -97,8 +97,8 @@
 		return rc;
 	}
 
-	dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
-		child->name, addr);
+	dev_dbg(&mdio->dev, "registered phy %pOFn at address %i\n",
+		child, addr);
 	return 0;
 }
 
@@ -127,8 +127,8 @@
 		return rc;
 	}
 
-	dev_dbg(&mdio->dev, "registered mdio device %s at address %i\n",
-		child->name, addr);
+	dev_dbg(&mdio->dev, "registered mdio device %pOFn at address %i\n",
+		child, addr);
 	return 0;
 }
 
@@ -263,8 +263,8 @@
 				continue;
 
 			/* be noisy to encourage people to set reg property */
-			dev_info(&mdio->dev, "scan phy %s at address %i\n",
-				 child->name, addr);
+			dev_info(&mdio->dev, "scan phy %pOFn at address %i\n",
+				 child, addr);
 
 			if (of_mdiobus_child_is_phy(child)) {
 				rc = of_mdiobus_register_phy(mdio, child, addr);
diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
index 2411ed3..f5b4522 100644
--- a/drivers/of/of_numa.c
+++ b/drivers/of/of_numa.c
@@ -168,8 +168,8 @@
 		np = of_get_next_parent(np);
 	}
 	if (np && r)
-		pr_warn("Invalid \"numa-node-id\" property in node %s\n",
-			np->name);
+		pr_warn("Invalid \"numa-node-id\" property in node %pOFn\n",
+			np);
 	of_node_put(np);
 
 	/*
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 216175d..f5da842 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -76,6 +76,10 @@
 int of_resolve_phandles(struct device_node *tree);
 #endif
 
+#if defined(CONFIG_OF_DYNAMIC)
+void __of_free_phandle_cache_entry(phandle handle);
+#endif
+
 #if defined(CONFIG_OF_OVERLAY)
 void of_overlay_mutex_lock(void);
 void of_overlay_mutex_unlock(void);
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index eda57ef..9808aae 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -24,6 +24,26 @@
 #include "of_private.h"
 
 /**
+ * struct target - info about current target node as recursing through overlay
+ * @np:			node where current level of overlay will be applied
+ * @in_livetree:	@np is a node in the live devicetree
+ *
+ * Used in the algorithm to create the portion of a changeset that describes
+ * an overlay fragment, which is a devicetree subtree.  Initially @np is a node
+ * in the live devicetree into which the overlay subtree will be grafted.
+ * When recursing to the next level of the overlay subtree, the target also
+ * recurses to the next level of the live devicetree, as long as the overlay
+ * subtree node also exists in the live devicetree.  When a node in the overlay
+ * subtree does not exist at the same level in the live devicetree, target->np
+ * points to a newly allocated node, and all subsequent targets in the subtree
+ * will be newly allocated nodes.
+ */
+struct target {
+	struct device_node *np;
+	bool in_livetree;
+};
+
+/**
  * struct fragment - info about fragment nodes in overlay expanded device tree
  * @target:	target of the overlay operation
  * @overlay:	pointer to the __overlay__ node
@@ -72,8 +92,7 @@
 }
 
 static int build_changeset_next_level(struct overlay_changeset *ovcs,
-		struct device_node *target_node,
-		const struct device_node *overlay_node);
+		struct target *target, const struct device_node *overlay_node);
 
 /*
  * of_resolve_phandles() finds the largest phandle in the live tree.
@@ -257,14 +276,17 @@
 /**
  * add_changeset_property() - add @overlay_prop to overlay changeset
  * @ovcs:		overlay changeset
- * @target_node:	where to place @overlay_prop in live tree
+ * @target:		where @overlay_prop will be placed
  * @overlay_prop:	property to add or update, from overlay tree
  * @is_symbols_prop:	1 if @overlay_prop is from node "/__symbols__"
  *
- * If @overlay_prop does not already exist in @target_node, add changeset entry
- * to add @overlay_prop in @target_node, else add changeset entry to update
+ * If @overlay_prop does not already exist in live devicetree, add changeset
+ * entry to add @overlay_prop in @target, else add changeset entry to update
  * value of @overlay_prop.
  *
+ * @target may be either in the live devicetree or in a new subtree that
+ * is contained in the changeset.
+ *
  * Some special properties are not updated (no error returned).
  *
  * Update of property in symbols node is not allowed.
@@ -273,20 +295,22 @@
  * invalid @overlay.
  */
 static int add_changeset_property(struct overlay_changeset *ovcs,
-		struct device_node *target_node,
-		struct property *overlay_prop,
+		struct target *target, struct property *overlay_prop,
 		bool is_symbols_prop)
 {
 	struct property *new_prop = NULL, *prop;
 	int ret = 0;
 
-	prop = of_find_property(target_node, overlay_prop->name, NULL);
-
 	if (!of_prop_cmp(overlay_prop->name, "name") ||
 	    !of_prop_cmp(overlay_prop->name, "phandle") ||
 	    !of_prop_cmp(overlay_prop->name, "linux,phandle"))
 		return 0;
 
+	if (target->in_livetree)
+		prop = of_find_property(target->np, overlay_prop->name, NULL);
+	else
+		prop = NULL;
+
 	if (is_symbols_prop) {
 		if (prop)
 			return -EINVAL;
@@ -299,10 +323,10 @@
 		return -ENOMEM;
 
 	if (!prop)
-		ret = of_changeset_add_property(&ovcs->cset, target_node,
+		ret = of_changeset_add_property(&ovcs->cset, target->np,
 						new_prop);
 	else
-		ret = of_changeset_update_property(&ovcs->cset, target_node,
+		ret = of_changeset_update_property(&ovcs->cset, target->np,
 						   new_prop);
 
 	if (ret) {
@@ -315,14 +339,14 @@
 
 /**
  * add_changeset_node() - add @node (and children) to overlay changeset
- * @ovcs:		overlay changeset
- * @target_node:	where to place @node in live tree
- * @node:		node from within overlay device tree fragment
+ * @ovcs:	overlay changeset
+ * @target:	where @node will be placed in live tree or changeset
+ * @node:	node from within overlay device tree fragment
  *
- * If @node does not already exist in @target_node, add changeset entry
- * to add @node in @target_node.
+ * If @node does not already exist in @target, add changeset entry
+ * to add @node in @target.
  *
- * If @node already exists in @target_node, and the existing node has
+ * If @node already exists in @target, and the existing node has
  * a phandle, the overlay node is not allowed to have a phandle.
  *
  * If @node has child nodes, add the children recursively via
@@ -355,36 +379,46 @@
  * invalid @overlay.
  */
 static int add_changeset_node(struct overlay_changeset *ovcs,
-		struct device_node *target_node, struct device_node *node)
+		struct target *target, struct device_node *node)
 {
 	const char *node_kbasename;
 	struct device_node *tchild;
+	struct target target_child;
 	int ret = 0;
 
 	node_kbasename = kbasename(node->full_name);
 
-	for_each_child_of_node(target_node, tchild)
+	for_each_child_of_node(target->np, tchild)
 		if (!of_node_cmp(node_kbasename, kbasename(tchild->full_name)))
 			break;
 
 	if (!tchild) {
-		tchild = __of_node_dup(node, node_kbasename);
+		tchild = __of_node_dup(NULL, node_kbasename);
 		if (!tchild)
 			return -ENOMEM;
 
-		tchild->parent = target_node;
+		tchild->parent = target->np;
+		of_node_set_flag(tchild, OF_OVERLAY);
 
 		ret = of_changeset_attach_node(&ovcs->cset, tchild);
 		if (ret)
 			return ret;
 
-		return build_changeset_next_level(ovcs, tchild, node);
+		target_child.np = tchild;
+		target_child.in_livetree = false;
+
+		ret = build_changeset_next_level(ovcs, &target_child, node);
+		of_node_put(tchild);
+		return ret;
 	}
 
-	if (node->phandle && tchild->phandle)
+	if (node->phandle && tchild->phandle) {
 		ret = -EINVAL;
-	else
-		ret = build_changeset_next_level(ovcs, tchild, node);
+	} else {
+		target_child.np = tchild;
+		target_child.in_livetree = target->in_livetree;
+		ret = build_changeset_next_level(ovcs, &target_child, node);
+	}
 	of_node_put(tchild);
 
 	return ret;
@@ -393,7 +427,7 @@
 /**
  * build_changeset_next_level() - add level of overlay changeset
  * @ovcs:		overlay changeset
- * @target_node:	where to place @overlay_node in live tree
+ * @target:		where to place @overlay_node in live tree
  * @overlay_node:	node from within an overlay device tree fragment
  *
  * Add the properties (if any) and nodes (if any) from @overlay_node to the
@@ -406,27 +440,26 @@
  * invalid @overlay_node.
  */
 static int build_changeset_next_level(struct overlay_changeset *ovcs,
-		struct device_node *target_node,
-		const struct device_node *overlay_node)
+		struct target *target, const struct device_node *overlay_node)
 {
 	struct device_node *child;
 	struct property *prop;
 	int ret;
 
 	for_each_property_of_node(overlay_node, prop) {
-		ret = add_changeset_property(ovcs, target_node, prop, 0);
+		ret = add_changeset_property(ovcs, target, prop, 0);
 		if (ret) {
 			pr_debug("Failed to apply prop @%pOF/%s, err=%d\n",
-				 target_node, prop->name, ret);
+				 target->np, prop->name, ret);
 			return ret;
 		}
 	}
 
 	for_each_child_of_node(overlay_node, child) {
-		ret = add_changeset_node(ovcs, target_node, child);
+		ret = add_changeset_node(ovcs, target, child);
 		if (ret) {
-			pr_debug("Failed to apply node @%pOF/%s, err=%d\n",
-				 target_node, child->name, ret);
+			pr_debug("Failed to apply node @%pOF/%pOFn, err=%d\n",
+				 target->np, child, ret);
 			of_node_put(child);
 			return ret;
 		}
@@ -439,17 +472,17 @@
  * Add the properties from __overlay__ node to the @ovcs->cset changeset.
  */
 static int build_changeset_symbols_node(struct overlay_changeset *ovcs,
-		struct device_node *target_node,
+		struct target *target,
 		const struct device_node *overlay_symbols_node)
 {
 	struct property *prop;
 	int ret;
 
 	for_each_property_of_node(overlay_symbols_node, prop) {
-		ret = add_changeset_property(ovcs, target_node, prop, 1);
+		ret = add_changeset_property(ovcs, target, prop, 1);
 		if (ret) {
 			pr_debug("Failed to apply prop @%pOF/%s, err=%d\n",
-				 target_node, prop->name, ret);
+				 target->np, prop->name, ret);
 			return ret;
 		}
 	}
@@ -472,6 +505,7 @@
 static int build_changeset(struct overlay_changeset *ovcs)
 {
 	struct fragment *fragment;
+	struct target target;
 	int fragments_count, i, ret;
 
 	/*
@@ -486,7 +520,9 @@
 	for (i = 0; i < fragments_count; i++) {
 		fragment = &ovcs->fragments[i];
 
-		ret = build_changeset_next_level(ovcs, fragment->target,
+		target.np = fragment->target;
+		target.in_livetree = true;
+		ret = build_changeset_next_level(ovcs, &target,
 						 fragment->overlay);
 		if (ret) {
 			pr_debug("apply failed '%pOF'\n", fragment->target);
@@ -496,7 +532,10 @@
 
 	if (ovcs->symbols_fragment) {
 		fragment = &ovcs->fragments[ovcs->count - 1];
-		ret = build_changeset_symbols_node(ovcs, fragment->target,
+
+		target.np = fragment->target;
+		target.in_livetree = true;
+		ret = build_changeset_symbols_node(ovcs, &target,
 						   fragment->overlay);
 		if (ret) {
 			pr_debug("apply failed '%pOF'\n", fragment->target);
@@ -514,7 +553,7 @@
  * 1) "target" property containing the phandle of the target
  * 2) "target-path" property containing the path of the target
  */
-static struct device_node *find_target_node(struct device_node *info_node)
+static struct device_node *find_target(struct device_node *info_node)
 {
 	struct device_node *node;
 	const char *path;
@@ -620,7 +659,7 @@
 
 		fragment = &fragments[cnt];
 		fragment->overlay = overlay_node;
-		fragment->target = find_target_node(node);
+		fragment->target = find_target(node);
 		if (!fragment->target) {
 			of_node_put(fragment->overlay);
 			ret = -EINVAL;
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 7bd0af3..7c4abf5 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -92,8 +92,8 @@
 		 */
 		reg = of_get_property(node, "reg", NULL);
 		if (reg && (addr = of_translate_address(node, reg)) != OF_BAD_ADDR) {
-			dev_set_name(dev, dev_name(dev) ? "%llx.%s:%s" : "%llx.%s",
-				     (unsigned long long)addr, node->name,
+			dev_set_name(dev, dev_name(dev) ? "%llx.%pOFn:%s" : "%llx.%pOFn",
+				     (unsigned long long)addr, node,
 				     dev_name(dev));
 			return;
 		}
@@ -143,8 +143,8 @@
 			WARN_ON(rc);
 		}
 		if (of_irq_to_resource_table(np, res, num_irq) != num_irq)
-			pr_debug("not all legacy IRQ resources mapped for %s\n",
-				 np->name);
+			pr_debug("not all legacy IRQ resources mapped for %pOFn\n",
+				 np);
 	}
 
 	dev->dev.of_node = of_node_get(np);
diff --git a/drivers/of/property.c b/drivers/of/property.c
index f46828e..43720c2d 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -806,6 +806,7 @@
 
 	if (!of_device_is_available(remote)) {
 		pr_debug("not available for remote node\n");
+		of_node_put(remote);
 		return NULL;
 	}
 
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 41b4971..7f42314 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -212,8 +212,8 @@
 
 	for_each_child_of_node(np, child) {
 		if (child->parent != np) {
-			pr_err("Child node %s links to wrong parent %s\n",
-				 child->name, np->name);
+			pr_err("Child node %pOFn links to wrong parent %pOFn\n",
+				 child, np);
 			rc = -EINVAL;
 			goto put_child;
 		}
@@ -1046,16 +1046,16 @@
 	for_each_child_of_node(np, child) {
 		for_each_child_of_node(child, grandchild)
 			unittest(of_find_device_by_node(grandchild),
-				 "Could not create device for node '%s'\n",
-				 grandchild->name);
+				 "Could not create device for node '%pOFn'\n",
+				 grandchild);
 	}
 
 	of_platform_depopulate(&test_bus->dev);
 	for_each_child_of_node(np, child) {
 		for_each_child_of_node(child, grandchild)
 			unittest(!of_find_device_by_node(grandchild),
-				 "device didn't get destroyed '%s'\n",
-				 grandchild->name);
+				 "device didn't get destroyed '%pOFn'\n",
+				 grandchild);
 	}
 
 	platform_device_unregister(test_bus);
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 29a0575..0fa9e8f 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -99,9 +99,6 @@
 					       (i * MAX_MSI_IRQS_PER_CTRL) +
 					       pos);
 			generic_handle_irq(irq);
-			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS +
-						(i * MSI_REG_CTRL_BLOCK_SIZE),
-					    4, 1 << pos);
 			pos++;
 		}
 	}
@@ -168,8 +165,8 @@
 		bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
 
 		pp->irq_status[ctrl] &= ~(1 << bit);
-		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
-				    pp->irq_status[ctrl]);
+		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
+				    ~pp->irq_status[ctrl]);
 	}
 
 	raw_spin_unlock_irqrestore(&pp->lock, flags);
@@ -191,8 +188,8 @@
 		bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
 
 		pp->irq_status[ctrl] |= 1 << bit;
-		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
-				    pp->irq_status[ctrl]);
+		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
+				    ~pp->irq_status[ctrl]);
 	}
 
 	raw_spin_unlock_irqrestore(&pp->lock, flags);
@@ -200,13 +197,22 @@
 
 static void dw_pci_bottom_ack(struct irq_data *d)
 {
-	struct msi_desc *msi = irq_data_get_msi_desc(d);
-	struct pcie_port *pp;
+	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+	unsigned int res, bit, ctrl;
+	unsigned long flags;
 
-	pp = msi_desc_to_pci_sysdata(msi);
+	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
+
+	raw_spin_lock_irqsave(&pp->lock, flags);
+
+	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, 1 << bit);
 
 	if (pp->ops->msi_irq_ack)
 		pp->ops->msi_irq_ack(d->hwirq, pp);
+
+	raw_spin_unlock_irqrestore(&pp->lock, flags);
 }
 
 static struct irq_chip dw_pci_msi_bottom_irq_chip = {
@@ -658,10 +664,15 @@
 	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
 
 	/* Initialize IRQ Status array */
-	for (ctrl = 0; ctrl < num_ctrls; ctrl++)
-		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
+	for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
 					(ctrl * MSI_REG_CTRL_BLOCK_SIZE),
-				    4, &pp->irq_status[ctrl]);
+				    4, ~0);
+		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
+					(ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+				    4, ~0);
+		pp->irq_status[ctrl] = 0;
+	}
 
 	/* Setup RC BARs */
 	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
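
The designware rework above stops toggling PCIE_MSI_INTR0_ENABLE at runtime and instead programs it once to all-ones, doing per-vector mask/unmask through PCIE_MSI_INTR0_MASK (where a set bit suppresses delivery) and acking status directly in the irq_ack callback. A tiny standalone sketch of that enable-once, mask-at-runtime convention, with illustrative names only:

#include <assert.h>
#include <stdint.h>

/* Sketch: one 32-vector MSI controller block. A set bit in the mask
 * register suppresses delivery; the enable register is written once. */
static uint32_t msi_enable;	/* programmed once at init */
static uint32_t msi_mask;	/* toggled at runtime */

static void msi_init(void)
{
	msi_mask = ~0u;		/* everything masked... */
	msi_enable = ~0u;	/* ...but permanently enabled */
}

static void msi_unmask(unsigned int vec) { msi_mask &= ~(1u << vec); }
static void msi_mask_one(unsigned int vec) { msi_mask |= 1u << vec; }

static int msi_can_fire(unsigned int vec)
{
	return (msi_enable & ~msi_mask) & (1u << vec);
}

int main(void)
{
	msi_init();
	assert(!msi_can_fire(5));
	msi_unmask(5);
	assert(msi_can_fire(5));
	msi_mask_one(5);
	assert(!msi_can_fire(5));
	return 0;
}
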
diff --git a/drivers/pci/controller/pci-msm-msi.c b/drivers/pci/controller/pci-msm-msi.c
index 782140b..1bf7328 100644
--- a/drivers/pci/controller/pci-msm-msi.c
+++ b/drivers/pci/controller/pci-msm-msi.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved.*/
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.*/
 
 #include <linux/interrupt.h>
 #include <linux/iommu.h>
@@ -55,10 +55,40 @@
 	chained_irq_exit(chip, desc);
 }
 
+static void msm_msi_mask_irq(struct irq_data *data)
+{
+	struct irq_data *parent_data;
+
+	if (!data->parent_data)
+		return;
+
+	parent_data = irq_get_irq_data(data->parent_data->hwirq);
+	if (!parent_data || !parent_data->chip)
+		return;
+
+	pci_msi_mask_irq(data);
+	parent_data->chip->irq_mask(parent_data);
+}
+
+static void msm_msi_unmask_irq(struct irq_data *data)
+{
+	struct irq_data *parent_data;
+
+	if (!data->parent_data)
+		return;
+
+	parent_data = irq_get_irq_data(data->parent_data->hwirq);
+	if (!parent_data || !parent_data->chip)
+		return;
+
+	parent_data->chip->irq_unmask(parent_data);
+	pci_msi_unmask_irq(data);
+}
+
 static struct irq_chip msm_msi_irq_chip = {
 	.name = "msm_pci_msi",
-	.irq_mask = pci_msi_mask_irq,
-	.irq_unmask = pci_msi_unmask_irq,
+	.irq_mask = msm_msi_mask_irq,
+	.irq_unmask = msm_msi_unmask_irq,
 };
 
 static int msm_msi_domain_prepare(struct irq_domain *domain, struct device *dev,
@@ -207,6 +237,7 @@
 				msi->irqs[pos].hwirq,
 				&msm_msi_bottom_irq_chip, client,
 				handle_simple_irq, NULL, NULL);
+		irq_set_status_flags(msi->irqs[pos].virq, IRQ_DISABLE_UNLAZY);
 		client->nr_irqs++;
 		pos++;
 	}
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index bef17c3..33f3f47 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1251,30 +1251,29 @@
 		return 0;
 	}
 
-	if (!pm || !pm->runtime_suspend)
-		return -ENOSYS;
-
 	pci_dev->state_saved = false;
-	error = pm->runtime_suspend(dev);
-	if (error) {
+	if (pm && pm->runtime_suspend) {
+		error = pm->runtime_suspend(dev);
 		/*
 		 * -EBUSY and -EAGAIN are used to request that the runtime PM
 		 * core schedule a new suspend, so log the event only at debug
 		 * log level.
 		 */
-		if (error == -EBUSY || error == -EAGAIN)
+		if (error == -EBUSY || error == -EAGAIN) {
 			dev_dbg(dev, "can't suspend now (%pf returned %d)\n",
 				pm->runtime_suspend, error);
-		else
+			return error;
+		} else if (error) {
 			dev_err(dev, "can't suspend (%pf returned %d)\n",
 				pm->runtime_suspend, error);
-
-		return error;
+			return error;
+		}
 	}
 
 	pci_fixup_device(pci_fixup_suspend, pci_dev);
 
-	if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
+	if (pm && pm->runtime_suspend
+	    && !pci_dev->state_saved && pci_dev->current_state != PCI_D0
 	    && pci_dev->current_state != PCI_UNKNOWN) {
 		WARN_ONCE(pci_dev->current_state != prev,
 			"PCI PM: State of device not saved by %pF\n",
@@ -1292,7 +1291,7 @@
 
 static int pci_pm_runtime_resume(struct device *dev)
 {
-	int rc;
+	int rc = 0;
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
@@ -1306,14 +1305,12 @@
 	if (!pci_dev->driver)
 		return 0;
 
-	if (!pm || !pm->runtime_resume)
-		return -ENOSYS;
-
 	pci_fixup_device(pci_fixup_resume_early, pci_dev);
 	pci_enable_wake(pci_dev, PCI_D0, false);
 	pci_fixup_device(pci_fixup_resume, pci_dev);
 
-	rc = pm->runtime_resume(dev);
+	if (pm && pm->runtime_resume)
+		rc = pm->runtime_resume(dev);
 
 	pci_dev->runtime_d3cold = false;
 
diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
index 1b10ea0..69372e2 100644
--- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
@@ -30,8 +30,8 @@
 #define DDRC_FLUX_RCMD          0x38c
 #define DDRC_PRE_CMD            0x3c0
 #define DDRC_ACT_CMD            0x3c4
-#define DDRC_BNK_CHG            0x3c8
 #define DDRC_RNK_CHG            0x3cc
+#define DDRC_RW_CHG             0x3d0
 #define DDRC_EVENT_CTRL         0x6C0
 #define DDRC_INT_MASK		0x6c8
 #define DDRC_INT_STATUS		0x6cc
@@ -51,7 +51,7 @@
 
 static const u32 ddrc_reg_off[] = {
 	DDRC_FLUX_WR, DDRC_FLUX_RD, DDRC_FLUX_WCMD, DDRC_FLUX_RCMD,
-	DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_BNK_CHG, DDRC_RNK_CHG
+	DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_RNK_CHG, DDRC_RW_CHG
 };
 
 /*
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs.c b/drivers/phy/qualcomm/phy-qcom-ufs.c
index 62744320..029ee04 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-ufs.c
@@ -221,10 +221,12 @@
 	__ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_parent",
 				   &phy_common->ref_clk_parent, false);
 
-	err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk",
-				   &phy_common->ref_clk);
-	if (err)
-		goto out;
+	/*
+	 * Some platforms may not have ON/OFF control for the reference
+	 * clock, hence this clock may be optional.
+	 */
+	__ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk",
+				   &phy_common->ref_clk, false);
 
 	/*
 	 * "ref_aux_clk" is optional and only supported by certain
@@ -414,11 +416,17 @@
 		}
 	}
 
-	ret = clk_prepare_enable(phy->ref_clk);
-	if (ret) {
-		dev_err(phy->dev, "%s: ref_clk enable failed %d\n",
-				__func__, ret);
-		goto out_disable_parent;
+	/*
+	 * "ref_clk" is optional clock hence make sure that clk reference
+	 * is available before trying to enable the clock.
+	 */
+	if (phy->ref_clk) {
+		ret = clk_prepare_enable(phy->ref_clk);
+		if (ret) {
+			dev_err(phy->dev, "%s: ref_clk enable failed %d\n",
+					__func__, ret);
+			goto out_disable_parent;
+		}
 	}
 
 	/*
@@ -482,7 +490,14 @@
 		 */
 		if (phy->ref_aux_clk)
 			clk_disable_unprepare(phy->ref_aux_clk);
-		clk_disable_unprepare(phy->ref_clk);
+
+		/*
+		 * "ref_clk" is optional clock hence make sure that clk
+		 * reference is available before trying to disable the clock.
+		 */
+		if (phy->ref_clk)
+			clk_disable_unprepare(phy->ref_clk);
+
 		/*
 		 * "ref_clk_parent" is optional clock hence make sure that clk
 		 * reference is available before trying to disable the clock.
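
The phy-qcom-ufs change above open-codes the optional-clock pattern with per-call NULL guards. Two points worth noting: clk_prepare_enable(NULL) and clk_disable_unprepare(NULL) are already no-ops in the common clk framework, and newer kernels (v5.1+) can express the intent directly with devm_clk_get_optional(), which returns NULL rather than an error when the clock is absent. A sketch of a probe-time helper under that newer API; the function name is an assumption, and this compiles only inside a kernel tree:

#include <linux/clk.h>
#include <linux/device.h>

/* Sketch: fetch an optional reference clock. With devm_clk_get_optional(),
 * a missing "ref_clk" yields NULL instead of -ENOENT, and the later
 * clk_prepare_enable(NULL) is a no-op, so no per-call NULL guards are
 * needed. */
static int example_get_ref_clk(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get_optional(dev, "ref_clk");

	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* real failure, e.g. -EPROBE_DEFER */

	*out = clk;			/* may be NULL: clock not provided */
	return 0;
}
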
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index 4f3ab18..c8eff70 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -191,7 +191,8 @@
 		case PIN_CONFIG_BIAS_DISABLE:
 			dev_dbg(pc->dev, "pin %u: disable bias\n", pin);
 
-			meson_calc_reg_and_bit(bank, pin, REG_PULL, &reg, &bit);
+			meson_calc_reg_and_bit(bank, pin, REG_PULLEN, &reg,
+					       &bit);
 			ret = regmap_update_bits(pc->reg_pullen, reg,
 						 BIT(bit), 0);
 			if (ret)
diff --git a/drivers/pinctrl/qcom/pinctrl-kona.c b/drivers/pinctrl/qcom/pinctrl-kona.c
index 4fa8bb4..c61623e 100644
--- a/drivers/pinctrl/qcom/pinctrl-kona.c
+++ b/drivers/pinctrl/qcom/pinctrl-kona.c
@@ -113,6 +113,18 @@
 		.intr_detection_bit = -1,		\
 		.intr_detection_width = -1,		\
 	}
+
+#define QUP_I3C_0_MODE_OFFSET	0x9BB000
+#define QUP_I3C_1_MODE_OFFSET	0x9BC000
+#define QUP_I3C_8_MODE_OFFSET	0x5BA000
+#define QUP_I3C_14_MODE_OFFSET	0x5BB000
+
+#define QUP_I3C(qup_mode, qup_offset)					\
+	{						\
+		.mode = qup_mode,			\
+		.offset = qup_offset,			\
+	}
+
 static const struct pinctrl_pin_desc kona_pins[] = {
 	PINCTRL_PIN(0, "GPIO_0"),
 	PINCTRL_PIN(1, "GPIO_1"),
@@ -1698,6 +1710,13 @@
 	[183] = UFS_RESET(ufs_reset, 0x5b8000),
 };
 
+static struct pinctrl_qup kona_qup_regs[] = {
+	[0] = QUP_I3C(0, QUP_I3C_0_MODE_OFFSET),
+	[1] = QUP_I3C(1, QUP_I3C_1_MODE_OFFSET),
+	[2] = QUP_I3C(8, QUP_I3C_8_MODE_OFFSET),
+	[3] = QUP_I3C(14, QUP_I3C_14_MODE_OFFSET),
+};
+
 static const struct msm_pinctrl_soc_data kona_pinctrl = {
 	.pins = kona_pins,
 	.npins = ARRAY_SIZE(kona_pins),
@@ -1706,6 +1725,8 @@
 	.groups = kona_groups,
 	.ngroups = ARRAY_SIZE(kona_groups),
 	.ngpios = 180,
+	.qup_regs = kona_qup_regs,
+	.nqup_regs = ARRAY_SIZE(kona_qup_regs),
 };
 
 static int kona_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 5a5cd95..6cb2feb 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2013, Sony Mobile Communications AB.
- * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -31,6 +31,7 @@
 #include <linux/reboot.h>
 #include <linux/pm.h>
 #include <linux/log2.h>
+#include <linux/bitmap.h>
 
 #include "../core.h"
 #include "../pinconf.h"
@@ -39,6 +40,7 @@
 
 #define MAX_NR_GPIO 300
 #define PS_HOLD_OFFSET 0x820
+#define QUP_MASK       GENMASK(5, 0)
 
 /**
  * struct msm_pinctrl - state for a pinctrl-msm device
@@ -1058,6 +1060,42 @@
 	.resume = msm_pinctrl_resume,
 };
 
+int msm_qup_write(u32 mode, u32 val)
+{
+	int i;
+	struct pinctrl_qup *regs = msm_pinctrl_data->soc->qup_regs;
+	int num_regs = msm_pinctrl_data->soc->nqup_regs;
+
+	/* Iterate over the supported modes */
+	for (i = 0; i < num_regs; i++) {
+		if (regs[i].mode == mode) {
+			writel_relaxed(val & QUP_MASK,
+				 msm_pinctrl_data->regs + regs[i].offset);
+			return 0;
+		}
+	}
+
+	return -ENOENT;
+}
+
+int msm_qup_read(unsigned int mode)
+{
+	int i, val;
+	struct pinctrl_qup *regs = msm_pinctrl_data->soc->qup_regs;
+	int num_regs = msm_pinctrl_data->soc->nqup_regs;
+
+	/* Iterate over the supported modes */
+	for (i = 0; i < num_regs; i++) {
+		if (regs[i].mode == mode) {
+			val = readl_relaxed(msm_pinctrl_data->regs +
+							 regs[i].offset);
+			return val & QUP_MASK;
+		}
+	}
+
+	return -ENOENT;
+}
+
 int msm_pinctrl_probe(struct platform_device *pdev,
 		      const struct msm_pinctrl_soc_data *soc_data)
 {
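
The msm_qup_read()/msm_qup_write() helpers added above give SoC code access to the 6-bit QUP mode field behind QUP_MASK. A hypothetical caller would pair them for a read-modify-write; the sketch below is only illustrative, assuming the declarations exported via linux/pinctrl/qcom-pinctrl.h, with the caller name invented for the example:

#include <linux/pinctrl/qcom-pinctrl.h>

/* Hypothetical read-modify-write of a QUP I3C mode register. Both helpers
 * return -ENOENT when @mode has no entry in the SoC's qup_regs[] table. */
static int example_update_qup_mode(u32 mode, u32 set_bits)
{
	int val = msm_qup_read(mode);

	if (val < 0)
		return val;

	return msm_qup_write(mode, (u32)val | set_bits);
}
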
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index a1a5aab..58abe01 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -13,6 +13,8 @@
 #ifndef __PINCTRL_MSM_H__
 #define __PINCTRL_MSM_H__
 
+#include <linux/pinctrl/qcom-pinctrl.h>
+
 struct pinctrl_pin_desc;
 
 /**
@@ -99,6 +101,16 @@
 	unsigned intr_detection_width:5;
 };
 
+/**
+ * struct pinctrl_qup - Qup mode configuration
+ * @mode:	Qup i3c mode
+ * @offset:	Offset of the register
+ */
+struct pinctrl_qup {
+	u32 mode;
+	u32 offset;
+};
+
 /**
  * struct msm_pinctrl_soc_data - Qualcomm pin controller driver configuration
  * @pins:	    An array describing all pins the pin controller affects.
@@ -119,6 +131,8 @@
 	unsigned ngroups;
 	unsigned ngpios;
 	bool pull_no_keeper;
+	struct pinctrl_qup *qup_regs;
+	unsigned int nqup_regs;
 };
 
 int msm_pinctrl_probe(struct platform_device *pdev,
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 56bd2c4..8172449 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -182,4 +182,42 @@
 	  module is used to configure and read the configuration from the
 	  Serial Engines.
 
+config IPA3_REGDUMP
+	bool "Dump or collect IPA/GSI register values on Linux crash"
+	depends on IPA3
+	help
+	  This option is to be used when the saving of IPA register state is
+	  desired upon a fatal system exception. When an exception occurs,
+	  an IPA register collection algorithm will be run in the context of
+	  the exception handler.  A predefined set of registers will be read
+	  and their values will be placed into a static hierarchical data
+	  structure that can be examined post-crash.
+
+choice
+	prompt "Platform whose registers are to be dumped/collected"
+	depends on IPA3_REGDUMP
+	help
+	  The choices within represent the possible platforms this build is
+	  intended for. The choices are mutually exclusive.  Selecting one
+	  determines the include path used, so that the relevant register
+	  definitions will be found.  Each platform has unique
+	  register definitions.
+
+config IPA3_REGDUMP_IPA_4_5
+	bool "The 4.5 IPA"
+	depends on IPA3_REGDUMP
+	depends on ARCH_KONA
+	help
+	  Set this to enable the 4.5 IPA's registers to be dumped/collected.
+
+endchoice
+
+config IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS
+	int "The number of extra endp registers for remaining pipes"
+	depends on IPA3_REGDUMP
+	default 0
+	help
+	  If the platform has extra endpoint registers for remaining
+	  pipes, specify how many here.
+
 endmenu
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index 28a3c2a..06ea966 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -2313,7 +2313,13 @@
 		return -GSI_STATUS_NODEV;
 	}
 	memset(ctx, 0, sizeof(*ctx));
-	user_data_size = props->ring_len / props->re_size;
+
+	/* IPA-offloaded WDI channels do not require a user_data pointer */
+	if (props->prot != GSI_CHAN_PROT_WDI2 &&
+		props->prot != GSI_CHAN_PROT_WDI3)
+		user_data_size = props->ring_len / props->re_size;
+	else
+		user_data_size = props->re_size;
 	/*
 	 * GCI channels might have OOO event completions up to GSI_VEID_MAX.
 	 * user_data needs to be large enough to accommodate those.
@@ -2496,6 +2502,7 @@
 	mutex_unlock(&ctx->mlock);
 	return GSI_STATUS_SUCCESS;
 }
+EXPORT_SYMBOL(gsi_write_channel_scratch3_reg);
 
 static void __gsi_read_channel_scratch(unsigned long chan_hdl,
 		union __packed gsi_channel_scratch * val)
diff --git a/drivers/platform/msm/ipa/ipa_v3/Makefile b/drivers/platform/msm/ipa/ipa_v3/Makefile
index d1d462b..7d07e03 100644
--- a/drivers/platform/msm/ipa/ipa_v3/Makefile
+++ b/drivers/platform/msm/ipa/ipa_v3/Makefile
@@ -13,3 +13,9 @@
 obj-$(CONFIG_RMNET_IPA3) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o
 
 obj-$(CONFIG_IPA3_MHI_PROXY) += ipa_mhi_proxy.o
+
+ipat-$(CONFIG_IPA3_REGDUMP) += dump/ipa_reg_dump.o
+
+ccflags-$(CONFIG_IPA3_REGDUMP) += -Idrivers/platform/msm/ipa/ipa_v3/dump
+
+ccflags-$(CONFIG_IPA3_REGDUMP_IPA_4_5) += -Idrivers/platform/msm/ipa/ipa_v3/dump/ipa4.5
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/gsi_hwio.h b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/gsi_hwio.h
new file mode 100644
index 0000000..1699699
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/gsi_hwio.h
@@ -0,0 +1,2392 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#if !defined(_GSI_HWIO_H_)
+#define _GSI_HWIO_H_
+/*
+ *
+ * HWIO register definitions follow.
+ *
+ */
+#define GSI_REG_BASE (IPA_0_IPA_WRAPPER_BASE + 0x00004000)
+#define GSI_REG_BASE_PHYS (IPA_0_IPA_WRAPPER_BASE_PHYS + 0x00004000)
+#define GSI_REG_BASE_OFFS 0x00004000
+#define HWIO_GSI_CFG_ADDR (GSI_REG_BASE + 0x00000000)
+#define HWIO_GSI_CFG_PHYS (GSI_REG_BASE_PHYS + 0x00000000)
+#define HWIO_GSI_CFG_OFFS (GSI_REG_BASE_OFFS + 0x00000000)
+#define HWIO_GSI_CFG_RMSK 0xf3f
+#define HWIO_GSI_CFG_ATTR 0x3
+#define HWIO_GSI_CFG_IN in_dword_masked(HWIO_GSI_CFG_ADDR, \
+					HWIO_GSI_CFG_RMSK)
+#define HWIO_GSI_CFG_INM(m) in_dword_masked(HWIO_GSI_CFG_ADDR, m)
+#define HWIO_GSI_CFG_OUT(v) out_dword(HWIO_GSI_CFG_ADDR, v)
+#define HWIO_GSI_CFG_OUTM(m, v) out_dword_masked_ns(HWIO_GSI_CFG_ADDR, \
+						    m, \
+						    v, \
+						    HWIO_GSI_CFG_IN)
+#define HWIO_GSI_CFG_SLEEP_CLK_DIV_BMSK 0xf00
+#define HWIO_GSI_CFG_SLEEP_CLK_DIV_SHFT 0x8
+#define HWIO_GSI_CFG_BP_MTRIX_DISABLE_BMSK 0x20
+#define HWIO_GSI_CFG_BP_MTRIX_DISABLE_SHFT 0x5
+#define HWIO_GSI_CFG_GSI_PWR_CLPS_BMSK 0x10
+#define HWIO_GSI_CFG_GSI_PWR_CLPS_SHFT 0x4
+#define HWIO_GSI_CFG_UC_IS_MCS_BMSK 0x8
+#define HWIO_GSI_CFG_UC_IS_MCS_SHFT 0x3
+#define HWIO_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK 0x4
+#define HWIO_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT 0x2
+#define HWIO_GSI_CFG_MCS_ENABLE_BMSK 0x2
+#define HWIO_GSI_CFG_MCS_ENABLE_SHFT 0x1
+#define HWIO_GSI_CFG_GSI_ENABLE_BMSK 0x1
+#define HWIO_GSI_CFG_GSI_ENABLE_SHFT 0x0
+#define HWIO_GSI_MANAGER_MCS_CODE_VER_ADDR (GSI_REG_BASE + 0x00000008)
+#define HWIO_GSI_MANAGER_MCS_CODE_VER_PHYS (GSI_REG_BASE_PHYS + 0x00000008)
+#define HWIO_GSI_MANAGER_MCS_CODE_VER_OFFS (GSI_REG_BASE_OFFS + 0x00000008)
+#define HWIO_GSI_ZEROS_ADDR (GSI_REG_BASE + 0x00000010)
+#define HWIO_GSI_ZEROS_PHYS (GSI_REG_BASE_PHYS + 0x00000010)
+#define HWIO_GSI_ZEROS_OFFS (GSI_REG_BASE_OFFS + 0x00000010)
+#define HWIO_GSI_PERIPH_BASE_ADDR_LSB_ADDR (GSI_REG_BASE + 0x00000018)
+#define HWIO_GSI_PERIPH_BASE_ADDR_LSB_PHYS (GSI_REG_BASE_PHYS + 0x00000018)
+#define HWIO_GSI_PERIPH_BASE_ADDR_LSB_OFFS (GSI_REG_BASE_OFFS + 0x00000018)
+#define HWIO_GSI_PERIPH_BASE_ADDR_MSB_ADDR (GSI_REG_BASE + 0x0000001c)
+#define HWIO_GSI_PERIPH_BASE_ADDR_MSB_PHYS (GSI_REG_BASE_PHYS + 0x0000001c)
+#define HWIO_GSI_PERIPH_BASE_ADDR_MSB_OFFS (GSI_REG_BASE_OFFS + 0x0000001c)
+#define HWIO_GSI_PERIPH_PENDING_ADDR (GSI_REG_BASE + 0x00000020)
+#define HWIO_GSI_PERIPH_PENDING_PHYS (GSI_REG_BASE_PHYS + 0x00000020)
+#define HWIO_GSI_PERIPH_PENDING_OFFS (GSI_REG_BASE_OFFS + 0x00000020)
+#define HWIO_GSI_MOQA_CFG_ADDR (GSI_REG_BASE + 0x00000030)
+#define HWIO_GSI_MOQA_CFG_PHYS (GSI_REG_BASE_PHYS + 0x00000030)
+#define HWIO_GSI_MOQA_CFG_OFFS (GSI_REG_BASE_OFFS + 0x00000030)
+#define HWIO_GSI_REE_CFG_ADDR (GSI_REG_BASE + 0x00000038)
+#define HWIO_GSI_REE_CFG_PHYS (GSI_REG_BASE_PHYS + 0x00000038)
+#define HWIO_GSI_REE_CFG_OFFS (GSI_REG_BASE_OFFS + 0x00000038)
+#define HWIO_GSI_REE_CFG_RMSK 0xff03
+#define HWIO_GSI_REE_CFG_ATTR 0x3
+#define HWIO_GSI_REE_CFG_IN in_dword_masked(HWIO_GSI_REE_CFG_ADDR, \
+					    HWIO_GSI_REE_CFG_RMSK)
+#define HWIO_GSI_REE_CFG_INM(m) in_dword_masked(HWIO_GSI_REE_CFG_ADDR, m)
+#define HWIO_GSI_REE_CFG_OUT(v) out_dword(HWIO_GSI_REE_CFG_ADDR, v)
+#define HWIO_GSI_REE_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_GSI_REE_CFG_ADDR, \
+		m, \
+		v, \
+		HWIO_GSI_REE_CFG_IN)
+#define HWIO_GSI_REE_CFG_MAX_BURST_SIZE_BMSK 0xff00
+#define HWIO_GSI_REE_CFG_MAX_BURST_SIZE_SHFT 0x8
+#define HWIO_GSI_REE_CFG_CHANNEL_EMPTY_INT_ENABLE_BMSK 0x2
+#define HWIO_GSI_REE_CFG_CHANNEL_EMPTY_INT_ENABLE_SHFT 0x1
+#define HWIO_GSI_REE_CFG_MOVE_TO_ESC_CLR_MODE_TRSH_BMSK 0x1
+#define HWIO_GSI_REE_CFG_MOVE_TO_ESC_CLR_MODE_TRSH_SHFT 0x0
+#define HWIO_GSI_CGC_CTRL_ADDR (GSI_REG_BASE + 0x00000060)
+#define HWIO_GSI_CGC_CTRL_PHYS (GSI_REG_BASE_PHYS + 0x00000060)
+#define HWIO_GSI_CGC_CTRL_OFFS (GSI_REG_BASE_OFFS + 0x00000060)
+#define HWIO_GSI_MSI_CACHEATTR_ADDR (GSI_REG_BASE + 0x00000080)
+#define HWIO_GSI_MSI_CACHEATTR_PHYS (GSI_REG_BASE_PHYS + 0x00000080)
+#define HWIO_GSI_MSI_CACHEATTR_OFFS (GSI_REG_BASE_OFFS + 0x00000080)
+#define HWIO_GSI_EVENT_CACHEATTR_ADDR (GSI_REG_BASE + 0x00000084)
+#define HWIO_GSI_EVENT_CACHEATTR_PHYS (GSI_REG_BASE_PHYS + 0x00000084)
+#define HWIO_GSI_EVENT_CACHEATTR_OFFS (GSI_REG_BASE_OFFS + 0x00000084)
+#define HWIO_GSI_DATA_CACHEATTR_ADDR (GSI_REG_BASE + 0x00000088)
+#define HWIO_GSI_DATA_CACHEATTR_PHYS (GSI_REG_BASE_PHYS + 0x00000088)
+#define HWIO_GSI_DATA_CACHEATTR_OFFS (GSI_REG_BASE_OFFS + 0x00000088)
+#define HWIO_GSI_TRE_CACHEATTR_ADDR (GSI_REG_BASE + 0x00000090)
+#define HWIO_GSI_TRE_CACHEATTR_PHYS (GSI_REG_BASE_PHYS + 0x00000090)
+#define HWIO_GSI_TRE_CACHEATTR_OFFS (GSI_REG_BASE_OFFS + 0x00000090)
+#define HWIO_IC_DISABLE_CHNL_BCK_PRS_LSB_ADDR (GSI_REG_BASE + 0x000000a0)
+#define HWIO_IC_DISABLE_CHNL_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + \
+					       0x000000a0)
+#define HWIO_IC_DISABLE_CHNL_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + \
+					       0x000000a0)
+#define HWIO_IC_DISABLE_CHNL_BCK_PRS_MSB_ADDR (GSI_REG_BASE + 0x000000a4)
+#define HWIO_IC_DISABLE_CHNL_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + \
+					       0x000000a4)
+#define HWIO_IC_DISABLE_CHNL_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + \
+					       0x000000a4)
+#define HWIO_IC_GEN_EVNT_BCK_PRS_LSB_ADDR (GSI_REG_BASE + 0x000000a8)
+#define HWIO_IC_GEN_EVNT_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + 0x000000a8)
+#define HWIO_IC_GEN_EVNT_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + 0x000000a8)
+#define HWIO_IC_GEN_EVNT_BCK_PRS_MSB_ADDR (GSI_REG_BASE + 0x000000ac)
+#define HWIO_IC_GEN_EVNT_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + 0x000000ac)
+#define HWIO_IC_GEN_EVNT_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + 0x000000ac)
+#define HWIO_IC_GEN_INT_BCK_PRS_LSB_ADDR (GSI_REG_BASE + 0x000000b0)
+#define HWIO_IC_GEN_INT_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + 0x000000b0)
+#define HWIO_IC_GEN_INT_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + 0x000000b0)
+#define HWIO_IC_GEN_INT_BCK_PRS_MSB_ADDR (GSI_REG_BASE + 0x000000b4)
+#define HWIO_IC_GEN_INT_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + 0x000000b4)
+#define HWIO_IC_GEN_INT_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + 0x000000b4)
+#define HWIO_IC_STOP_INT_MOD_BCK_PRS_LSB_ADDR (GSI_REG_BASE + 0x000000b8)
+#define HWIO_IC_STOP_INT_MOD_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + \
+					       0x000000b8)
+#define HWIO_IC_STOP_INT_MOD_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + \
+					       0x000000b8)
+#define HWIO_IC_STOP_INT_MOD_BCK_PRS_MSB_ADDR (GSI_REG_BASE + 0x000000bc)
+#define HWIO_IC_STOP_INT_MOD_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + \
+					       0x000000bc)
+#define HWIO_IC_STOP_INT_MOD_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + \
+					       0x000000bc)
+#define HWIO_IC_PROCESS_DESC_BCK_PRS_LSB_ADDR (GSI_REG_BASE + 0x000000c0)
+#define HWIO_IC_PROCESS_DESC_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + \
+					       0x000000c0)
+#define HWIO_IC_PROCESS_DESC_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + \
+					       0x000000c0)
+#define HWIO_IC_PROCESS_DESC_BCK_PRS_MSB_ADDR (GSI_REG_BASE + 0x000000c4)
+#define HWIO_IC_PROCESS_DESC_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + \
+					       0x000000c4)
+#define HWIO_IC_PROCESS_DESC_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + \
+					       0x000000c4)
+#define HWIO_IC_TLV_STOP_BCK_PRS_LSB_ADDR (GSI_REG_BASE + 0x000000c8)
+#define HWIO_IC_TLV_STOP_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + 0x000000c8)
+#define HWIO_IC_TLV_STOP_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + 0x000000c8)
+#define HWIO_IC_TLV_STOP_BCK_PRS_MSB_ADDR (GSI_REG_BASE + 0x000000cc)
+#define HWIO_IC_TLV_STOP_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + 0x000000cc)
+#define HWIO_IC_TLV_STOP_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + 0x000000cc)
+#define HWIO_IC_TLV_RESET_BCK_PRS_LSB_ADDR (GSI_REG_BASE + 0x000000d0)
+#define HWIO_IC_TLV_RESET_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + 0x000000d0)
+#define HWIO_IC_TLV_RESET_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + 0x000000d0)
+#define HWIO_IC_TLV_RESET_BCK_PRS_MSB_ADDR (GSI_REG_BASE + 0x000000d4)
+#define HWIO_IC_TLV_RESET_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + 0x000000d4)
+#define HWIO_IC_TLV_RESET_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + 0x000000d4)
+#define HWIO_IC_RGSTR_TIMER_BCK_PRS_LSB_ADDR (GSI_REG_BASE + 0x000000d8)
+#define HWIO_IC_RGSTR_TIMER_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + \
+					      0x000000d8)
+#define HWIO_IC_RGSTR_TIMER_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + \
+					      0x000000d8)
+#define HWIO_IC_RGSTR_TIMER_BCK_PRS_MSB_ADDR (GSI_REG_BASE + 0x000000dc)
+#define HWIO_IC_RGSTR_TIMER_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + \
+					      0x000000dc)
+#define HWIO_IC_RGSTR_TIMER_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + \
+					      0x000000dc)
+#define HWIO_IC_READ_BCK_PRS_LSB_ADDR (GSI_REG_BASE + 0x000000e0)
+#define HWIO_IC_READ_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + 0x000000e0)
+#define HWIO_IC_READ_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + 0x000000e0)
+#define HWIO_IC_READ_BCK_PRS_MSB_ADDR (GSI_REG_BASE + 0x000000e4)
+#define HWIO_IC_READ_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + 0x000000e4)
+#define HWIO_IC_READ_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + 0x000000e4)
+#define HWIO_IC_WRITE_BCK_PRS_LSB_ADDR (GSI_REG_BASE + 0x000000e8)
+#define HWIO_IC_WRITE_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + 0x000000e8)
+#define HWIO_IC_WRITE_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + 0x000000e8)
+#define HWIO_IC_WRITE_BCK_PRS_MSB_ADDR (GSI_REG_BASE + 0x000000ec)
+#define HWIO_IC_WRITE_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + 0x000000ec)
+#define HWIO_IC_WRITE_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + 0x000000ec)
+#define HWIO_IC_UCONTROLLER_GPR_BCK_PRS_LSB_ADDR (GSI_REG_BASE + \
+						  0x000000f0)
+#define HWIO_IC_UCONTROLLER_GPR_BCK_PRS_LSB_PHYS (GSI_REG_BASE_PHYS + \
+						  0x000000f0)
+#define HWIO_IC_UCONTROLLER_GPR_BCK_PRS_LSB_OFFS (GSI_REG_BASE_OFFS + \
+						  0x000000f0)
+#define HWIO_IC_UCONTROLLER_GPR_BCK_PRS_MSB_ADDR (GSI_REG_BASE + \
+						  0x000000f4)
+#define HWIO_IC_UCONTROLLER_GPR_BCK_PRS_MSB_PHYS (GSI_REG_BASE_PHYS + \
+						  0x000000f4)
+#define HWIO_IC_UCONTROLLER_GPR_BCK_PRS_MSB_OFFS (GSI_REG_BASE_OFFS + \
+						  0x000000f4)
+#define HWIO_IC_INT_WEIGHT_REE_ADDR (GSI_REG_BASE + 0x00000100)
+#define HWIO_IC_INT_WEIGHT_REE_PHYS (GSI_REG_BASE_PHYS + 0x00000100)
+#define HWIO_IC_INT_WEIGHT_REE_OFFS (GSI_REG_BASE_OFFS + 0x00000100)
+#define HWIO_IC_INT_WEIGHT_EVT_ENG_ADDR (GSI_REG_BASE + 0x00000104)
+#define HWIO_IC_INT_WEIGHT_EVT_ENG_PHYS (GSI_REG_BASE_PHYS + 0x00000104)
+#define HWIO_IC_INT_WEIGHT_EVT_ENG_OFFS (GSI_REG_BASE_OFFS + 0x00000104)
+#define HWIO_IC_INT_WEIGHT_INT_ENG_ADDR (GSI_REG_BASE + 0x00000108)
+#define HWIO_IC_INT_WEIGHT_INT_ENG_PHYS (GSI_REG_BASE_PHYS + 0x00000108)
+#define HWIO_IC_INT_WEIGHT_INT_ENG_OFFS (GSI_REG_BASE_OFFS + 0x00000108)
+#define HWIO_IC_INT_WEIGHT_CSR_ADDR (GSI_REG_BASE + 0x0000010c)
+#define HWIO_IC_INT_WEIGHT_CSR_PHYS (GSI_REG_BASE_PHYS + 0x0000010c)
+#define HWIO_IC_INT_WEIGHT_CSR_OFFS (GSI_REG_BASE_OFFS + 0x0000010c)
+#define HWIO_IC_INT_WEIGHT_TLV_ENG_ADDR (GSI_REG_BASE + 0x00000110)
+#define HWIO_IC_INT_WEIGHT_TLV_ENG_PHYS (GSI_REG_BASE_PHYS + 0x00000110)
+#define HWIO_IC_INT_WEIGHT_TLV_ENG_OFFS (GSI_REG_BASE_OFFS + 0x00000110)
+#define HWIO_IC_INT_WEIGHT_TIMER_ENG_ADDR (GSI_REG_BASE + 0x00000114)
+#define HWIO_IC_INT_WEIGHT_TIMER_ENG_PHYS (GSI_REG_BASE_PHYS + 0x00000114)
+#define HWIO_IC_INT_WEIGHT_TIMER_ENG_OFFS (GSI_REG_BASE_OFFS + 0x00000114)
+#define HWIO_IC_INT_WEIGHT_DB_ENG_ADDR (GSI_REG_BASE + 0x00000118)
+#define HWIO_IC_INT_WEIGHT_DB_ENG_PHYS (GSI_REG_BASE_PHYS + 0x00000118)
+#define HWIO_IC_INT_WEIGHT_DB_ENG_OFFS (GSI_REG_BASE_OFFS + 0x00000118)
+#define HWIO_IC_INT_WEIGHT_RD_WR_ENG_ADDR (GSI_REG_BASE + 0x0000011c)
+#define HWIO_IC_INT_WEIGHT_RD_WR_ENG_PHYS (GSI_REG_BASE_PHYS + 0x0000011c)
+#define HWIO_IC_INT_WEIGHT_RD_WR_ENG_OFFS (GSI_REG_BASE_OFFS + 0x0000011c)
+#define HWIO_IC_INT_WEIGHT_UCONTROLLER_ENG_ADDR (GSI_REG_BASE + 0x00000120)
+#define HWIO_IC_INT_WEIGHT_UCONTROLLER_ENG_PHYS (GSI_REG_BASE_PHYS + \
+						 0x00000120)
+#define HWIO_IC_INT_WEIGHT_UCONTROLLER_ENG_OFFS (GSI_REG_BASE_OFFS + \
+						 0x00000120)
+#define HWIO_IC_INT_WEIGHT_SDMA_ADDR (GSI_REG_BASE + 0x00000124)
+#define HWIO_IC_INT_WEIGHT_SDMA_PHYS (GSI_REG_BASE_PHYS + 0x00000124)
+#define HWIO_IC_INT_WEIGHT_SDMA_OFFS (GSI_REG_BASE_OFFS + 0x00000124)
+#define HWIO_GSI_SDMA_CFG_ADDR (GSI_REG_BASE + 0x0000003c)
+#define HWIO_GSI_SDMA_CFG_PHYS (GSI_REG_BASE_PHYS + 0x0000003c)
+#define HWIO_GSI_SDMA_CFG_OFFS (GSI_REG_BASE_OFFS + 0x0000003c)
+#define HWIO_GSI_SDMA_CACHEATTR_ADDR (GSI_REG_BASE + 0x00000094)
+#define HWIO_GSI_SDMA_CACHEATTR_PHYS (GSI_REG_BASE_PHYS + 0x00000094)
+#define HWIO_GSI_SDMA_CACHEATTR_OFFS (GSI_REG_BASE_OFFS + 0x00000094)
+#define HWIO_GSI_SDMA_SG_IOVEC_LSB_n_ADDR(n) (GSI_REG_BASE + 0x00000140 + \
+					      0x8 * (n))
+#define HWIO_GSI_SDMA_SG_IOVEC_LSB_n_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x00000140 + 0x8 * (n))
+#define HWIO_GSI_SDMA_SG_IOVEC_LSB_n_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x00000140 + 0x8 * (n))
+#define HWIO_GSI_SDMA_SG_IOVEC_MSB_n_ADDR(n) (GSI_REG_BASE + 0x00000144 + \
+					      0x8 * (n))
+#define HWIO_GSI_SDMA_SG_IOVEC_MSB_n_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x00000144 + 0x8 * (n))
+#define HWIO_GSI_SDMA_SG_IOVEC_MSB_n_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x00000144 + 0x8 * (n))
+#define HWIO_GSI_MANAGER_EE_QOS_n_ADDR(n) (GSI_REG_BASE + 0x00000300 + \
+					   0x4 * (n))
+#define HWIO_GSI_MANAGER_EE_QOS_n_PHYS(n) (GSI_REG_BASE_PHYS + \
+					   0x00000300 + 0x4 * (n))
+#define HWIO_GSI_MANAGER_EE_QOS_n_OFFS(n) (GSI_REG_BASE_OFFS + \
+					   0x00000300 + 0x4 * (n))
+#define HWIO_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_ADDR (GSI_REG_BASE + \
+						    0x00000200)
+#define HWIO_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS +	\
+						    0x00000200)
+#define HWIO_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS +	\
+						    0x00000200)
+#define HWIO_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_ADDR (GSI_REG_BASE + \
+						    0x00000204)
+#define HWIO_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS +	\
+						    0x00000204)
+#define HWIO_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS +	\
+						    0x00000204)
+#define HWIO_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_ADDR (GSI_REG_BASE + \
+						      0x00000208)
+#define HWIO_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS + \
+						      0x00000208)
+#define HWIO_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS + \
+						      0x00000208)
+#define HWIO_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_ADDR (GSI_REG_BASE + \
+						      0x0000020c)
+#define HWIO_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS + \
+						      0x0000020c)
+#define HWIO_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS + \
+						      0x0000020c)
+#define HWIO_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_ADDR (GSI_REG_BASE + \
+						     0x00000240)
+#define HWIO_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS + \
+						     0x00000240)
+#define HWIO_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS + \
+						     0x00000240)
+#define HWIO_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_ADDR (GSI_REG_BASE + \
+						      0x00000244)
+#define HWIO_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS + \
+						      0x00000244)
+#define HWIO_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS + \
+						      0x00000244)
+#define HWIO_GSI_SHRAM_PTR_MCS_SCRATCH_BASE_ADDR_ADDR (GSI_REG_BASE + \
+						       0x00000248)
+#define HWIO_GSI_SHRAM_PTR_MCS_SCRATCH_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS + \
+						       0x00000248)
+#define HWIO_GSI_SHRAM_PTR_MCS_SCRATCH_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS + \
+						       0x00000248)
+#define HWIO_GSI_SHRAM_PTR_MCS_SCRATCH1_BASE_ADDR_ADDR (GSI_REG_BASE + \
+							0x0000024c)
+#define HWIO_GSI_SHRAM_PTR_MCS_SCRATCH1_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS \
+							+ 0x0000024c)
+#define HWIO_GSI_SHRAM_PTR_MCS_SCRATCH1_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS \
+							+ 0x0000024c)
+#define HWIO_GSI_SHRAM_PTR_MCS_SCRATCH2_BASE_ADDR_ADDR (GSI_REG_BASE + \
+							0x00000250)
+#define HWIO_GSI_SHRAM_PTR_MCS_SCRATCH2_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS \
+							+ 0x00000250)
+#define HWIO_GSI_SHRAM_PTR_MCS_SCRATCH2_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS \
+							+ 0x00000250)
+#define HWIO_GSI_SHRAM_PTR_CH_VP_TRANS_TABLE_BASE_ADDR_ADDR (GSI_REG_BASE \
+							     + 0x00000254)
+#define HWIO_GSI_SHRAM_PTR_CH_VP_TRANS_TABLE_BASE_ADDR_PHYS ( \
+		GSI_REG_BASE_PHYS + 0x00000254)
+#define HWIO_GSI_SHRAM_PTR_CH_VP_TRANS_TABLE_BASE_ADDR_OFFS ( \
+		GSI_REG_BASE_OFFS + 0x00000254)
+#define HWIO_GSI_SHRAM_PTR_EV_VP_TRANS_TABLE_BASE_ADDR_ADDR (GSI_REG_BASE \
+							     + 0x00000258)
+#define HWIO_GSI_SHRAM_PTR_EV_VP_TRANS_TABLE_BASE_ADDR_PHYS ( \
+		GSI_REG_BASE_PHYS + 0x00000258)
+#define HWIO_GSI_SHRAM_PTR_EV_VP_TRANS_TABLE_BASE_ADDR_OFFS ( \
+		GSI_REG_BASE_OFFS + 0x00000258)
+#define HWIO_GSI_SHRAM_PTR_USER_INFO_DATA_BASE_ADDR_ADDR (GSI_REG_BASE + \
+							  0x0000025c)
+#define HWIO_GSI_SHRAM_PTR_USER_INFO_DATA_BASE_ADDR_PHYS ( \
+		GSI_REG_BASE_PHYS + 0x0000025c)
+#define HWIO_GSI_SHRAM_PTR_USER_INFO_DATA_BASE_ADDR_OFFS ( \
+		GSI_REG_BASE_OFFS + 0x0000025c)
+#define HWIO_GSI_SHRAM_PTR_EE_CMD_FIFO_BASE_ADDR_ADDR (GSI_REG_BASE + \
+						       0x00000260)
+#define HWIO_GSI_SHRAM_PTR_EE_CMD_FIFO_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS + \
+						       0x00000260)
+#define HWIO_GSI_SHRAM_PTR_EE_CMD_FIFO_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS + \
+						       0x00000260)
+#define HWIO_GSI_SHRAM_PTR_CH_CMD_FIFO_BASE_ADDR_ADDR (GSI_REG_BASE + \
+						       0x00000264)
+#define HWIO_GSI_SHRAM_PTR_CH_CMD_FIFO_BASE_ADDR_PHYS (GSI_REG_BASE_PHYS + \
+						       0x00000264)
+#define HWIO_GSI_SHRAM_PTR_CH_CMD_FIFO_BASE_ADDR_OFFS (GSI_REG_BASE_OFFS + \
+						       0x00000264)
+#define HWIO_GSI_IRAM_PTR_CH_CMD_ADDR (GSI_REG_BASE + 0x00000400)
+#define HWIO_GSI_IRAM_PTR_CH_CMD_PHYS (GSI_REG_BASE_PHYS + 0x00000400)
+#define HWIO_GSI_IRAM_PTR_CH_CMD_OFFS (GSI_REG_BASE_OFFS + 0x00000400)
+#define HWIO_GSI_IRAM_PTR_EE_GENERIC_CMD_ADDR (GSI_REG_BASE + 0x00000404)
+#define HWIO_GSI_IRAM_PTR_EE_GENERIC_CMD_PHYS (GSI_REG_BASE_PHYS + \
+					       0x00000404)
+#define HWIO_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS (GSI_REG_BASE_OFFS + \
+					       0x00000404)
+#define HWIO_GSI_IRAM_PTR_TLV_CH_NOT_FULL_ADDR (GSI_REG_BASE + 0x00000408)
+#define HWIO_GSI_IRAM_PTR_TLV_CH_NOT_FULL_PHYS (GSI_REG_BASE_PHYS + \
+						0x00000408)
+#define HWIO_GSI_IRAM_PTR_TLV_CH_NOT_FULL_OFFS (GSI_REG_BASE_OFFS + \
+						0x00000408)
+#define HWIO_GSI_IRAM_PTR_CH_DB_ADDR (GSI_REG_BASE + 0x00000418)
+#define HWIO_GSI_IRAM_PTR_CH_DB_PHYS (GSI_REG_BASE_PHYS + 0x00000418)
+#define HWIO_GSI_IRAM_PTR_CH_DB_OFFS (GSI_REG_BASE_OFFS + 0x00000418)
+#define HWIO_GSI_IRAM_PTR_EV_DB_ADDR (GSI_REG_BASE + 0x0000041c)
+#define HWIO_GSI_IRAM_PTR_EV_DB_PHYS (GSI_REG_BASE_PHYS + 0x0000041c)
+#define HWIO_GSI_IRAM_PTR_EV_DB_OFFS (GSI_REG_BASE_OFFS + 0x0000041c)
+#define HWIO_GSI_IRAM_PTR_NEW_RE_ADDR (GSI_REG_BASE + 0x00000420)
+#define HWIO_GSI_IRAM_PTR_NEW_RE_PHYS (GSI_REG_BASE_PHYS + 0x00000420)
+#define HWIO_GSI_IRAM_PTR_NEW_RE_OFFS (GSI_REG_BASE_OFFS + 0x00000420)
+#define HWIO_GSI_IRAM_PTR_CH_DIS_COMP_ADDR (GSI_REG_BASE + 0x00000424)
+#define HWIO_GSI_IRAM_PTR_CH_DIS_COMP_PHYS (GSI_REG_BASE_PHYS + 0x00000424)
+#define HWIO_GSI_IRAM_PTR_CH_DIS_COMP_OFFS (GSI_REG_BASE_OFFS + 0x00000424)
+#define HWIO_GSI_IRAM_PTR_CH_EMPTY_ADDR (GSI_REG_BASE + 0x00000428)
+#define HWIO_GSI_IRAM_PTR_CH_EMPTY_PHYS (GSI_REG_BASE_PHYS + 0x00000428)
+#define HWIO_GSI_IRAM_PTR_CH_EMPTY_OFFS (GSI_REG_BASE_OFFS + 0x00000428)
+#define HWIO_GSI_IRAM_PTR_EVENT_GEN_COMP_ADDR (GSI_REG_BASE + 0x0000042c)
+#define HWIO_GSI_IRAM_PTR_EVENT_GEN_COMP_PHYS (GSI_REG_BASE_PHYS + \
+					       0x0000042c)
+#define HWIO_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS (GSI_REG_BASE_OFFS + \
+					       0x0000042c)
+#define HWIO_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_ADDR (GSI_REG_BASE + \
+						   0x00000430)
+#define HWIO_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_PHYS (GSI_REG_BASE_PHYS + \
+						   0x00000430)
+#define HWIO_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS (GSI_REG_BASE_OFFS + \
+						   0x00000430)
+#define HWIO_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_ADDR (GSI_REG_BASE + \
+						   0x00000434)
+#define HWIO_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_PHYS (GSI_REG_BASE_PHYS + \
+						   0x00000434)
+#define HWIO_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS (GSI_REG_BASE_OFFS + \
+						   0x00000434)
+#define HWIO_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_ADDR (GSI_REG_BASE + \
+						   0x00000438)
+#define HWIO_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_PHYS (GSI_REG_BASE_PHYS + \
+						   0x00000438)
+#define HWIO_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS (GSI_REG_BASE_OFFS + \
+						   0x00000438)
+#define HWIO_GSI_IRAM_PTR_TIMER_EXPIRED_ADDR (GSI_REG_BASE + 0x0000043c)
+#define HWIO_GSI_IRAM_PTR_TIMER_EXPIRED_PHYS (GSI_REG_BASE_PHYS + \
+					      0x0000043c)
+#define HWIO_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS (GSI_REG_BASE_OFFS + \
+					      0x0000043c)
+#define HWIO_GSI_IRAM_PTR_WRITE_ENG_COMP_ADDR (GSI_REG_BASE + 0x00000440)
+#define HWIO_GSI_IRAM_PTR_WRITE_ENG_COMP_PHYS (GSI_REG_BASE_PHYS + \
+					       0x00000440)
+#define HWIO_GSI_IRAM_PTR_WRITE_ENG_COMP_OFFS (GSI_REG_BASE_OFFS + \
+					       0x00000440)
+#define HWIO_GSI_IRAM_PTR_READ_ENG_COMP_ADDR (GSI_REG_BASE + 0x00000444)
+#define HWIO_GSI_IRAM_PTR_READ_ENG_COMP_PHYS (GSI_REG_BASE_PHYS + \
+					      0x00000444)
+#define HWIO_GSI_IRAM_PTR_READ_ENG_COMP_OFFS (GSI_REG_BASE_OFFS + \
+					      0x00000444)
+#define HWIO_GSI_IRAM_PTR_UC_GP_INT_ADDR (GSI_REG_BASE + 0x00000448)
+#define HWIO_GSI_IRAM_PTR_UC_GP_INT_PHYS (GSI_REG_BASE_PHYS + 0x00000448)
+#define HWIO_GSI_IRAM_PTR_UC_GP_INT_OFFS (GSI_REG_BASE_OFFS + 0x00000448)
+#define HWIO_GSI_IRAM_PTR_INT_MOD_STOPPED_ADDR (GSI_REG_BASE + 0x0000044c)
+#define HWIO_GSI_IRAM_PTR_INT_MOD_STOPPED_PHYS (GSI_REG_BASE_PHYS + \
+						0x0000044c)
+#define HWIO_GSI_IRAM_PTR_INT_MOD_STOPPED_OFFS (GSI_REG_BASE_OFFS + \
+						0x0000044c)
+#define HWIO_GSI_IRAM_PTR_SDMA_INT_n_ADDR(n) (GSI_REG_BASE + 0x00000450 + \
+					      0x4 * (n))
+#define HWIO_GSI_IRAM_PTR_SDMA_INT_n_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x00000450 + 0x4 * (n))
+#define HWIO_GSI_IRAM_PTR_SDMA_INT_n_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x00000450 + 0x4 * (n))
+#define HWIO_GSI_INST_RAM_n_ADDR(n) (GSI_REG_BASE + 0x0001b000 + 0x4 * (n))
+#define HWIO_GSI_INST_RAM_n_PHYS(n) (GSI_REG_BASE_PHYS + 0x0001b000 + \
+				     0x4 * (n))
+#define HWIO_GSI_INST_RAM_n_OFFS(n) (GSI_REG_BASE_OFFS + 0x0001b000 + \
+				     0x4 * (n))
+#define HWIO_GSI_SHRAM_n_ADDR(n) (GSI_REG_BASE + 0x00002000 + 0x4 * (n))
+#define HWIO_GSI_SHRAM_n_PHYS(n) (GSI_REG_BASE_PHYS + 0x00002000 + 0x4 * \
+				  (n))
+#define HWIO_GSI_SHRAM_n_OFFS(n) (GSI_REG_BASE_OFFS + 0x00002000 + 0x4 * \
+				  (n))
+#define HWIO_GSI_SHRAM_n_RMSK 0xffffffff
+#define HWIO_GSI_SHRAM_n_MAXn 1343
+#define HWIO_GSI_SHRAM_n_ATTR 0x3
+#define HWIO_GSI_SHRAM_n_INI(n) in_dword_masked(HWIO_GSI_SHRAM_n_ADDR( \
+							n), \
+						HWIO_GSI_SHRAM_n_RMSK)
+#define HWIO_GSI_SHRAM_n_INMI(n, mask) in_dword_masked(	\
+		HWIO_GSI_SHRAM_n_ADDR(n), \
+		mask)
+#define HWIO_GSI_SHRAM_n_OUTI(n, val) out_dword(HWIO_GSI_SHRAM_n_ADDR( \
+							n), val)
+#define HWIO_GSI_SHRAM_n_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_GSI_SHRAM_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_GSI_SHRAM_n_INI(n))
+#define HWIO_GSI_SHRAM_n_SHRAM_BMSK 0xffffffff
+#define HWIO_GSI_SHRAM_n_SHRAM_SHFT 0x0
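+/*
+ * Indexed registers take the instance number as a macro argument and
+ * publish the highest valid index as _MAXn.  A sketch of walking the
+ * shared RAM (pr_debug is the assumed logger here):
+ *
+ *	u32 i;
+ *
+ *	for (i = 0; i <= HWIO_GSI_SHRAM_n_MAXn; i++)
+ *		pr_debug("SHRAM[%u] = 0x%x\n", i, HWIO_GSI_SHRAM_n_INI(i));
+ */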
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_ADDR(n, k) (GSI_REG_BASE + \
+						    0x00003800 + 0x80 *	\
+						    (n) + 0x4 * (k))
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_PHYS(n, k) (GSI_REG_BASE_PHYS +	\
+						    0x00003800 + 0x80 *	\
+						    (n) + 0x4 * (k))
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_OFFS(n, k) (GSI_REG_BASE_OFFS +	\
+						    0x00003800 + 0x80 *	\
+						    (n) + 0x4 * (k))
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_RMSK 0x3f
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_MAXn 2
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_MAXk 22
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_ATTR 0x3
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_INI2(n, k) in_dword_masked( \
+		HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_ADDR(n, k), \
+		HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_RMSK)
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_ADDR(n, k), \
+		mask)
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_OUTI2(n, k, val) out_dword( \
+		HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_ADDR(n, k), \
+		val)
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_OUTMI2(n, k, mask, \
+					       val) out_dword_masked_ns( \
+		HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_ADDR(n,	\
+						     k), \
+		mask, \
+		val, \
+		HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_INI2(n, k))
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_VALID_BMSK 0x20
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_VALID_SHFT 0x5
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_PHY_CH_BMSK 0x1f
+#define HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_PHY_CH_SHFT 0x0
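+/*
+ * Two-dimensional registers take the EE index (n) and channel index (k)
+ * as arguments; ee and ch below are caller-supplied.  Illustrative
+ * decode of a virtual-to-physical channel mapping entry:
+ *
+ *	u32 entry = HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_INI2(ee, ch);
+ *	u32 phy_ch = (entry & HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_PHY_CH_BMSK) >>
+ *		     HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_PHY_CH_SHFT;
+ *	bool valid = entry & HWIO_GSI_MAP_EE_n_CH_k_VP_TABLE_VALID_BMSK;
+ */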
+#define HWIO_GSI_TEST_BUS_SEL_ADDR (GSI_REG_BASE + 0x00001000)
+#define HWIO_GSI_TEST_BUS_SEL_PHYS (GSI_REG_BASE_PHYS + 0x00001000)
+#define HWIO_GSI_TEST_BUS_SEL_OFFS (GSI_REG_BASE_OFFS + 0x00001000)
+#define HWIO_GSI_TEST_BUS_SEL_RMSK 0xf00ff
+#define HWIO_GSI_TEST_BUS_SEL_ATTR 0x3
+#define HWIO_GSI_TEST_BUS_SEL_IN in_dword_masked( \
+		HWIO_GSI_TEST_BUS_SEL_ADDR, \
+		HWIO_GSI_TEST_BUS_SEL_RMSK)
+#define HWIO_GSI_TEST_BUS_SEL_INM(m) in_dword_masked( \
+		HWIO_GSI_TEST_BUS_SEL_ADDR, \
+		m)
+#define HWIO_GSI_TEST_BUS_SEL_OUT(v) out_dword(HWIO_GSI_TEST_BUS_SEL_ADDR, \
+					       v)
+#define HWIO_GSI_TEST_BUS_SEL_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_GSI_TEST_BUS_SEL_ADDR, \
+		m, \
+		v, \
+		HWIO_GSI_TEST_BUS_SEL_IN)
+#define HWIO_GSI_TEST_BUS_SEL_GSI_HW_EVENTS_SEL_BMSK 0xf0000
+#define HWIO_GSI_TEST_BUS_SEL_GSI_HW_EVENTS_SEL_SHFT 0x10
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_BMSK 0xff
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_SHFT 0x0
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_ZEROS_FVAL 0x0
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_0_FVAL 0x1
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_1_FVAL 0x2
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_2_FVAL 0x3
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_3_FVAL 0x4
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_4_FVAL 0x5
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_DB_ENG_FVAL 0x9
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_0_FVAL 0xb
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_1_FVAL 0xc
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_2_FVAL 0xd
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_3_FVAL 0xe
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_4_FVAL 0xf
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_5_FVAL 0x10
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_6_FVAL 0x11
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_7_FVAL 0x12
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_0_FVAL 0x13
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_1_FVAL 0x14
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_2_FVAL 0x15
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_3_FVAL 0x16
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_4_FVAL 0x17
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_5_FVAL 0x18
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IE_0_FVAL 0x1b
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IE_1_FVAL 0x1c
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IE_2_FVAL 0x1d
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_0_FVAL 0x1f
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_1_FVAL 0x20
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_2_FVAL 0x21
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_3_FVAL 0x22
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_4_FVAL 0x23
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MOQA_0_FVAL 0x27
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MOQA_1_FVAL 0x28
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MOQA_2_FVAL 0x29
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MOQA_3_FVAL 0x2a
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_TMR_0_FVAL 0x2b
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_TMR_1_FVAL 0x2c
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_TMR_2_FVAL 0x2d
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_TMR_3_FVAL 0x2e
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_RD_WR_0_FVAL 0x33
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_RD_WR_1_FVAL 0x34
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_RD_WR_2_FVAL 0x35
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_RD_WR_3_FVAL 0x36
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_CSR_FVAL 0x3a
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_SDMA_0_FVAL 0x3c
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_SDMA_1_FVAL 0x3d
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_CSR_1_FVAL 0x3e
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_CSR_2_FVAL 0x3f
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_5_FVAL 0x40
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_5_FVAL 0x41
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_CSR_3_FVAL 0x42
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_TLV_0_FVAL 0x43
+#define HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_8_FVAL 0x44
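+/*
+ * The _FVAL constants enumerate the architected encodings of a field.
+ * A sketch (illustrative selection, not a recommended setting) of
+ * routing MCS unit 0 onto the test bus:
+ *
+ *	HWIO_GSI_TEST_BUS_SEL_OUTM(
+ *		HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_BMSK,
+ *		HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_0_FVAL <<
+ *		HWIO_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_SHFT);
+ */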
+#define HWIO_GSI_TEST_BUS_REG_ADDR (GSI_REG_BASE + 0x00001008)
+#define HWIO_GSI_TEST_BUS_REG_PHYS (GSI_REG_BASE_PHYS + 0x00001008)
+#define HWIO_GSI_TEST_BUS_REG_OFFS (GSI_REG_BASE_OFFS + 0x00001008)
+#define HWIO_GSI_TEST_BUS_REG_RMSK 0xffffffff
+#define HWIO_GSI_TEST_BUS_REG_ATTR 0x1
+#define HWIO_GSI_TEST_BUS_REG_IN in_dword_masked( \
+		HWIO_GSI_TEST_BUS_REG_ADDR, \
+		HWIO_GSI_TEST_BUS_REG_RMSK)
+#define HWIO_GSI_TEST_BUS_REG_INM(m) in_dword_masked( \
+		HWIO_GSI_TEST_BUS_REG_ADDR, \
+		m)
+#define HWIO_GSI_TEST_BUS_REG_GSI_TESTBUS_REG_BMSK 0xffffffff
+#define HWIO_GSI_TEST_BUS_REG_GSI_TESTBUS_REG_SHFT 0x0
+#define HWIO_GSI_DEBUG_BUSY_REG_ADDR (GSI_REG_BASE + 0x00001010)
+#define HWIO_GSI_DEBUG_BUSY_REG_PHYS (GSI_REG_BASE_PHYS + 0x00001010)
+#define HWIO_GSI_DEBUG_BUSY_REG_OFFS (GSI_REG_BASE_OFFS + 0x00001010)
+#define HWIO_GSI_DEBUG_EVENT_PENDING_ADDR (GSI_REG_BASE + 0x00001014)
+#define HWIO_GSI_DEBUG_EVENT_PENDING_PHYS (GSI_REG_BASE_PHYS + 0x00001014)
+#define HWIO_GSI_DEBUG_EVENT_PENDING_OFFS (GSI_REG_BASE_OFFS + 0x00001014)
+#define HWIO_GSI_DEBUG_TIMER_PENDING_ADDR (GSI_REG_BASE + 0x00001018)
+#define HWIO_GSI_DEBUG_TIMER_PENDING_PHYS (GSI_REG_BASE_PHYS + 0x00001018)
+#define HWIO_GSI_DEBUG_TIMER_PENDING_OFFS (GSI_REG_BASE_OFFS + 0x00001018)
+#define HWIO_GSI_DEBUG_RD_WR_PENDING_ADDR (GSI_REG_BASE + 0x0000101c)
+#define HWIO_GSI_DEBUG_RD_WR_PENDING_PHYS (GSI_REG_BASE_PHYS + 0x0000101c)
+#define HWIO_GSI_DEBUG_RD_WR_PENDING_OFFS (GSI_REG_BASE_OFFS + 0x0000101c)
+#define HWIO_GSI_DEBUG_COUNTER_CFGn_ADDR(n) (GSI_REG_BASE + 0x00001200 + \
+					     0x4 * (n))
+#define HWIO_GSI_DEBUG_COUNTER_CFGn_PHYS(n) (GSI_REG_BASE_PHYS + \
+					     0x00001200 + 0x4 * (n))
+#define HWIO_GSI_DEBUG_COUNTER_CFGn_OFFS(n) (GSI_REG_BASE_OFFS + \
+					     0x00001200 + 0x4 * (n))
+#define HWIO_GSI_DEBUG_COUNTERn_ADDR(n) (GSI_REG_BASE + 0x00001240 + 0x4 * \
+					 (n))
+#define HWIO_GSI_DEBUG_COUNTERn_PHYS(n) (GSI_REG_BASE_PHYS + 0x00001240 + \
+					 0x4 * (n))
+#define HWIO_GSI_DEBUG_COUNTERn_OFFS(n) (GSI_REG_BASE_OFFS + 0x00001240 + \
+					 0x4 * (n))
+#define HWIO_GSI_DEBUG_COUNTERn_RMSK 0xffff
+#define HWIO_GSI_DEBUG_COUNTERn_MAXn 7
+#define HWIO_GSI_DEBUG_COUNTERn_ATTR 0x1
+#define HWIO_GSI_DEBUG_COUNTERn_INI(n) in_dword_masked(	\
+		HWIO_GSI_DEBUG_COUNTERn_ADDR(n), \
+		HWIO_GSI_DEBUG_COUNTERn_RMSK)
+#define HWIO_GSI_DEBUG_COUNTERn_INMI(n, mask) in_dword_masked( \
+		HWIO_GSI_DEBUG_COUNTERn_ADDR(n), \
+		mask)
+#define HWIO_GSI_DEBUG_COUNTERn_COUNTER_VALUE_BMSK 0xffff
+#define HWIO_GSI_DEBUG_COUNTERn_COUNTER_VALUE_SHFT 0x0
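+/*
+ * Read-only registers (_ATTR 0x1 in this file) only get the _INI/_INMI
+ * read accessors.  Illustrative read of one debug counter, where idx is
+ * a caller-supplied index in 0..HWIO_GSI_DEBUG_COUNTERn_MAXn:
+ *
+ *	u32 cnt = HWIO_GSI_DEBUG_COUNTERn_INI(idx);
+ */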
+#define HWIO_GSI_DEBUG_PC_FROM_SW_ADDR (GSI_REG_BASE + 0x00001040)
+#define HWIO_GSI_DEBUG_PC_FROM_SW_PHYS (GSI_REG_BASE_PHYS + 0x00001040)
+#define HWIO_GSI_DEBUG_PC_FROM_SW_OFFS (GSI_REG_BASE_OFFS + 0x00001040)
+#define HWIO_GSI_DEBUG_SW_STALL_ADDR (GSI_REG_BASE + 0x00001044)
+#define HWIO_GSI_DEBUG_SW_STALL_PHYS (GSI_REG_BASE_PHYS + 0x00001044)
+#define HWIO_GSI_DEBUG_SW_STALL_OFFS (GSI_REG_BASE_OFFS + 0x00001044)
+#define HWIO_GSI_DEBUG_PC_FOR_DEBUG_ADDR (GSI_REG_BASE + 0x00001048)
+#define HWIO_GSI_DEBUG_PC_FOR_DEBUG_PHYS (GSI_REG_BASE_PHYS + 0x00001048)
+#define HWIO_GSI_DEBUG_PC_FOR_DEBUG_OFFS (GSI_REG_BASE_OFFS + 0x00001048)
+#define HWIO_GSI_DEBUG_QSB_LOG_SEL_ADDR (GSI_REG_BASE + 0x00001050)
+#define HWIO_GSI_DEBUG_QSB_LOG_SEL_PHYS (GSI_REG_BASE_PHYS + 0x00001050)
+#define HWIO_GSI_DEBUG_QSB_LOG_SEL_OFFS (GSI_REG_BASE_OFFS + 0x00001050)
+#define HWIO_GSI_DEBUG_QSB_LOG_CLR_ADDR (GSI_REG_BASE + 0x00001058)
+#define HWIO_GSI_DEBUG_QSB_LOG_CLR_PHYS (GSI_REG_BASE_PHYS + 0x00001058)
+#define HWIO_GSI_DEBUG_QSB_LOG_CLR_OFFS (GSI_REG_BASE_OFFS + 0x00001058)
+#define HWIO_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ADDR (GSI_REG_BASE + 0x00001060)
+#define HWIO_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_PHYS (GSI_REG_BASE_PHYS + \
+						 0x00001060)
+#define HWIO_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_OFFS (GSI_REG_BASE_OFFS + \
+						 0x00001060)
+#define HWIO_GSI_DEBUG_QSB_LOG_0_ADDR (GSI_REG_BASE + 0x00001064)
+#define HWIO_GSI_DEBUG_QSB_LOG_0_PHYS (GSI_REG_BASE_PHYS + 0x00001064)
+#define HWIO_GSI_DEBUG_QSB_LOG_0_OFFS (GSI_REG_BASE_OFFS + 0x00001064)
+#define HWIO_GSI_DEBUG_QSB_LOG_1_ADDR (GSI_REG_BASE + 0x00001068)
+#define HWIO_GSI_DEBUG_QSB_LOG_1_PHYS (GSI_REG_BASE_PHYS + 0x00001068)
+#define HWIO_GSI_DEBUG_QSB_LOG_1_OFFS (GSI_REG_BASE_OFFS + 0x00001068)
+#define HWIO_GSI_DEBUG_QSB_LOG_2_ADDR (GSI_REG_BASE + 0x0000106c)
+#define HWIO_GSI_DEBUG_QSB_LOG_2_PHYS (GSI_REG_BASE_PHYS + 0x0000106c)
+#define HWIO_GSI_DEBUG_QSB_LOG_2_OFFS (GSI_REG_BASE_OFFS + 0x0000106c)
+#define HWIO_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_ADDR(n) (GSI_REG_BASE + \
+						      0x00001070 + 0x4 * \
+						      (n))
+#define HWIO_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_PHYS(n) (GSI_REG_BASE_PHYS + \
+						      0x00001070 + 0x4 * \
+						      (n))
+#define HWIO_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_OFFS(n) (GSI_REG_BASE_OFFS + \
+						      0x00001070 + 0x4 * \
+						      (n))
+#define HWIO_GSI_DEBUG_SW_RF_n_WRITE_ADDR(n) (GSI_REG_BASE + 0x00001080 + \
+					      0x4 * (n))
+#define HWIO_GSI_DEBUG_SW_RF_n_WRITE_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x00001080 + 0x4 * (n))
+#define HWIO_GSI_DEBUG_SW_RF_n_WRITE_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x00001080 + 0x4 * (n))
+#define HWIO_GSI_DEBUG_SW_RF_n_READ_ADDR(n) (GSI_REG_BASE + 0x00001100 + \
+					     0x4 * (n))
+#define HWIO_GSI_DEBUG_SW_RF_n_READ_PHYS(n) (GSI_REG_BASE_PHYS + \
+					     0x00001100 + 0x4 * (n))
+#define HWIO_GSI_DEBUG_SW_RF_n_READ_OFFS(n) (GSI_REG_BASE_OFFS + \
+					     0x00001100 + 0x4 * (n))
+#define HWIO_GSI_DEBUG_SW_RF_n_READ_RMSK 0xffffffff
+#define HWIO_GSI_DEBUG_SW_RF_n_READ_MAXn 31
+#define HWIO_GSI_DEBUG_SW_RF_n_READ_ATTR 0x1
+#define HWIO_GSI_DEBUG_SW_RF_n_READ_INI(n) in_dword_masked( \
+		HWIO_GSI_DEBUG_SW_RF_n_READ_ADDR(n), \
+		HWIO_GSI_DEBUG_SW_RF_n_READ_RMSK)
+#define HWIO_GSI_DEBUG_SW_RF_n_READ_INMI(n, mask) in_dword_masked( \
+		HWIO_GSI_DEBUG_SW_RF_n_READ_ADDR(n), \
+		mask)
+#define HWIO_GSI_DEBUG_SW_RF_n_READ_RF_REG_BMSK 0xffffffff
+#define HWIO_GSI_DEBUG_SW_RF_n_READ_RF_REG_SHFT 0x0
+#define HWIO_GSI_DEBUG_EE_n_CH_k_VP_TABLE_ADDR(n, k) (GSI_REG_BASE + \
+						      0x00001400 + 0x80 * \
+						      (n) + 0x4 * (k))
+#define HWIO_GSI_DEBUG_EE_n_CH_k_VP_TABLE_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						      0x00001400 + 0x80 * \
+						      (n) + 0x4 * (k))
+#define HWIO_GSI_DEBUG_EE_n_CH_k_VP_TABLE_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						      0x00001400 + 0x80 * \
+						      (n) + 0x4 * (k))
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_ADDR(n, k) (GSI_REG_BASE + \
+						      0x00001600 + 0x80 * \
+						      (n) + 0x4 * (k))
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						      0x00001600 + 0x80 * \
+						      (n) + 0x4 * (k))
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						      0x00001600 + 0x80 * \
+						      (n) + 0x4 * (k))
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_RMSK 0x3f
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_MAXn 3
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_MAXk 19
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_ATTR 0x1
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_INI2(n, k) in_dword_masked( \
+		HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_ADDR(n, k), \
+		HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_RMSK)
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_INMI2(n, k, \
+						mask) in_dword_masked( \
+		HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_ADDR(n, \
+						       k), \
+		mask)
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_VALID_BMSK 0x20
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_VALID_SHFT 0x5
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_PHY_EV_CH_BMSK 0x1f
+#define HWIO_GSI_DEBUG_EE_n_EV_k_VP_TABLE_PHY_EV_CH_SHFT 0x0
+#define HWIO_GSI_DEBUG_SDMA_TRANS_DB_n_ADDR(n) (GSI_REG_BASE + \
+						0x00001800 + 0x4 * (n))
+#define HWIO_GSI_DEBUG_SDMA_TRANS_DB_n_PHYS(n) (GSI_REG_BASE_PHYS + \
+						0x00001800 + 0x4 * (n))
+#define HWIO_GSI_DEBUG_SDMA_TRANS_DB_n_OFFS(n) (GSI_REG_BASE_OFFS + \
+						0x00001800 + 0x4 * (n))
+#define HWIO_GSI_UC_SRC_IRQ_ADDR (GSI_REG_BASE + 0x00000500)
+#define HWIO_GSI_UC_SRC_IRQ_PHYS (GSI_REG_BASE_PHYS + 0x00000500)
+#define HWIO_GSI_UC_SRC_IRQ_OFFS (GSI_REG_BASE_OFFS + 0x00000500)
+#define HWIO_GSI_UC_SRC_IRQ_MSK_ADDR (GSI_REG_BASE + 0x00000504)
+#define HWIO_GSI_UC_SRC_IRQ_MSK_PHYS (GSI_REG_BASE_PHYS + 0x00000504)
+#define HWIO_GSI_UC_SRC_IRQ_MSK_OFFS (GSI_REG_BASE_OFFS + 0x00000504)
+#define HWIO_GSI_UC_SRC_IRQ_CLR_ADDR (GSI_REG_BASE + 0x00000508)
+#define HWIO_GSI_UC_SRC_IRQ_CLR_PHYS (GSI_REG_BASE_PHYS + 0x00000508)
+#define HWIO_GSI_UC_SRC_IRQ_CLR_OFFS (GSI_REG_BASE_OFFS + 0x00000508)
+#define HWIO_GSI_ACC_ARGS_n_ADDR(n) (GSI_REG_BASE + 0x0000050c + 0x4 * (n))
+#define HWIO_GSI_ACC_ARGS_n_PHYS(n) (GSI_REG_BASE_PHYS + 0x0000050c + \
+				     0x4 * (n))
+#define HWIO_GSI_ACC_ARGS_n_OFFS(n) (GSI_REG_BASE_OFFS + 0x0000050c + \
+				     0x4 * (n))
+#define HWIO_GSI_ACC_ROUTINE_ADDR (GSI_REG_BASE + 0x00000524)
+#define HWIO_GSI_ACC_ROUTINE_PHYS (GSI_REG_BASE_PHYS + 0x00000524)
+#define HWIO_GSI_ACC_ROUTINE_OFFS (GSI_REG_BASE_OFFS + 0x00000524)
+#define HWIO_GSI_ACC_GO_ADDR (GSI_REG_BASE + 0x00000528)
+#define HWIO_GSI_ACC_GO_PHYS (GSI_REG_BASE_PHYS + 0x00000528)
+#define HWIO_GSI_ACC_GO_OFFS (GSI_REG_BASE_OFFS + 0x00000528)
+#define HWIO_GSI_ACC_2_UC_MCS_STTS_ADDR (GSI_REG_BASE + 0x0000052c)
+#define HWIO_GSI_ACC_2_UC_MCS_STTS_PHYS (GSI_REG_BASE_PHYS + 0x0000052c)
+#define HWIO_GSI_ACC_2_UC_MCS_STTS_OFFS (GSI_REG_BASE_OFFS + 0x0000052c)
+#define HWIO_GSI_ACC_2_UC_MCS_RET_VAL_LSB_ADDR (GSI_REG_BASE + 0x00000530)
+#define HWIO_GSI_ACC_2_UC_MCS_RET_VAL_LSB_PHYS (GSI_REG_BASE_PHYS + \
+						0x00000530)
+#define HWIO_GSI_ACC_2_UC_MCS_RET_VAL_LSB_OFFS (GSI_REG_BASE_OFFS + \
+						0x00000530)
+#define HWIO_GSI_ACC_2_UC_MCS_RET_VAL_MSB_ADDR (GSI_REG_BASE + 0x00000534)
+#define HWIO_GSI_ACC_2_UC_MCS_RET_VAL_MSB_PHYS (GSI_REG_BASE_PHYS + \
+						0x00000534)
+#define HWIO_GSI_ACC_2_UC_MCS_RET_VAL_MSB_OFFS (GSI_REG_BASE_OFFS + \
+						0x00000534)
+#define HWIO_GSI_IC_2_UC_MCS_VLD_ADDR (GSI_REG_BASE + 0x00000538)
+#define HWIO_GSI_IC_2_UC_MCS_VLD_PHYS (GSI_REG_BASE_PHYS + 0x00000538)
+#define HWIO_GSI_IC_2_UC_MCS_VLD_OFFS (GSI_REG_BASE_OFFS + 0x00000538)
+#define HWIO_GSI_IC_2_UC_MCS_PC_ADDR (GSI_REG_BASE + 0x0000053c)
+#define HWIO_GSI_IC_2_UC_MCS_PC_PHYS (GSI_REG_BASE_PHYS + 0x0000053c)
+#define HWIO_GSI_IC_2_UC_MCS_PC_OFFS (GSI_REG_BASE_OFFS + 0x0000053c)
+#define HWIO_GSI_IC_2_UC_MCS_ARGS_n_ADDR(n) (GSI_REG_BASE + 0x00000540 + \
+					     0x4 * (n))
+#define HWIO_GSI_IC_2_UC_MCS_ARGS_n_PHYS(n) (GSI_REG_BASE_PHYS + \
+					     0x00000540 + 0x4 * (n))
+#define HWIO_GSI_IC_2_UC_MCS_ARGS_n_OFFS(n) (GSI_REG_BASE_OFFS + \
+					     0x00000540 + 0x4 * (n))
+#define HWIO_GSI_UC_TLV_IN_VLD_ADDR (GSI_REG_BASE + 0x00000558)
+#define HWIO_GSI_UC_TLV_IN_VLD_PHYS (GSI_REG_BASE_PHYS + 0x00000558)
+#define HWIO_GSI_UC_TLV_IN_VLD_OFFS (GSI_REG_BASE_OFFS + 0x00000558)
+#define HWIO_GSI_UC_TLV_IN_ROUTINE_ADDR (GSI_REG_BASE + 0x0000055c)
+#define HWIO_GSI_UC_TLV_IN_ROUTINE_PHYS (GSI_REG_BASE_PHYS + 0x0000055c)
+#define HWIO_GSI_UC_TLV_IN_ROUTINE_OFFS (GSI_REG_BASE_OFFS + 0x0000055c)
+#define HWIO_GSI_UC_TLV_IN_ARGS_n_ADDR(n) (GSI_REG_BASE + 0x00000560 + \
+					   0x4 * (n))
+#define HWIO_GSI_UC_TLV_IN_ARGS_n_PHYS(n) (GSI_REG_BASE_PHYS + \
+					   0x00000560 + 0x4 * (n))
+#define HWIO_GSI_UC_TLV_IN_ARGS_n_OFFS(n) (GSI_REG_BASE_OFFS + \
+					   0x00000560 + 0x4 * (n))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_ADDR(n, k) (GSI_REG_BASE + 0x0000f000 + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x0000f000 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x0000f000 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_RMSK 0xfff7ffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_0_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_CNTXT_0_RMSK)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_0_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_0_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_OUTMI2(n, k, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_0_ADDR(n, \
+						k), \
+		mask, \
+		val, \
+		HWIO_EE_n_GSI_CH_k_CNTXT_0_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK 0xff000000
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT 0x18
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK 0xf00000
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT 0x14
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_NOT_ALLOCATED_FVAL 0x0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_ALLOCATED_FVAL 0x1
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_STARTED_FVAL 0x2
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_STOPPED_FVAL 0x3
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_STOP_IN_PROC_FVAL 0x4
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_ERROR_FVAL 0xf
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_BMSK 0x7c000
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_SHFT 0xe
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_BMSK 0x2000
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_SHFT 0xd
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHID_BMSK 0x1f00
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHID_SHFT 0x8
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_EE_BMSK 0xf0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_EE_SHFT 0x4
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_BMSK 0x8
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_SHFT 0x3
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_INBOUND_FVAL 0x0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_OUTBOUND_FVAL 0x1
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_BMSK 0x7
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MHI_FVAL 0x0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_XHCI_FVAL 0x1
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_GPI_FVAL 0x2
+#define HWIO_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_XDCI_FVAL 0x3
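+/*
+ * Sketch of decoding the channel state from CNTXT_0; n and k are
+ * caller-supplied EE and channel indices, and handle_running_channel()
+ * is a hypothetical helper:
+ *
+ *	u32 ctx = HWIO_EE_n_GSI_CH_k_CNTXT_0_INI2(n, k);
+ *	u32 state = (ctx & HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >>
+ *		    HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
+ *
+ *	if (state == HWIO_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_STARTED_FVAL)
+ *		handle_running_channel(n, k);
+ */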
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_ADDR(n, k) (GSI_REG_BASE + 0x0000f004 + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x0000f004 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x0000f004 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_RMSK 0xffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_1_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_CNTXT_1_RMSK)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_1_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_1_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_OUTMI2(n, k, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_1_ADDR(n, \
+						k), \
+		mask, \
+		val, \
+		HWIO_EE_n_GSI_CH_k_CNTXT_1_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_BMSK 0xffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_ADDR(n, k) (GSI_REG_BASE + 0x0000f008 + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x0000f008 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x0000f008 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_RMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_2_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_CNTXT_2_RMSK)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_2_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_2_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_OUTMI2(n, k, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_2_ADDR(n, \
+						k), \
+		mask, \
+		val, \
+		HWIO_EE_n_GSI_CH_k_CNTXT_2_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_ADDR(n, k) (GSI_REG_BASE + 0x0000f00c + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x0000f00c + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x0000f00c + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_RMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_3_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_CNTXT_3_RMSK)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_3_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_3_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_OUTMI2(n, k, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_3_ADDR(n, \
+						k), \
+		mask, \
+		val, \
+		HWIO_EE_n_GSI_CH_k_CNTXT_3_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_ADDR(n, k) (GSI_REG_BASE + 0x0000f010 + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x0000f010 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x0000f010 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_RMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_4_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_CNTXT_4_RMSK)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_4_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_4_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_OUTMI2(n, k, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_4_ADDR(n, \
+						k), \
+		mask, \
+		val, \
+		HWIO_EE_n_GSI_CH_k_CNTXT_4_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_READ_PTR_LSB_BMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_4_READ_PTR_LSB_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_ADDR(n, k) (GSI_REG_BASE + 0x0000f014 + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x0000f014 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x0000f014 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_RMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_ATTR 0x1
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_5_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_CNTXT_5_RMSK)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_5_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_READ_PTR_MSB_BMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_5_READ_PTR_MSB_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_ADDR(n, k) (GSI_REG_BASE + 0x0000f018 + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x0000f018 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x0000f018 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_RMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_ATTR 0x1
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_6_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_CNTXT_6_RMSK)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_6_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_WRITE_PTR_LSB_BMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_6_WRITE_PTR_LSB_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_ADDR(n, k) (GSI_REG_BASE + 0x0000f01c + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x0000f01c + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x0000f01c + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_RMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_ATTR 0x1
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_7_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_CNTXT_7_RMSK)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_CNTXT_7_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_WRITE_PTR_MSB_BMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_CNTXT_7_WRITE_PTR_MSB_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ADDR(n, k) (GSI_REG_BASE +	\
+							 0x0000f054 + \
+							 0x4000 * (n) +	\
+							 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_PHYS(n, \
+						  k) (GSI_REG_BASE_PHYS + \
+						      0x0000f054 + \
+						      0x4000 * (n) + \
+						      0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OFFS(n, \
+						  k) (GSI_REG_BASE_OFFS + \
+						      0x0000f054 + \
+						      0x4000 * (n) + \
+						      0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_RMSK 0xffff
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_RMSK)
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_INMI2(n, k, \
+						   mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ADDR(n, \
+							  k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OUTMI2(n, k, mask,	\
+						    val) \
+	out_dword_masked_ns(HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ADDR( \
+				    n, \
+				    k), mask, val, \
+			    HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_READ_PTR_BMSK 0xffff
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_READ_PTR_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ADDR(n, k) (GSI_REG_BASE + \
+							  0x0000f058 + \
+							  0x4000 * (n) + \
+							  0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_PHYS(n, \
+						   k) (GSI_REG_BASE_PHYS + \
+						       0x0000f058 + \
+						       0x4000 * (n) + \
+						       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OFFS(n, \
+						   k) (GSI_REG_BASE_OFFS + \
+						       0x0000f058 + \
+						       0x4000 * (n) + \
+						       0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RMSK 0xffff
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RMSK)
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OUTMI2(n, k, mask, \
+						     val) \
+	out_dword_masked_ns(HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ADDR(	\
+				    n, \
+				    k), mask, val, \
+			    HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_INI2(n, \
+								       k))
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RE_INTR_DB_BMSK 0xffff
+#define HWIO_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RE_INTR_DB_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_QOS_ADDR(n, k) (GSI_REG_BASE + 0x0000f05c + \
+					   0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_QOS_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					   0x0000f05c + 0x4000 * (n) + \
+					   0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_QOS_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					   0x0000f05c + 0x4000 * (n) + \
+					   0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_QOS_RMSK 0xff3f0f
+#define HWIO_EE_n_GSI_CH_k_QOS_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_QOS_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_QOS_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_QOS_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_QOS_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_QOS_RMSK)
+#define HWIO_EE_n_GSI_CH_k_QOS_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_QOS_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_QOS_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_QOS_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_QOS_OUTMI2(n, k, mask, val) out_dword_masked_ns( \
+		HWIO_EE_n_GSI_CH_k_QOS_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_EE_n_GSI_CH_k_QOS_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_BMSK 0xff0000
+#define HWIO_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_SHFT 0x10
+#define HWIO_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_BMSK 0x3c00
+#define HWIO_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SHFT 0xa
+#define HWIO_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_USE_PREFETCH_BUFS_FVAL 0x0
+#define HWIO_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_ESCAPE_BUF_ONLY_FVAL 0x1
+#define HWIO_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SMART_PRE_FETCH_FVAL 0x2
+#define HWIO_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_FREE_PRE_FETCH_FVAL 0x3
+#define HWIO_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK 0x200
+#define HWIO_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT 0x9
+#define HWIO_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK 0x100
+#define HWIO_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT 0x8
+#define HWIO_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_ONE_PREFETCH_SEG_FVAL 0x0
+#define HWIO_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_TWO_PREFETCH_SEG_FVAL 0x1
+#define HWIO_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK 0xf
+#define HWIO_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT 0x0
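+/*
+ * Sketch of composing a full QOS word and writing it with the _OUTI2
+ * accessor (the values are illustrative, not recommended settings):
+ *
+ *	u32 qos = (1 << HWIO_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT) |
+ *		  (HWIO_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SMART_PRE_FETCH_FVAL
+ *		   << HWIO_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SHFT);
+ *
+ *	HWIO_EE_n_GSI_CH_k_QOS_OUTI2(n, k, qos);
+ */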
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_ADDR(n, k) (GSI_REG_BASE +	\
+						 0x0000f060 + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						 0x0000f060 + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						 0x0000f060 + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_RMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_0_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_0_RMSK)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_INMI2(n, k, mask) in_dword_masked(	\
+		HWIO_EE_n_GSI_CH_k_SCRATCH_0_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_0_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_OUTMI2(n, k, mask,	\
+					    val) out_dword_masked_ns( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_0_ADDR(n, \
+						  k), \
+		mask, \
+		val, \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_0_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_SCRATCH_BMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_0_SCRATCH_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_ADDR(n, k) (GSI_REG_BASE +	\
+						 0x0000f064 + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						 0x0000f064 + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						 0x0000f064 + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_RMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_1_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_1_RMSK)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_INMI2(n, k, mask) in_dword_masked(	\
+		HWIO_EE_n_GSI_CH_k_SCRATCH_1_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_1_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_OUTMI2(n, k, mask,	\
+					    val) out_dword_masked_ns( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_1_ADDR(n, \
+						  k), \
+		mask, \
+		val, \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_1_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_SCRATCH_BMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_1_SCRATCH_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_ADDR(n, k) (GSI_REG_BASE +	\
+						 0x0000f068 + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						 0x0000f068 + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						 0x0000f068 + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_RMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_2_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_2_RMSK)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_INMI2(n, k, mask) in_dword_masked(	\
+		HWIO_EE_n_GSI_CH_k_SCRATCH_2_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_2_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_OUTMI2(n, k, mask,	\
+					    val) out_dword_masked_ns( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_2_ADDR(n, \
+						  k), \
+		mask, \
+		val, \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_2_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_SCRATCH_BMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_2_SCRATCH_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_ADDR(n, k) (GSI_REG_BASE +	\
+						 0x0000f06c + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						 0x0000f06c + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						 0x0000f06c + 0x4000 * \
+						 (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_RMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_MAXn 2
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_MAXk 22
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_ATTR 0x3
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_3_ADDR(n, k), \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_3_RMSK)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_INMI2(n, k, mask) in_dword_masked(	\
+		HWIO_EE_n_GSI_CH_k_SCRATCH_3_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_3_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_OUTMI2(n, k, mask,	\
+					    val) out_dword_masked_ns( \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_3_ADDR(n, \
+						  k), \
+		mask, \
+		val, \
+		HWIO_EE_n_GSI_CH_k_SCRATCH_3_INI2(n, k))
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_SCRATCH_BMSK 0xffffffff
+#define HWIO_EE_n_GSI_CH_k_SCRATCH_3_SCRATCH_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_DB_ENG_WRITE_PTR_ADDR(n, k) (GSI_REG_BASE + \
+							0x0000f070 + \
+							0x4000 * (n) + \
+							0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_DB_ENG_WRITE_PTR_PHYS(n, \
+						 k) (GSI_REG_BASE_PHYS + \
+						     0x0000f070 + 0x4000 * \
+						     (n) + 0x80 * (k))
+#define HWIO_EE_n_GSI_CH_k_DB_ENG_WRITE_PTR_OFFS(n, \
+						 k) (GSI_REG_BASE_OFFS + \
+						     0x0000f070 + 0x4000 * \
+						     (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_ADDR(n, k) (GSI_REG_BASE + 0x00010000 + \
+					      0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					      0x00010000 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					      0x00010000 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_RMSK 0xfff1ffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_0_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_0_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_0_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_0_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_OUTMI2(n, k, mask, \
+					 val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_0_ADDR(n, \
+					       k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_0_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK 0xff000000
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT 0x18
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK 0xf00000
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT 0x14
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_CHSTATE_NOT_ALLOCATED_FVAL 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_CHSTATE_ALLOCATED_FVAL 0x1
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_INTYPE_BMSK 0x10000
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_INTYPE_SHFT 0x10
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_INTYPE_MSI_FVAL 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_INTYPE_IRQ_FVAL 0x1
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_EVCHID_BMSK 0xff00
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_EVCHID_SHFT 0x8
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_EE_BMSK 0xf0
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_EE_SHFT 0x4
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_CHTYPE_BMSK 0xf
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_CHTYPE_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_CHTYPE_MHI_EV_FVAL 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_CHTYPE_XHCI_EV_FVAL 0x1
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_CHTYPE_GPI_EV_FVAL 0x2
+#define HWIO_EE_n_EV_CH_k_CNTXT_0_CHTYPE_XDCI_FVAL 0x3
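+/*
+ * EV_CH_k_CNTXT_0 packs the event ring's protocol type (CHTYPE, with
+ * the MHI/XHCI/GPI/XDCI _FVAL encodings above), owning EE, event
+ * channel id, interrupt type (MSI vs. IRQ) and channel state into one
+ * dword; each field is extracted with its _BMSK/_SHFT pair. An
+ * illustrative state check, assuming the INI2 helper above:
+ *
+ *	u32 ctx = HWIO_EE_n_EV_CH_k_CNTXT_0_INI2(ee, evt);
+ *	bool alloc = ((ctx & HWIO_EE_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK) >>
+ *		HWIO_EE_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT) ==
+ *		HWIO_EE_n_EV_CH_k_CNTXT_0_CHSTATE_ALLOCATED_FVAL;
+ */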
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_ADDR(n, k) (GSI_REG_BASE + 0x00010004 + \
+					      0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					      0x00010004 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					      0x00010004 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_RMSK 0xffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_1_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_1_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_1_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_1_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_OUTMI2(n, k, mask, \
+					 val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_1_ADDR(n, \
+					       k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_1_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_BMSK 0xffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_ADDR(n, k) (GSI_REG_BASE + 0x00010008 + \
+					      0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					      0x00010008 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					      0x00010008 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_2_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_2_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_2_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_2_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_OUTMI2(n, k, mask, \
+					 val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_2_ADDR(n, \
+					       k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_2_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_ADDR(n, k) (GSI_REG_BASE + 0x0001000c + \
+					      0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					      0x0001000c + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					      0x0001000c + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_3_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_3_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_3_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_3_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_OUTMI2(n, k, mask, \
+					 val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_3_ADDR(n, \
+					       k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_3_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_ADDR(n, k) (GSI_REG_BASE + 0x00010010 + \
+					      0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					      0x00010010 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					      0x00010010 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_4_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_4_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_4_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_4_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_OUTMI2(n, k, mask, \
+					 val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_4_ADDR(n, \
+					       k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_4_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_READ_PTR_LSB_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_4_READ_PTR_LSB_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_ADDR(n, k) (GSI_REG_BASE + 0x00010014 + \
+					      0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					      0x00010014 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					      0x00010014 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_ATTR 0x1
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_5_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_5_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_5_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_READ_PTR_MSB_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_5_READ_PTR_MSB_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_ADDR(n, k) (GSI_REG_BASE + 0x00010018 + \
+					      0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					      0x00010018 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					      0x00010018 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_ATTR 0x1
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_6_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_6_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_6_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_WRITE_PTR_LSB_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_6_WRITE_PTR_LSB_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_ADDR(n, k) (GSI_REG_BASE + 0x0001001c + \
+					      0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					      0x0001001c + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					      0x0001001c + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_ATTR 0x1
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_7_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_7_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_7_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_WRITE_PTR_MSB_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_7_WRITE_PTR_MSB_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_ADDR(n, k) (GSI_REG_BASE + 0x00010020 + \
+					      0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					      0x00010020 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					      0x00010020 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_8_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_8_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_8_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_8_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_OUTMI2(n, k, mask, \
+					 val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_8_ADDR(n, \
+					       k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_8_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_INT_MOD_CNT_BMSK 0xff000000
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_INT_MOD_CNT_SHFT 0x18
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_INT_MODC_BMSK 0xff0000
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_INT_MODC_SHFT 0x10
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_INT_MODT_BMSK 0xffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_8_INT_MODT_SHFT 0x0
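+/*
+ * CNTXT_8 appears to hold the event ring's interrupt moderation
+ * parameters: INT_MODT (moderation timer), INT_MODC (moderation
+ * counter reload) and INT_MOD_CNT (running count), letting IEOB
+ * interrupts be coalesced rather than raised per completed element.
+ */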
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_ADDR(n, k) (GSI_REG_BASE + 0x00010024 + \
+					      0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					      0x00010024 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					      0x00010024 + 0x4000 * (n) + \
+					      0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_9_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_9_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_9_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_9_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_OUTMI2(n, k, mask, \
+					 val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_9_ADDR(n, \
+					       k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_9_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_INTVEC_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_9_INTVEC_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_ADDR(n, k) (GSI_REG_BASE + 0x00010028 + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x00010028 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x00010028 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_10_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_10_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_10_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_10_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_OUTMI2(n, k, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_10_ADDR(n, \
+						k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_10_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_ADDR(n, k) (GSI_REG_BASE + 0x0001002c + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x0001002c + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x0001002c + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_11_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_11_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_11_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_11_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_OUTMI2(n, k, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_11_ADDR(n, \
+						k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_11_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_ADDR(n, k) (GSI_REG_BASE + 0x00010030 + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x00010030 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x00010030 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_12_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_12_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_12_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_12_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_OUTMI2(n, k, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_12_ADDR(n, \
+						k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_12_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_ADDR(n, k) (GSI_REG_BASE + 0x00010034 + \
+					       0x4000 * (n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+					       0x00010034 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+					       0x00010034 + 0x4000 * (n) + \
+					       0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_MAXn 2
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_MAXk 19
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_INI2(n, k) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_13_ADDR(n, k), \
+		HWIO_EE_n_EV_CH_k_CNTXT_13_RMSK)
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_CNTXT_13_ADDR(n, k), \
+		mask)
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_OUTI2(n, k, val) out_dword( \
+		HWIO_EE_n_EV_CH_k_CNTXT_13_ADDR(n, k), \
+		val)
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_OUTMI2(n, k, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_CNTXT_13_ADDR(n, \
+						k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_CNTXT_13_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_ADDR(n, k) (GSI_REG_BASE + \
+						0x00010048 + 0x4000 * \
+						(n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						0x00010048 + 0x4000 * \
+						(n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						0x00010048 + 0x4000 * \
+						(n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_MAXn 2
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_MAXk 19
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_INI2(n, k) in_dword_masked(	\
+		HWIO_EE_n_EV_CH_k_SCRATCH_0_ADDR(n, k),	\
+		HWIO_EE_n_EV_CH_k_SCRATCH_0_RMSK)
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_SCRATCH_0_ADDR(n, k),	\
+		mask)
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_OUTI2(n, k, val) out_dword(	\
+		HWIO_EE_n_EV_CH_k_SCRATCH_0_ADDR(n, k),	\
+		val)
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_OUTMI2(n, k, mask, \
+					   val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_SCRATCH_0_ADDR(n, \
+						 k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_SCRATCH_0_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_SCRATCH_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_SCRATCH_0_SCRATCH_SHFT 0x0
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_ADDR(n, k) (GSI_REG_BASE + \
+						0x0001004c + 0x4000 * \
+						(n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						0x0001004c + 0x4000 * \
+						(n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						0x0001004c + 0x4000 * \
+						(n) + 0x80 * (k))
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_RMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_MAXn 2
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_MAXk 19
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_ATTR 0x3
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_INI2(n, k) in_dword_masked(	\
+		HWIO_EE_n_EV_CH_k_SCRATCH_1_ADDR(n, k),	\
+		HWIO_EE_n_EV_CH_k_SCRATCH_1_RMSK)
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_INMI2(n, k, mask) in_dword_masked( \
+		HWIO_EE_n_EV_CH_k_SCRATCH_1_ADDR(n, k),	\
+		mask)
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_OUTI2(n, k, val) out_dword(	\
+		HWIO_EE_n_EV_CH_k_SCRATCH_1_ADDR(n, k),	\
+		val)
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_OUTMI2(n, k, mask, \
+					   val) out_dword_masked_ns( \
+		HWIO_EE_n_EV_CH_k_SCRATCH_1_ADDR(n, \
+						 k), \
+		mask, \
+		val, \
+		HWIO_EE_n_EV_CH_k_SCRATCH_1_INI2(n, k))
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_SCRATCH_BMSK 0xffffffff
+#define HWIO_EE_n_EV_CH_k_SCRATCH_1_SCRATCH_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_k_DOORBELL_0_ADDR(n, k) (GSI_REG_BASE + \
+						  0x00011000 + 0x4000 *	\
+						  (n) + 0x8 * (k))
+#define HWIO_EE_n_GSI_CH_k_DOORBELL_0_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						  0x00011000 + 0x4000 *	\
+						  (n) + 0x8 * (k))
+#define HWIO_EE_n_GSI_CH_k_DOORBELL_0_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						  0x00011000 + 0x4000 *	\
+						  (n) + 0x8 * (k))
+#define HWIO_EE_n_GSI_CH_k_DOORBELL_1_ADDR(n, k) (GSI_REG_BASE + \
+						  0x00011004 + 0x4000 *	\
+						  (n) + 0x8 * (k))
+#define HWIO_EE_n_GSI_CH_k_DOORBELL_1_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						  0x00011004 + 0x4000 *	\
+						  (n) + 0x8 * (k))
+#define HWIO_EE_n_GSI_CH_k_DOORBELL_1_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						  0x00011004 + 0x4000 *	\
+						  (n) + 0x8 * (k))
+#define HWIO_EE_n_EV_CH_k_DOORBELL_0_ADDR(n, k) (GSI_REG_BASE +	\
+						 0x00011100 + 0x4000 * \
+						 (n) + 0x8 * (k))
+#define HWIO_EE_n_EV_CH_k_DOORBELL_0_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						 0x00011100 + 0x4000 * \
+						 (n) + 0x8 * (k))
+#define HWIO_EE_n_EV_CH_k_DOORBELL_0_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						 0x00011100 + 0x4000 * \
+						 (n) + 0x8 * (k))
+#define HWIO_EE_n_EV_CH_k_DOORBELL_1_ADDR(n, k) (GSI_REG_BASE +	\
+						 0x00011104 + 0x4000 * \
+						 (n) + 0x8 * (k))
+#define HWIO_EE_n_EV_CH_k_DOORBELL_1_PHYS(n, k) (GSI_REG_BASE_PHYS + \
+						 0x00011104 + 0x4000 * \
+						 (n) + 0x8 * (k))
+#define HWIO_EE_n_EV_CH_k_DOORBELL_1_OFFS(n, k) (GSI_REG_BASE_OFFS + \
+						 0x00011104 + 0x4000 * \
+						 (n) + 0x8 * (k))
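+/*
+ * The channel and event doorbells above are emitted address-only (no
+ * _RMSK or IN/OUT helpers), consistent with write-only registers rung
+ * via a plain out_dword(); note their per-index stride is 0x8 rather
+ * than the 0x80 used by the context/scratch banks.
+ */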
+#define HWIO_EE_n_GSI_STATUS_ADDR(n) (GSI_REG_BASE + 0x00012000 + 0x4000 * \
+				      (n))
+#define HWIO_EE_n_GSI_STATUS_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012000 + \
+				      0x4000 * (n))
+#define HWIO_EE_n_GSI_STATUS_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012000 + \
+				      0x4000 * (n))
+#define HWIO_EE_n_GSI_STATUS_RMSK 0x1
+#define HWIO_EE_n_GSI_STATUS_MAXn 2
+#define HWIO_EE_n_GSI_STATUS_ATTR 0x1
+#define HWIO_EE_n_GSI_STATUS_INI(n) in_dword_masked( \
+		HWIO_EE_n_GSI_STATUS_ADDR(n), \
+		HWIO_EE_n_GSI_STATUS_RMSK)
+#define HWIO_EE_n_GSI_STATUS_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_GSI_STATUS_ADDR(n), \
+		mask)
+#define HWIO_EE_n_GSI_STATUS_ENABLED_BMSK 0x1
+#define HWIO_EE_n_GSI_STATUS_ENABLED_SHFT 0x0
+#define HWIO_EE_n_GSI_CH_CMD_ADDR(n) (GSI_REG_BASE + 0x00012008 + 0x4000 * \
+				      (n))
+#define HWIO_EE_n_GSI_CH_CMD_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012008 + \
+				      0x4000 * (n))
+#define HWIO_EE_n_GSI_CH_CMD_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012008 + \
+				      0x4000 * (n))
+#define HWIO_EE_n_EV_CH_CMD_ADDR(n) (GSI_REG_BASE + 0x00012010 + 0x4000 * \
+				     (n))
+#define HWIO_EE_n_EV_CH_CMD_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012010 + \
+				     0x4000 * (n))
+#define HWIO_EE_n_EV_CH_CMD_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012010 + \
+				     0x4000 * (n))
+#define HWIO_EE_n_GSI_EE_GENERIC_CMD_ADDR(n) (GSI_REG_BASE + 0x00012018 + \
+					      0x4000 * (n))
+#define HWIO_EE_n_GSI_EE_GENERIC_CMD_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x00012018 + 0x4000 * (n))
+#define HWIO_EE_n_GSI_EE_GENERIC_CMD_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x00012018 + 0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_0_ADDR(n) (GSI_REG_BASE + 0x00012038 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_0_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012038 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_0_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012038 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_1_ADDR(n) (GSI_REG_BASE + 0x0001203c + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_1_PHYS(n) (GSI_REG_BASE_PHYS + 0x0001203c + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_1_OFFS(n) (GSI_REG_BASE_OFFS + 0x0001203c + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_2_ADDR(n) (GSI_REG_BASE + 0x00012040 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_2_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012040 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_2_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012040 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_SW_VERSION_ADDR(n) (GSI_REG_BASE + 0x00012044 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_SW_VERSION_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012044 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_SW_VERSION_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012044 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_MCS_CODE_VER_ADDR(n) (GSI_REG_BASE + 0x00012048 +	\
+					    0x4000 * (n))
+#define HWIO_EE_n_GSI_MCS_CODE_VER_PHYS(n) (GSI_REG_BASE_PHYS +	\
+					    0x00012048 + 0x4000 * (n))
+#define HWIO_EE_n_GSI_MCS_CODE_VER_OFFS(n) (GSI_REG_BASE_OFFS +	\
+					    0x00012048 + 0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_3_ADDR(n) (GSI_REG_BASE + 0x0001204c + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_3_PHYS(n) (GSI_REG_BASE_PHYS + 0x0001204c + \
+					  0x4000 * (n))
+#define HWIO_EE_n_GSI_HW_PARAM_3_OFFS(n) (GSI_REG_BASE_OFFS + 0x0001204c + \
+					  0x4000 * (n))
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_ADDR(n) (GSI_REG_BASE + 0x00012080 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012080 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012080 + \
+					  0x4000 * (n))
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_RMSK 0x7f
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MAXn 2
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_ATTR 0x1
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_TYPE_IRQ_ADDR(n), \
+		HWIO_EE_n_CNTXT_TYPE_IRQ_RMSK)
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_INMI(n, mask) in_dword_masked(	\
+		HWIO_EE_n_CNTXT_TYPE_IRQ_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_GENERAL_BMSK 0x40
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_GENERAL_SHFT 0x6
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_BMSK 0x20
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_SHFT 0x5
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_BMSK 0x10
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_SHFT 0x4
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_IEOB_BMSK 0x8
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_IEOB_SHFT 0x3
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_BMSK 0x4
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_SHFT 0x2
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_BMSK 0x2
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_SHFT 0x1
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK 0x1
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_SHFT 0x0
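+/*
+ * CNTXT_TYPE_IRQ is the top-level interrupt type register: each set
+ * bit indicates which source-specific status register to consult
+ * next. An illustrative dispatch, with hypothetical handlers:
+ *
+ *	u32 type = HWIO_EE_n_CNTXT_TYPE_IRQ_INI(ee);
+ *	if (type & HWIO_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK)
+ *		handle_ch_ctrl(ee);	// hypothetical
+ *	if (type & HWIO_EE_n_CNTXT_TYPE_IRQ_IEOB_BMSK)
+ *		handle_ieob(ee);	// hypothetical
+ */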
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_ADDR(n) (GSI_REG_BASE + 0x00012088 + \
+					      0x4000 * (n))
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x00012088 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x00012088 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_RMSK 0x7f
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_MAXn 2
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_ATTR 0x3
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_ADDR(n), \
+		HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_RMSK)
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_OUTMI(n, mask, \
+					   val) out_dword_masked_ns( \
+		HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_INI(n))
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_GENERAL_BMSK 0x40
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_GENERAL_SHFT 0x6
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_EV_CTRL_BMSK 0x20
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_EV_CTRL_SHFT 0x5
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_CH_CTRL_BMSK 0x10
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_CH_CTRL_SHFT 0x4
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_IEOB_BMSK 0x8
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_IEOB_SHFT 0x3
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_GLOB_EE_BMSK 0x4
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_GLOB_EE_SHFT 0x2
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL_BMSK 0x2
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL_SHFT 0x1
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL_BMSK 0x1
+#define HWIO_EE_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_ADDR(n) (GSI_REG_BASE + \
+						0x00012090 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_PHYS(n) (GSI_REG_BASE_PHYS + \
+						0x00012090 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_OFFS(n) (GSI_REG_BASE_OFFS + \
+						0x00012090 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_RMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MAXn 2
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_ATTR 0x1
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_ADDR(n),	\
+		HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_RMSK)
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_ADDR(n),	\
+		mask)
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_BMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_ADDR(n) (GSI_REG_BASE + 0x00012094 + \
+					       0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_PHYS(n) (GSI_REG_BASE_PHYS + \
+					       0x00012094 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_OFFS(n) (GSI_REG_BASE_OFFS + \
+					       0x00012094 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_RMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MAXn 2
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_ATTR 0x1
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_ADDR(n), \
+		HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_RMSK)
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_BMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ADDR(n) (GSI_REG_BASE + \
+						    0x00012098 + 0x4000 * \
+						    (n))
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_PHYS(n) (GSI_REG_BASE_PHYS +	\
+						    0x00012098 + 0x4000 * \
+						    (n))
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(n) (GSI_REG_BASE_OFFS +	\
+						    0x00012098 + 0x4000 * \
+						    (n))
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_RMSK 0x7fffff
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_MAXn 2
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ATTR 0x3
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ADDR(n), \
+		HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_RMSK)
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OUTMI(n, mask, \
+						 val) out_dword_masked_ns( \
+		HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_INI(n))
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_BMSK \
+	0x7fffff
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ADDR(n) (GSI_REG_BASE + \
+						   0x0001209c + 0x4000 * \
+						   (n))
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_PHYS(n) (GSI_REG_BASE_PHYS + \
+						   0x0001209c + 0x4000 * \
+						   (n))
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(n) (GSI_REG_BASE_OFFS + \
+						   0x0001209c + 0x4000 * \
+						   (n))
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_RMSK 0xfffff
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_MAXn 2
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ATTR 0x3
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ADDR(n), \
+		HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_RMSK)
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OUTMI(n, mask, \
+						val) out_dword_masked_ns( \
+		HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ADDR(	\
+			n), \
+		mask, \
+		val, \
+		HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_INI(n))
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK 0xfffff
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_ADDR(n) (GSI_REG_BASE + \
+						    0x000120a0 + 0x4000 * \
+						    (n))
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_PHYS(n) (GSI_REG_BASE_PHYS +	\
+						    0x000120a0 + 0x4000 * \
+						    (n))
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OFFS(n) (GSI_REG_BASE_OFFS +	\
+						    0x000120a0 + 0x4000 * \
+						    (n))
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_RMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_MAXn 2
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_ATTR 0x2
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_BMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_ADDR(n) (GSI_REG_BASE + \
+						   0x000120a4 + 0x4000 * \
+						   (n))
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_PHYS(n) (GSI_REG_BASE_PHYS + \
+						   0x000120a4 + 0x4000 * \
+						   (n))
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(n) (GSI_REG_BASE_OFFS + \
+						   0x000120a4 + 0x4000 * \
+						   (n))
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_RMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_MAXn 2
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_ATTR 0x2
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_BMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_ADDR(n) (GSI_REG_BASE + 0x000120b0 + \
+					      0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x000120b0 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x000120b0 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_RMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MAXn 2
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_ATTR 0x1
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_ADDR(n), \
+		HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_RMSK)
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_EV_CH_BIT_MAP_BMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_EV_CH_BIT_MAP_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ADDR(n) (GSI_REG_BASE + \
+						  0x000120b8 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_PHYS(n) (GSI_REG_BASE_PHYS + \
+						  0x000120b8 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(n) (GSI_REG_BASE_OFFS + \
+						  0x000120b8 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_RMSK 0xfffff
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_MAXn 2
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ATTR 0x3
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ADDR(n), \
+		HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_RMSK)
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_INMI(n, mask) in_dword_masked(	\
+		HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OUTMI(n, mask,	\
+					       val) out_dword_masked_ns( \
+		HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_INI(n))
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK 0xfffff
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_ADDR(n) (GSI_REG_BASE + \
+						  0x000120c0 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_PHYS(n) (GSI_REG_BASE_PHYS + \
+						  0x000120c0 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(n) (GSI_REG_BASE_OFFS + \
+						  0x000120c0 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_RMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_MAXn 2
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_ATTR 0x2
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_EV_CH_BIT_MAP_BMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_EV_CH_BIT_MAP_SHFT 0x0
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_ADDR(n) (GSI_REG_BASE + 0x00012100 + \
+					       0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_PHYS(n) (GSI_REG_BASE_PHYS + \
+					       0x00012100 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_OFFS(n) (GSI_REG_BASE_OFFS + \
+					       0x00012100 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_RMSK 0xf
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_MAXn 2
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_ATTR 0x1
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_ADDR(n), \
+		HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_RMSK)
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT3_BMSK 0x8
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT3_SHFT 0x3
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT2_BMSK 0x4
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT2_SHFT 0x2
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT1_BMSK 0x2
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT1_SHFT 0x1
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_BMSK 0x1
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_SHFT 0x0
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_EN_ADDR(n) (GSI_REG_BASE + 0x00012108 + \
+					     0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_EN_PHYS(n) (GSI_REG_BASE_PHYS + \
+					     0x00012108 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(n) (GSI_REG_BASE_OFFS + \
+					     0x00012108 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_CLR_ADDR(n) (GSI_REG_BASE + 0x00012110 + \
+					      0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_CLR_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x00012110 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GLOB_IRQ_CLR_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x00012110 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_ADDR(n) (GSI_REG_BASE + 0x00012118 + \
+					      0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x00012118 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x00012118 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_RMSK 0xf
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_MAXn 2
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_ATTR 0x1
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_GSI_IRQ_STTS_ADDR(n), \
+		HWIO_EE_n_CNTXT_GSI_IRQ_STTS_RMSK)
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_GSI_IRQ_STTS_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_GSI_MCS_STACK_OVRFLOW_BMSK 0x8
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_GSI_MCS_STACK_OVRFLOW_SHFT 0x3
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_GSI_CMD_FIFO_OVRFLOW_BMSK 0x4
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_GSI_CMD_FIFO_OVRFLOW_SHFT 0x2
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BUS_ERROR_BMSK 0x2
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BUS_ERROR_SHFT 0x1
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BREAK_POINT_BMSK 0x1
+#define HWIO_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BREAK_POINT_SHFT 0x0
+#define HWIO_EE_n_CNTXT_GSI_IRQ_EN_ADDR(n) (GSI_REG_BASE + 0x00012120 +	\
+					    0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GSI_IRQ_EN_PHYS(n) (GSI_REG_BASE_PHYS +	\
+					    0x00012120 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GSI_IRQ_EN_OFFS(n) (GSI_REG_BASE_OFFS +	\
+					    0x00012120 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GSI_IRQ_CLR_ADDR(n) (GSI_REG_BASE + 0x00012128 + \
+					     0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GSI_IRQ_CLR_PHYS(n) (GSI_REG_BASE_PHYS + \
+					     0x00012128 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_GSI_IRQ_CLR_OFFS(n) (GSI_REG_BASE_OFFS + \
+					     0x00012128 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_INTSET_ADDR(n) (GSI_REG_BASE + 0x00012180 + \
+					0x4000 * (n))
+#define HWIO_EE_n_CNTXT_INTSET_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012180 + \
+					0x4000 * (n))
+#define HWIO_EE_n_CNTXT_INTSET_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012180 + \
+					0x4000 * (n))
+#define HWIO_EE_n_CNTXT_INTSET_RMSK 0x1
+#define HWIO_EE_n_CNTXT_INTSET_MAXn 2
+#define HWIO_EE_n_CNTXT_INTSET_ATTR 0x3
+#define HWIO_EE_n_CNTXT_INTSET_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_INTSET_ADDR(n),	\
+		HWIO_EE_n_CNTXT_INTSET_RMSK)
+#define HWIO_EE_n_CNTXT_INTSET_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_INTSET_ADDR(n),	\
+		mask)
+#define HWIO_EE_n_CNTXT_INTSET_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_INTSET_ADDR(n),	\
+		val)
+#define HWIO_EE_n_CNTXT_INTSET_OUTMI(n, mask, val) out_dword_masked_ns(	\
+		HWIO_EE_n_CNTXT_INTSET_ADDR(n),	\
+		mask, \
+		val, \
+		HWIO_EE_n_CNTXT_INTSET_INI(n))
+#define HWIO_EE_n_CNTXT_INTSET_INTYPE_BMSK 0x1
+#define HWIO_EE_n_CNTXT_INTSET_INTYPE_SHFT 0x0
+#define HWIO_EE_n_CNTXT_INTSET_INTYPE_MSI_FVAL 0x0
+#define HWIO_EE_n_CNTXT_INTSET_INTYPE_IRQ_FVAL 0x1
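+/*
+ * Per the _FVAL encodings above, INTSET selects how this EE is
+ * notified: INTYPE 0 routes event notifications as MSI writes (using
+ * the MSI_BASE_LSB/MSB registers that follow), while INTYPE 1 uses
+ * the wired IRQ line.
+ */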
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_ADDR(n) (GSI_REG_BASE + 0x00012188 + \
+					      0x4000 * (n))
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x00012188 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x00012188 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_RMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_MAXn 2
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_ATTR 0x3
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_MSI_BASE_LSB_ADDR(n), \
+		HWIO_EE_n_CNTXT_MSI_BASE_LSB_RMSK)
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_MSI_BASE_LSB_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_MSI_BASE_LSB_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_OUTMI(n, mask, \
+					   val) out_dword_masked_ns( \
+		HWIO_EE_n_CNTXT_MSI_BASE_LSB_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_EE_n_CNTXT_MSI_BASE_LSB_INI(n))
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_MSI_ADDR_LSB_BMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_MSI_BASE_LSB_MSI_ADDR_LSB_SHFT 0x0
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_ADDR(n) (GSI_REG_BASE + 0x0001218c + \
+					      0x4000 * (n))
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_PHYS(n) (GSI_REG_BASE_PHYS + \
+					      0x0001218c + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_OFFS(n) (GSI_REG_BASE_OFFS + \
+					      0x0001218c + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_RMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_MAXn 2
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_ATTR 0x3
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_MSI_BASE_MSB_ADDR(n), \
+		HWIO_EE_n_CNTXT_MSI_BASE_MSB_RMSK)
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_MSI_BASE_MSB_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_MSI_BASE_MSB_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_OUTMI(n, mask, \
+					   val) out_dword_masked_ns( \
+		HWIO_EE_n_CNTXT_MSI_BASE_MSB_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_EE_n_CNTXT_MSI_BASE_MSB_INI(n))
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_MSI_ADDR_MSB_BMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_MSI_BASE_MSB_MSI_ADDR_MSB_SHFT 0x0
+#define HWIO_EE_n_CNTXT_INT_VEC_ADDR(n) (GSI_REG_BASE + 0x00012190 + \
+					 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_INT_VEC_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012190 + \
+					 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_INT_VEC_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012190 + \
+					 0x4000 * (n))
+#define HWIO_EE_n_ERROR_LOG_ADDR(n) (GSI_REG_BASE + 0x00012200 + 0x4000 * \
+				     (n))
+#define HWIO_EE_n_ERROR_LOG_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012200 + \
+				     0x4000 * (n))
+#define HWIO_EE_n_ERROR_LOG_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012200 + \
+				     0x4000 * (n))
+#define HWIO_EE_n_ERROR_LOG_RMSK 0xffffffff
+#define HWIO_EE_n_ERROR_LOG_MAXn 2
+#define HWIO_EE_n_ERROR_LOG_ATTR 0x3
+#define HWIO_EE_n_ERROR_LOG_INI(n) in_dword_masked( \
+		HWIO_EE_n_ERROR_LOG_ADDR(n), \
+		HWIO_EE_n_ERROR_LOG_RMSK)
+#define HWIO_EE_n_ERROR_LOG_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_ERROR_LOG_ADDR(n), \
+		mask)
+#define HWIO_EE_n_ERROR_LOG_OUTI(n, val) out_dword( \
+		HWIO_EE_n_ERROR_LOG_ADDR(n), \
+		val)
+#define HWIO_EE_n_ERROR_LOG_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_EE_n_ERROR_LOG_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_EE_n_ERROR_LOG_INI(n))
+#define HWIO_EE_n_ERROR_LOG_ERROR_LOG_BMSK 0xffffffff
+#define HWIO_EE_n_ERROR_LOG_ERROR_LOG_SHFT 0x0
+#define HWIO_EE_n_ERROR_LOG_CLR_ADDR(n) (GSI_REG_BASE + 0x00012210 + \
+					 0x4000 * (n))
+#define HWIO_EE_n_ERROR_LOG_CLR_PHYS(n) (GSI_REG_BASE_PHYS + 0x00012210 + \
+					 0x4000 * (n))
+#define HWIO_EE_n_ERROR_LOG_CLR_OFFS(n) (GSI_REG_BASE_OFFS + 0x00012210 + \
+					 0x4000 * (n))
+#define HWIO_EE_n_ERROR_LOG_CLR_RMSK 0xffffffff
+#define HWIO_EE_n_ERROR_LOG_CLR_MAXn 2
+#define HWIO_EE_n_ERROR_LOG_CLR_ATTR 0x2
+#define HWIO_EE_n_ERROR_LOG_CLR_OUTI(n, val) out_dword(	\
+		HWIO_EE_n_ERROR_LOG_CLR_ADDR(n), \
+		val)
+#define HWIO_EE_n_ERROR_LOG_CLR_ERROR_LOG_CLR_BMSK 0xffffffff
+#define HWIO_EE_n_ERROR_LOG_CLR_ERROR_LOG_CLR_SHFT 0x0
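+/*
+ * ERROR_LOG_CLR, like the *_IRQ_CLR registers above, carries ATTR 0x2
+ * and only an _OUTI writer: these appear to be write-only
+ * write-1-to-clear registers, so no read-back helper is generated.
+ */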
+#define HWIO_EE_n_CNTXT_SCRATCH_0_ADDR(n) (GSI_REG_BASE + 0x00012400 + \
+					   0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SCRATCH_0_PHYS(n) (GSI_REG_BASE_PHYS + \
+					   0x00012400 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SCRATCH_0_OFFS(n) (GSI_REG_BASE_OFFS + \
+					   0x00012400 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SCRATCH_0_RMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SCRATCH_0_MAXn 2
+#define HWIO_EE_n_CNTXT_SCRATCH_0_ATTR 0x3
+#define HWIO_EE_n_CNTXT_SCRATCH_0_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SCRATCH_0_ADDR(n), \
+		HWIO_EE_n_CNTXT_SCRATCH_0_RMSK)
+#define HWIO_EE_n_CNTXT_SCRATCH_0_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SCRATCH_0_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_SCRATCH_0_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_SCRATCH_0_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_SCRATCH_0_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_EE_n_CNTXT_SCRATCH_0_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_EE_n_CNTXT_SCRATCH_0_INI(n))
+#define HWIO_EE_n_CNTXT_SCRATCH_0_SCRATCH_BMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SCRATCH_0_SCRATCH_SHFT 0x0
+#define HWIO_EE_n_CNTXT_SCRATCH_1_ADDR(n) (GSI_REG_BASE + 0x00012404 + \
+					   0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SCRATCH_1_PHYS(n) (GSI_REG_BASE_PHYS + \
+					   0x00012404 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SCRATCH_1_OFFS(n) (GSI_REG_BASE_OFFS + \
+					   0x00012404 + 0x4000 * (n))
+#define HWIO_EE_n_CNTXT_SCRATCH_1_RMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SCRATCH_1_MAXn 2
+#define HWIO_EE_n_CNTXT_SCRATCH_1_ATTR 0x3
+#define HWIO_EE_n_CNTXT_SCRATCH_1_INI(n) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SCRATCH_1_ADDR(n), \
+		HWIO_EE_n_CNTXT_SCRATCH_1_RMSK)
+#define HWIO_EE_n_CNTXT_SCRATCH_1_INMI(n, mask) in_dword_masked( \
+		HWIO_EE_n_CNTXT_SCRATCH_1_ADDR(n), \
+		mask)
+#define HWIO_EE_n_CNTXT_SCRATCH_1_OUTI(n, val) out_dword( \
+		HWIO_EE_n_CNTXT_SCRATCH_1_ADDR(n), \
+		val)
+#define HWIO_EE_n_CNTXT_SCRATCH_1_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_EE_n_CNTXT_SCRATCH_1_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_EE_n_CNTXT_SCRATCH_1_INI(n))
+#define HWIO_EE_n_CNTXT_SCRATCH_1_SCRATCH_BMSK 0xffffffff
+#define HWIO_EE_n_CNTXT_SCRATCH_1_SCRATCH_SHFT 0x0
+#define HWIO_GSI_MCS_CFG_ADDR (GSI_REG_BASE + 0x0000b000)
+#define HWIO_GSI_MCS_CFG_PHYS (GSI_REG_BASE_PHYS + 0x0000b000)
+#define HWIO_GSI_MCS_CFG_OFFS (GSI_REG_BASE_OFFS + 0x0000b000)
+#define HWIO_GSI_TZ_FW_AUTH_LOCK_ADDR (GSI_REG_BASE + 0x0000b008)
+#define HWIO_GSI_TZ_FW_AUTH_LOCK_PHYS (GSI_REG_BASE_PHYS + 0x0000b008)
+#define HWIO_GSI_TZ_FW_AUTH_LOCK_OFFS (GSI_REG_BASE_OFFS + 0x0000b008)
+#define HWIO_GSI_MSA_FW_AUTH_LOCK_ADDR (GSI_REG_BASE + 0x0000b010)
+#define HWIO_GSI_MSA_FW_AUTH_LOCK_PHYS (GSI_REG_BASE_PHYS + 0x0000b010)
+#define HWIO_GSI_MSA_FW_AUTH_LOCK_OFFS (GSI_REG_BASE_OFFS + 0x0000b010)
+#define HWIO_GSI_SP_FW_AUTH_LOCK_ADDR (GSI_REG_BASE + 0x0000b018)
+#define HWIO_GSI_SP_FW_AUTH_LOCK_PHYS (GSI_REG_BASE_PHYS + 0x0000b018)
+#define HWIO_GSI_SP_FW_AUTH_LOCK_OFFS (GSI_REG_BASE_OFFS + 0x0000b018)
+#define HWIO_INTER_EE_n_ORIGINATOR_EE_ADDR(n) (GSI_REG_BASE + 0x0000c000 + \
+					       0x1000 * (n))
+#define HWIO_INTER_EE_n_ORIGINATOR_EE_PHYS(n) (GSI_REG_BASE_PHYS + \
+					       0x0000c000 + 0x1000 * (n))
+#define HWIO_INTER_EE_n_ORIGINATOR_EE_OFFS(n) (GSI_REG_BASE_OFFS + \
+					       0x0000c000 + 0x1000 * (n))
+#define HWIO_INTER_EE_n_GSI_CH_CMD_ADDR(n) (GSI_REG_BASE + 0x0000c008 +	\
+					    0x1000 * (n))
+#define HWIO_INTER_EE_n_GSI_CH_CMD_PHYS(n) (GSI_REG_BASE_PHYS +	\
+					    0x0000c008 + 0x1000 * (n))
+#define HWIO_INTER_EE_n_GSI_CH_CMD_OFFS(n) (GSI_REG_BASE_OFFS +	\
+					    0x0000c008 + 0x1000 * (n))
+#define HWIO_INTER_EE_n_EV_CH_CMD_ADDR(n) (GSI_REG_BASE + 0x0000c010 + \
+					   0x1000 * (n))
+#define HWIO_INTER_EE_n_EV_CH_CMD_PHYS(n) (GSI_REG_BASE_PHYS + \
+					   0x0000c010 + 0x1000 * (n))
+#define HWIO_INTER_EE_n_EV_CH_CMD_OFFS(n) (GSI_REG_BASE_OFFS + \
+					   0x0000c010 + 0x1000 * (n))
+#define HWIO_INTER_EE_n_SRC_GSI_CH_IRQ_ADDR(n) (GSI_REG_BASE + \
+						0x0000c018 + 0x1000 * (n))
+#define HWIO_INTER_EE_n_SRC_GSI_CH_IRQ_PHYS(n) (GSI_REG_BASE_PHYS + \
+						0x0000c018 + 0x1000 * (n))
+#define HWIO_INTER_EE_n_SRC_GSI_CH_IRQ_OFFS(n) (GSI_REG_BASE_OFFS + \
+						0x0000c018 + 0x1000 * (n))
+#define HWIO_INTER_EE_n_SRC_EV_CH_IRQ_ADDR(n) (GSI_REG_BASE + 0x0000c01c + \
+					       0x1000 * (n))
+#define HWIO_INTER_EE_n_SRC_EV_CH_IRQ_PHYS(n) (GSI_REG_BASE_PHYS + \
+					       0x0000c01c + 0x1000 * (n))
+#define HWIO_INTER_EE_n_SRC_EV_CH_IRQ_OFFS(n) (GSI_REG_BASE_OFFS + \
+					       0x0000c01c + 0x1000 * (n))
+#define HWIO_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_ADDR(n) (GSI_REG_BASE + \
+						    0x0000c020 + 0x1000 * \
+						    (n))
+#define HWIO_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_PHYS(n) (GSI_REG_BASE_PHYS +	\
+						    0x0000c020 + 0x1000 * \
+						    (n))
+#define HWIO_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_OFFS(n) (GSI_REG_BASE_OFFS +	\
+						    0x0000c020 + 0x1000 * \
+						    (n))
+#define HWIO_INTER_EE_n_SRC_EV_CH_IRQ_MSK_ADDR(n) (GSI_REG_BASE + \
+						   0x0000c024 + 0x1000 * \
+						   (n))
+#define HWIO_INTER_EE_n_SRC_EV_CH_IRQ_MSK_PHYS(n) (GSI_REG_BASE_PHYS + \
+						   0x0000c024 + 0x1000 * \
+						   (n))
+#define HWIO_INTER_EE_n_SRC_EV_CH_IRQ_MSK_OFFS(n) (GSI_REG_BASE_OFFS + \
+						   0x0000c024 + 0x1000 * \
+						   (n))
+#define HWIO_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_ADDR(n) (GSI_REG_BASE + \
+						    0x0000c028 + 0x1000 * \
+						    (n))
+#define HWIO_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_PHYS(n) (GSI_REG_BASE_PHYS +	\
+						    0x0000c028 + 0x1000 * \
+						    (n))
+#define HWIO_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_OFFS(n) (GSI_REG_BASE_OFFS +	\
+						    0x0000c028 + 0x1000 * \
+						    (n))
+#define HWIO_INTER_EE_n_SRC_EV_CH_IRQ_CLR_ADDR(n) (GSI_REG_BASE + \
+						   0x0000c02c + 0x1000 * \
+						   (n))
+#define HWIO_INTER_EE_n_SRC_EV_CH_IRQ_CLR_PHYS(n) (GSI_REG_BASE_PHYS + \
+						   0x0000c02c + 0x1000 * \
+						   (n))
+#define HWIO_INTER_EE_n_SRC_EV_CH_IRQ_CLR_OFFS(n) (GSI_REG_BASE_OFFS + \
+						   0x0000c02c + 0x1000 * \
+						   (n))
+#endif
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/gsi_hwio_def.h b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/gsi_hwio_def.h
new file mode 100644
index 0000000..efd0a2b
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/gsi_hwio_def.h
@@ -0,0 +1,520 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#if !defined(_GSI_HWIO_DEF_H_)
+#define _GSI_HWIO_DEF_H_
+struct gsi_hwio_def_gsi_cfg_s {
+	u32	gsi_enable : 1;
+	u32	mcs_enable : 1;
+	u32	double_mcs_clk_freq : 1;
+	u32	uc_is_mcs : 1;
+	u32	gsi_pwr_clps : 1;
+	u32	bp_mtrix_disable : 1;
+	u32	reserved0 : 2;
+	u32	sleep_clk_div : 4;
+	u32	reserved1 : 20;
+};
+union gsi_hwio_def_gsi_cfg_u {
+	struct gsi_hwio_def_gsi_cfg_s	def;
+	u32				value;
+};
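+/*
+ * Each register in this file gets a bit-field struct plus a def/value
+ * union so dump code can capture the raw dword once and then decode
+ * fields by name. An illustrative decode, where raw would come from a
+ * register read or a dump buffer:
+ *
+ *	union gsi_hwio_def_gsi_cfg_u cfg;
+ *	cfg.value = raw;
+ *	if (cfg.def.gsi_enable)
+ *		pr_info("GSI on, sleep_clk_div=%u\n",
+ *			(unsigned int)cfg.def.sleep_clk_div);
+ */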
+struct gsi_hwio_def_gsi_ree_cfg_s {
+	u32	move_to_esc_clr_mode_trsh : 1;
+	u32	channel_empty_int_enable : 1;
+	u32	reserved0 : 6;
+	u32	max_burst_size : 8;
+	u32	reserved1 : 16;
+};
+union gsi_hwio_def_gsi_ree_cfg_u {
+	struct gsi_hwio_def_gsi_ree_cfg_s	def;
+	u32					value;
+};
+struct gsi_hwio_def_gsi_manager_ee_qos_n_s {
+	u32	ee_prio : 2;
+	u32	reserved0 : 6;
+	u32	max_ch_alloc : 5;
+	u32	reserved1 : 3;
+	u32	max_ev_alloc : 5;
+	u32	reserved2 : 11;
+};
+union gsi_hwio_def_gsi_manager_ee_qos_n_u {
+	struct gsi_hwio_def_gsi_manager_ee_qos_n_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_gsi_shram_n_s {
+	u32 shram : 32;
+};
+union gsi_hwio_def_gsi_shram_n_u {
+	struct gsi_hwio_def_gsi_shram_n_s	def;
+	u32					value;
+};
+struct gsi_hwio_def_gsi_map_ee_n_ch_k_vp_table_s {
+	u32	phy_ch : 5;
+	u32	valid : 1;
+	u32	reserved0 : 26;
+};
+union gsi_hwio_def_gsi_map_ee_n_ch_k_vp_table_u {
+	struct gsi_hwio_def_gsi_map_ee_n_ch_k_vp_table_s	def;
+	u32							value;
+};
+struct gsi_hwio_def_gsi_test_bus_sel_s {
+	u32	gsi_testbus_sel : 8;
+	u32	reserved0 : 8;
+	u32	gsi_hw_events_sel : 4;
+	u32	reserved1 : 12;
+};
+union gsi_hwio_def_gsi_test_bus_sel_u {
+	struct gsi_hwio_def_gsi_test_bus_sel_s	def;
+	u32					value;
+};
+struct gsi_hwio_def_gsi_test_bus_reg_s {
+	u32 gsi_testbus_reg : 32;
+};
+union gsi_hwio_def_gsi_test_bus_reg_u {
+	struct gsi_hwio_def_gsi_test_bus_reg_s	def;
+	u32					value;
+};
+struct gsi_hwio_def_gsi_debug_countern_s {
+	u32	counter_value : 16;
+	u32	reserved0 : 16;
+};
+union gsi_hwio_def_gsi_debug_countern_u {
+	struct gsi_hwio_def_gsi_debug_countern_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_gsi_debug_sw_rf_n_read_s {
+	u32 rf_reg : 32;
+};
+union gsi_hwio_def_gsi_debug_sw_rf_n_read_u {
+	struct gsi_hwio_def_gsi_debug_sw_rf_n_read_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_gsi_debug_ee_n_ev_k_vp_table_s {
+	u32	phy_ev_ch : 5;
+	u32	valid : 1;
+	u32	reserved0 : 26;
+};
+union gsi_hwio_def_gsi_debug_ee_n_ev_k_vp_table_u {
+	struct gsi_hwio_def_gsi_debug_ee_n_ev_k_vp_table_s	def;
+	u32							value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_0_s {
+	u32	chtype_protocol : 3;
+	u32	chtype_dir : 1;
+	u32	ee : 4;
+	u32	chid : 5;
+	u32	chtype_protocol_msb : 1;
+	u32	erindex : 5;
+	u32	reserved0 : 1;
+	u32	chstate : 4;
+	u32	element_size : 8;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_0_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_0_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_1_s {
+	u32	r_length : 16;
+	u32	reserved0 : 16;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_1_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_1_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_2_s {
+	u32 r_base_addr_lsbs : 32;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_2_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_2_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_3_s {
+	u32 r_base_addr_msbs : 32;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_3_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_3_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_4_s {
+	u32 read_ptr_lsb : 32;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_4_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_4_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_5_s {
+	u32 read_ptr_msb : 32;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_5_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_5_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_6_s {
+	u32 write_ptr_lsb : 32;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_6_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_6_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_7_s {
+	u32 write_ptr_msb : 32;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_7_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_7_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_re_fetch_read_ptr_s {
+	u32	read_ptr : 16;
+	u32	reserved0 : 16;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_re_fetch_read_ptr_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_re_fetch_read_ptr_s	def;
+	u32							value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_re_fetch_write_ptr_s {
+	u32	re_intr_db : 16;
+	u32	reserved0 : 16;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_re_fetch_write_ptr_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_re_fetch_write_ptr_s	def;
+	u32							value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_qos_s {
+	u32	wrr_weight : 4;
+	u32	reserved0 : 4;
+	u32	max_prefetch : 1;
+	u32	use_db_eng : 1;
+	u32	prefetch_mode : 4;
+	u32	reserved1 : 2;
+	u32	empty_lvl_thrshold : 8;
+	u32	reserved2 : 8;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_qos_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_qos_s def;
+	u32					value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_0_s {
+	u32 scratch : 32;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_scratch_0_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_0_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_1_s {
+	u32 scratch : 32;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_scratch_1_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_1_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_2_s {
+	u32 scratch : 32;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_scratch_2_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_2_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_3_s {
+	u32 scratch : 32;
+};
+union gsi_hwio_def_ee_n_gsi_ch_k_scratch_3_u {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_3_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_0_s {
+	u32	chtype : 4;
+	u32	ee : 4;
+	u32	evchid : 8;
+	u32	intype : 1;
+	u32	reserved0 : 3;
+	u32	chstate : 4;
+	u32	element_size : 8;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_0_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_0_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_1_s {
+	u32	r_length : 16;
+	u32	reserved0 : 16;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_1_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_1_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_2_s {
+	u32 r_base_addr_lsbs : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_2_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_2_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_3_s {
+	u32 r_base_addr_msbs : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_3_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_3_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_4_s {
+	u32 read_ptr_lsb : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_4_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_4_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_5_s {
+	u32 read_ptr_msb : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_5_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_5_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_6_s {
+	u32 write_ptr_lsb : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_6_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_6_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_7_s {
+	u32 write_ptr_msb : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_7_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_7_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_8_s {
+	u32	int_modt : 16;
+	u32	int_modc : 8;
+	u32	int_mod_cnt : 8;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_8_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_8_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_9_s {
+	u32 intvec : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_9_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_9_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_10_s {
+	u32 msi_addr_lsb : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_10_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_10_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_11_s {
+	u32 msi_addr_msb : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_11_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_11_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_12_s {
+	u32 rp_update_addr_lsb : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_12_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_12_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_13_s {
+	u32 rp_update_addr_msb : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_cntxt_13_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_13_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_scratch_0_s {
+	u32 scratch : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_scratch_0_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_scratch_0_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_ev_ch_k_scratch_1_s {
+	u32 scratch : 32;
+};
+union gsi_hwio_def_ee_n_ev_ch_k_scratch_1_u {
+	struct gsi_hwio_def_ee_n_ev_ch_k_scratch_1_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_gsi_status_s {
+	u32	enabled : 1;
+	u32	reserved0 : 31;
+};
+union gsi_hwio_def_ee_n_gsi_status_u {
+	struct gsi_hwio_def_ee_n_gsi_status_s	def;
+	u32					value;
+};
+struct gsi_hwio_def_ee_n_cntxt_type_irq_s {
+	u32	ch_ctrl : 1;
+	u32	ev_ctrl : 1;
+	u32	glob_ee : 1;
+	u32	ieob : 1;
+	u32	inter_ee_ch_ctrl : 1;
+	u32	inter_ee_ev_ctrl : 1;
+	u32	general : 1;
+	u32	reserved0 : 25;
+};
+union gsi_hwio_def_ee_n_cntxt_type_irq_u {
+	struct gsi_hwio_def_ee_n_cntxt_type_irq_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_cntxt_type_irq_msk_s {
+	u32	ch_ctrl : 1;
+	u32	ev_ctrl : 1;
+	u32	glob_ee : 1;
+	u32	ieob : 1;
+	u32	inter_ee_ch_ctrl : 1;
+	u32	inter_ee_ev_ctrl : 1;
+	u32	general : 1;
+	u32	reserved0 : 25;
+};
+union gsi_hwio_def_ee_n_cntxt_type_irq_msk_u {
+	struct gsi_hwio_def_ee_n_cntxt_type_irq_msk_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_s {
+	u32 gsi_ch_bit_map : 32;
+};
+union gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_u {
+	struct gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_s def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_s {
+	u32 ev_ch_bit_map : 32;
+};
+union gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_u {
+	struct gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_msk_s {
+	u32	gsi_ch_bit_map_msk : 23;
+	u32	reserved0 : 9;
+};
+union gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_msk_u {
+	struct gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_msk_s	def;
+	u32							value;
+};
+struct gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_msk_s {
+	u32	ev_ch_bit_map_msk : 20;
+	u32	reserved0 : 12;
+};
+union gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_msk_u {
+	struct gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_msk_s	def;
+	u32							value;
+};
+struct gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_clr_s {
+	u32 gsi_ch_bit_map : 32;
+};
+union gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_clr_u {
+	struct gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_clr_s	def;
+	u32							value;
+};
+struct gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_clr_s {
+	u32 ev_ch_bit_map : 32;
+};
+union gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_clr_u {
+	struct gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_clr_s	def;
+	u32							value;
+};
+struct gsi_hwio_def_ee_n_cntxt_src_ieob_irq_s {
+	u32 ev_ch_bit_map : 32;
+};
+union gsi_hwio_def_ee_n_cntxt_src_ieob_irq_u {
+	struct gsi_hwio_def_ee_n_cntxt_src_ieob_irq_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_cntxt_src_ieob_irq_msk_s {
+	u32	ev_ch_bit_map_msk : 20;
+	u32	reserved0 : 12;
+};
+union gsi_hwio_def_ee_n_cntxt_src_ieob_irq_msk_u {
+	struct gsi_hwio_def_ee_n_cntxt_src_ieob_irq_msk_s	def;
+	u32							value;
+};
+struct gsi_hwio_def_ee_n_cntxt_src_ieob_irq_clr_s {
+	u32 ev_ch_bit_map : 32;
+};
+union gsi_hwio_def_ee_n_cntxt_src_ieob_irq_clr_u {
+	struct gsi_hwio_def_ee_n_cntxt_src_ieob_irq_clr_s	def;
+	u32							value;
+};
+struct gsi_hwio_def_ee_n_cntxt_glob_irq_stts_s {
+	u32	error_int : 1;
+	u32	gp_int1 : 1;
+	u32	gp_int2 : 1;
+	u32	gp_int3 : 1;
+	u32	reserved0 : 28;
+};
+union gsi_hwio_def_ee_n_cntxt_glob_irq_stts_u {
+	struct gsi_hwio_def_ee_n_cntxt_glob_irq_stts_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_cntxt_gsi_irq_stts_s {
+	u32	gsi_break_point : 1;
+	u32	gsi_bus_error : 1;
+	u32	gsi_cmd_fifo_ovrflow : 1;
+	u32	gsi_mcs_stack_ovrflow : 1;
+	u32	reserved0 : 28;
+};
+union gsi_hwio_def_ee_n_cntxt_gsi_irq_stts_u {
+	struct gsi_hwio_def_ee_n_cntxt_gsi_irq_stts_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_cntxt_intset_s {
+	u32	intype : 1;
+	u32	reserved0 : 31;
+};
+union gsi_hwio_def_ee_n_cntxt_intset_u {
+	struct gsi_hwio_def_ee_n_cntxt_intset_s def;
+	u32					value;
+};
+struct gsi_hwio_def_ee_n_cntxt_msi_base_lsb_s {
+	u32 msi_addr_lsb : 32;
+};
+union gsi_hwio_def_ee_n_cntxt_msi_base_lsb_u {
+	struct gsi_hwio_def_ee_n_cntxt_msi_base_lsb_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_cntxt_msi_base_msb_s {
+	u32 msi_addr_msb : 32;
+};
+union gsi_hwio_def_ee_n_cntxt_msi_base_msb_u {
+	struct gsi_hwio_def_ee_n_cntxt_msi_base_msb_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_error_log_s {
+	u32 error_log : 32;
+};
+union gsi_hwio_def_ee_n_error_log_u {
+	struct gsi_hwio_def_ee_n_error_log_s	def;
+	u32					value;
+};
+struct gsi_hwio_def_ee_n_error_log_clr_s {
+	u32 error_log_clr : 32;
+};
+union gsi_hwio_def_ee_n_error_log_clr_u {
+	struct gsi_hwio_def_ee_n_error_log_clr_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_cntxt_scratch_0_s {
+	u32 scratch : 32;
+};
+union gsi_hwio_def_ee_n_cntxt_scratch_0_u {
+	struct gsi_hwio_def_ee_n_cntxt_scratch_0_s	def;
+	u32						value;
+};
+struct gsi_hwio_def_ee_n_cntxt_scratch_1_s {
+	u32 scratch : 32;
+};
+union gsi_hwio_def_ee_n_cntxt_scratch_1_u {
+	struct gsi_hwio_def_ee_n_cntxt_scratch_1_s	def;
+	u32						value;
+};
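+/*
+ * How these shadow unions are meant to be consumed (a minimal sketch,
+ * assuming `val` holds a raw 32-bit read of the GSI_CFG register): load
+ * the word into .value, then pick individual fields out of .def.
+ *
+ *   union gsi_hwio_def_gsi_cfg_u cfg;
+ *
+ *   cfg.value = val;
+ *   if (cfg.def.gsi_enable && cfg.def.mcs_enable)
+ *       ;  // both the GSI core and the MCS are running
+ */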
+#endif
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_access_control.h b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_access_control.h
new file mode 100644
index 0000000..3fdb2ed
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_access_control.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#if !defined(_IPA_ACCESS_CONTROL_H_)
+#define _IPA_ACCESS_CONTROL_H_
+
+#include "ipa_reg_dump.h"
+
+/*
+ * The following is target specific.
+ */
+static struct reg_mem_access_map_t mem_access_map[] = {
+	/*------------------------------------------------------------*/
+	/*      Range               Use when              Use when    */
+	/*  Begin    End           SD_ENABLED           SD_DISABLED   */
+	/*------------------------------------------------------------*/
+	{ 0x04000, 0x05000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
+	{ 0x1F000, 0x27000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
+	{ 0x05000, 0x0f000, { &io_matrix[AA_COMBO], &io_matrix[AN_COMBO] } },
+	{ 0x0f000, 0x10000, { &io_matrix[NN_COMBO], &io_matrix[NN_COMBO] } },
+	{ 0x13000, 0x17000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
+	{ 0x17000, 0x1b000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
+	{ 0x1b000, 0x1f000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
+	{ 0x10000, 0x11000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
+	{ 0x11000, 0x12000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
+	{ 0x12000, 0x13000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
+	{ 0x43000, 0x44000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
+	{ 0x44000, 0x45000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
+	{ 0x45000, 0x47000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
+	{ 0x40000, 0x42000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
+	{ 0x42000, 0x43000, { &io_matrix[AA_COMBO], &io_matrix[AN_COMBO] } },
+	{ 0x50000, 0x60000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
+	{ 0x60000, 0x80000, { &io_matrix[AN_COMBO], &io_matrix[NN_COMBO] } },
+	{ 0x80000, 0x81000, { &io_matrix[NN_COMBO], &io_matrix[NN_COMBO] } },
+	{ 0x81000, 0x83000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
+	{ 0xa0000, 0xc0000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
+	{ 0xc0000, 0xc2000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
+	{ 0xc2000, 0xd0000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
+};
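+/*
+ * A minimal sketch of how a register dumper might consult this map
+ * before touching an offset.  The field names (begin, end, io) and the
+ * "SD_ENABLED selects index 0" convention are assumptions here; the
+ * real struct layout comes from ipa_reg_dump.h.
+ *
+ *   static struct io_matrix *ipa_ofst_access(u32 ofst, bool sd_enabled)
+ *   {
+ *       int i;
+ *
+ *       for (i = 0; i < ARRAY_SIZE(mem_access_map); i++)
+ *           if (ofst >= mem_access_map[i].begin &&
+ *               ofst < mem_access_map[i].end)
+ *               return mem_access_map[i].io[sd_enabled ? 0 : 1];
+ *
+ *       return NULL;  // no range covers this offset
+ *   }
+ */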
+
+#endif /* #if !defined(_IPA_ACCESS_CONTROL_H_) */
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_gcc_hwio.h b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_gcc_hwio.h
new file mode 100644
index 0000000..0adf6ad
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_gcc_hwio.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#if !defined(_IPA_GCC_HWIO_H_)
+#define _IPA_GCC_HWIO_H_
+/*
+ *
+ * HWIO register definitions to follow:
+ *
+ */
+#endif
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_gcc_hwio_def.h b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_gcc_hwio_def.h
new file mode 100644
index 0000000..c841bac
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_gcc_hwio_def.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#if !defined(_IPA_GCC_HWIO_DEF_H_)
+#define _IPA_GCC_HWIO_DEF_H_
+#endif
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hw_common_ex.h b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hw_common_ex.h
new file mode 100644
index 0000000..9d8e8a2
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hw_common_ex.h
@@ -0,0 +1,472 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#if !defined(_IPA_HW_COMMON_EX_H_)
+#define _IPA_HW_COMMON_EX_H_
+
+/* VLVL defs are available for 854 */
+#define FEATURE_VLVL_DEFS                            true
+
+/* IPAv4 version flag for Sdx24 */
+#define FEATURE_IPA_HW_VERSION_4_0                   true
+
+/* Important Platform Specific Values: IRQ_NUM, IRQ_CNT, BCR */
+#define IPA_HW_BAM_IRQ_NUM                           440
+
+/* Q6 IRQ number for IPA. */
+#define IPA_HW_IRQ_NUM                               441
+
+/* Total number of different interrupts that can be enabled */
+#define IPA_HW_IRQ_CNT_TOTAL                         23
+
+/* IPAv4 BCR value */
+#define IPA_HW_BCR_REG_VAL                           0x00000039
+
+/* IPAv4 spare reg value */
+#define IPA_HW_SPARE_1_REG_VAL                       0xC0000005
+
+/* Whether to allow setting step mode on IPA when we crash or not */
+#define IPA_CFG_HW_IS_STEP_MODE_ALLOWED              (false)
+
+/* GSI MHI related definitions */
+#define IPA_HW_GSI_MHI_CONSUMER_CHANNEL_NUM          0x0
+#define IPA_HW_GSI_MHI_PRODUCER_CHANNEL_NUM          0x1
+
+#define IPA_HW_GSI_MHI_CONSUMER_EP_NUM               0x1
+#define IPA_HW_GSI_MHI_PRODUCER_EP_NUM               0x11
+
+/* IPA ZIP WA related Macros */
+#define IPA_HW_DCMP_SRC_PIPE                         0x8
+#define IPA_HW_DCMP_DEST_PIPE                        0x4
+#define IPA_HW_ACK_MNGR_MASK                         0x1D
+#define IPA_HW_DCMP_SRC_GRP                          0x5
+
+/* IPA Clock resource name */
+#define IPA_CLK_RESOURCE_NAME                        "/clk/pcnoc"
+
+/* IPA Clock Bus Client name */
+#define IPA_CLK_BUS_CLIENT_NAME                      "IPA_PCNOC_BUS_CLIENT"
+
+/* HPS Sequences */
+#define IPA_HW_PKT_PROCESS_HPS_DMA                      0x0
+#define IPA_HW_PKT_PROCESS_HPS_PKT_PRS_DECIPH_UCP       0x3
+#define IPA_HW_PKT_PROCESS_HPS_2_PKT_PRS_NO_DECIPH      0x4
+#define IPA_HW_PKT_PROCESS_HPS_2_PKT_PRS_DECIPH         0x5
+#define IPA_HW_PKT_PROCESS_HPS_PKT_PRS_NO_DECIPH_NO_UCP 0x6
+#define IPA_HW_PKT_PROCESS_HPS_PKT_PRS_DECIPH_NO_UCP    0x7
+#define IPA_HW_PKT_PROCESS_HPS_DMA_PARSER               0x8
+#define IPA_HW_PKT_PROCESS_HPS_DMA_DECIPH_PARSER        0x9
+
+/* DPS Sequences */
+#define IPA_HW_PKT_PROCESS_DPS_DMA                      0x0
+#define IPA_HW_PKT_PROCESS_DPS_DMA_WITH_DECIPH          0x1
+#define IPA_HW_PKT_PROCESS_DPS_DMA_WITH_DECOMP          0x2
+
+/* Src RSRC GRP config */
+#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_0           0x05050404
+#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_1           0x0A0A0A0A
+#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_2           0x0C0C0C0C
+#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_3           0x3F003F00
+#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_4           0x0E0E0E0E
+
+#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_0           0x00000101
+#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_1           0x00000808
+#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_2           0x00000808
+#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_3           0x3F003F00
+#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_4           0x00000E0E
+
+/* Dest RSRC GRP config */
+#define IPA_HW_DST_RSRC_GRP_01_RSRC_TYPE_0           0x04040404
+#define IPA_HW_DST_RSRC_GRP_01_RSRC_TYPE_1           0x3F013F02
+
+#define IPA_HW_DST_RSRC_GRP_23_RSRC_TYPE_0           0x02020303
+#define IPA_HW_DST_RSRC_GRP_23_RSRC_TYPE_1           0x02000201
+
+
+#define IPA_HW_RX_HPS_CLIENTS_MIN_DEPTH_0            0x00020703
+#define IPA_HW_RX_HPS_CLIENTS_MAX_DEPTH_0            0x00020703
+
+#define IPA_HW_RSRP_GRP_0                            0x0
+#define IPA_HW_RSRP_GRP_1                            0x1
+#define IPA_HW_RSRP_GRP_2                            0x2
+#define IPA_HW_RSRP_GRP_3                            0x3
+
+#define IPA_HW_PCIE_SRC_RSRP_GRP                     IPA_HW_RSRP_GRP_0
+#define IPA_HW_PCIE_DEST_RSRP_GRP                    IPA_HW_RSRP_GRP_0
+
+#define IPA_HW_DDR_SRC_RSRP_GRP                      IPA_HW_RSRP_GRP_1
+#define IPA_HW_DDR_DEST_RSRP_GRP                     IPA_HW_RSRP_GRP_1
+
+#define IPA_HW_SRC_RSRP_TYPE_MAX                     0x4
+#define IPA_HW_DST_RSRP_TYPE_MAX                     0x3
+
+#define GSI_HW_QSB_LOG_MISC_MAX 0x4
+
+/* Is IPA decompression feature enabled */
+#define IPA_HW_IS_DECOMPRESSION_ENABLED              (1)
+
+/* Whether to allow setting step mode on IPA when we crash or not */
+#define IPA_HW_IS_STEP_MODE_ALLOWED                  (true)
+
+/*
+ * HW specific clock vote freq values in KHz
+ * (BIMC/SNOC/PCNOC/IPA/Q6 CPU)
+ */
+enum ipa_hw_clk_freq_e {
+	/* BIMC */
+	IPA_HW_CLK_FREQ_BIMC_PEAK       = 518400,
+	IPA_HW_CLK_FREQ_BIMC_NOM_PLUS   = 404200,
+	IPA_HW_CLK_FREQ_BIMC_NOM        = 404200,
+	IPA_HW_CLK_FREQ_BIMC_SVS        = 100000,
+
+	/* PCNOC */
+	IPA_HW_CLK_FREQ_PCNOC_PEAK      = 133330,
+	IPA_HW_CLK_FREQ_PCNOC_NOM_PLUS  = 100000,
+	IPA_HW_CLK_FREQ_PCNOC_NOM       = 100000,
+	IPA_HW_CLK_FREQ_PCNOC_SVS       = 50000,
+
+	/* SNOC */
+	IPA_HW_CLK_FREQ_SNOC_PEAK       = 200000,
+	IPA_HW_CLK_FREQ_SNOC_NOM_PLUS   = 150000,
+	IPA_HW_CLK_FREQ_SNOC_NOM        = 150000,
+	IPA_HW_CLK_FREQ_SNOC_SVS        = 85000,
+	IPA_HW_CLK_FREQ_SNOC_SVS_2      = 50000,
+
+	/* IPA */
+	IPA_HW_CLK_FREQ_IPA_PEAK        = 500000,
+	IPA_HW_CLK_FREQ_IPA_NOM_PLUS    = 440000,
+	IPA_HW_CLK_FREQ_IPA_NOM         = 440000,
+	IPA_HW_CLK_FREQ_IPA_SVS         = 250000,
+	IPA_HW_CLK_FREQ_IPA_SVS_2       = 120000,
+
+	/* Q6 CPU */
+	IPA_HW_CLK_FREQ_Q6_PEAK         = 729600,
+	IPA_HW_CLK_FREQ_Q6_NOM_PLUS     = 729600,
+	IPA_HW_CLK_FREQ_Q6_NOM          = 729600,
+	IPA_HW_CLK_FREQ_Q6_SVS          = 729600,
+};
+
+/* Pipe ID of all the IPA pipes */
+enum ipa_hw_pipe_id_e {
+	IPA_HW_PIPE_ID_0,
+	IPA_HW_PIPE_ID_1,
+	IPA_HW_PIPE_ID_2,
+	IPA_HW_PIPE_ID_3,
+	IPA_HW_PIPE_ID_4,
+	IPA_HW_PIPE_ID_5,
+	IPA_HW_PIPE_ID_6,
+	IPA_HW_PIPE_ID_7,
+	IPA_HW_PIPE_ID_8,
+	IPA_HW_PIPE_ID_9,
+	IPA_HW_PIPE_ID_10,
+	IPA_HW_PIPE_ID_11,
+	IPA_HW_PIPE_ID_12,
+	IPA_HW_PIPE_ID_13,
+	IPA_HW_PIPE_ID_14,
+	IPA_HW_PIPE_ID_15,
+	IPA_HW_PIPE_ID_16,
+	IPA_HW_PIPE_ID_17,
+	IPA_HW_PIPE_ID_18,
+	IPA_HW_PIPE_ID_19,
+	IPA_HW_PIPE_ID_20,
+	IPA_HW_PIPE_ID_21,
+	IPA_HW_PIPE_ID_22,
+	IPA_HW_PIPE_ID_MAX
+};
+
+/* Pipe IDs of System BAM Endpoints between Q6 & IPA */
+enum ipa_hw_q6_pipe_id_e {
+	/* Pipes used by IPA Q6 driver */
+	IPA_HW_Q6_DL_CONSUMER_PIPE_ID           = IPA_HW_PIPE_ID_3,
+	IPA_HW_Q6_CTL_CONSUMER_PIPE_ID          = IPA_HW_PIPE_ID_4,
+	IPA_HW_Q6_UL_PRODUCER_PIPE_ID           = IPA_HW_PIPE_ID_13,
+	IPA_HW_Q6_DL_PRODUCER_PIPE_ID           = IPA_HW_PIPE_ID_14,
+
+	IPA_HW_Q6_LTE_DL_CONSUMER_PIPE_ID       = IPA_HW_PIPE_ID_6,
+	IPA_HW_Q6_LWA_DL_PRODUCER_PIPE_ID       = IPA_HW_PIPE_ID_16,
+	/* Test Simulator Pipes */
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_0_ID     = IPA_HW_PIPE_ID_0,
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_0_ID     = IPA_HW_PIPE_ID_12,
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_1_ID     = IPA_HW_PIPE_ID_1,
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_1_ID     = IPA_HW_PIPE_ID_10,
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_2_ID     = IPA_HW_PIPE_ID_2,
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_2_ID     = IPA_HW_PIPE_ID_11,
+	/* GSI UT channel SW->IPA */
+	IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_1_ID     = IPA_HW_PIPE_ID_5,
+	/* GSI UT channel IPA->SW */
+	IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_1_ID     = IPA_HW_PIPE_ID_17,
+	/* GSI UT channel SW->IPA */
+	IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_2_ID     = IPA_HW_PIPE_ID_7,
+	/* GSI UT channel IPA->SW */
+	IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_2_ID     = IPA_HW_PIPE_ID_18,
+	IPA_HW_Q6_DIAG_CONSUMER_PIPE_ID         = IPA_HW_PIPE_ID_19,
+	IPA_HW_Q6_PIPE_ID_MAX                   = IPA_HW_PIPE_ID_MAX,
+};
+
+enum ipa_hw_q6_pipe_ch_id_e {
+	/* Channels used by IPA Q6 driver */
+	IPA_HW_Q6_DL_CONSUMER_PIPE_CH_ID           = 0,
+	IPA_HW_Q6_CTL_CONSUMER_PIPE_CH_ID          = 1,
+	IPA_HW_Q6_UL_PRODUCER_PIPE_CH_ID           = 3,
+	IPA_HW_Q6_DL_PRODUCER_PIPE_CH_ID           = 4,
+
+	IPA_HW_Q6_LTE_DL_CONSUMER_PIPE_CH_ID       = 2,
+	IPA_HW_Q6_LWA_DL_PRODUCER_PIPE_CH_ID       = 5,
+	/* Test Simulator Channels */
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_0_CH_ID     = 6,
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_0_CH_ID     = 8,
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_1_CH_ID     = 9,
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_1_CH_ID     = 10,
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_2_CH_ID     = 11,
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_2_CH_ID     = 12,
+	/* GSI UT channel SW->IPA */
+	IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_1_CH_ID     = 13,
+	/* GSI UT channel IPA->SW */
+	IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_1_CH_ID     = 14,
+	/* GSI UT channel SW->IPA */
+	IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_2_CH_ID     = 15,
+	/* GSI UT channel IPA->SW */
+	IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_2_CH_ID     = 16,
+};
+
+/* System BAM Endpoints between Q6 & IPA */
+enum ipa_hw_q6_pipe_e {
+	/* DL Pipe IPA->Q6 */
+	IPA_HW_Q6_DL_PRODUCER_PIPE = 0,
+	/* UL Pipe IPA->Q6 */
+	IPA_HW_Q6_UL_PRODUCER_PIPE = 1,
+	/* DL Pipe Q6->IPA */
+	IPA_HW_Q6_DL_CONSUMER_PIPE = 2,
+	/* CTL Pipe Q6->IPA */
+	IPA_HW_Q6_CTL_CONSUMER_PIPE = 3,
+	/*  Q6 -> IPA,  LTE DL Optimized path */
+	IPA_HW_Q6_LTE_DL_CONSUMER_PIPE = 4,
+	/* LWA DL(Wifi to Q6) */
+	IPA_HW_Q6_LWA_DL_PRODUCER_PIPE = 5,
+	/* Diag status pipe IPA->Q6 */
+	/* Used only when FEATURE_IPA_TEST_PER_SIM is ON */
+	/* SIM Pipe IPA->Sim */
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_0 = 7,
+	/* SIM Pipe Sim->IPA */
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_1 = 8,
+	/* SIM Pipe Sim->IPA */
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_2 = 9,
+	/* SIM Pipe Sim->IPA */
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_0 = 10,
+	/* SIM B2B PROD Pipe  */
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_1 = 11,
+	/* SIM Pipe IPA->Sim */
+	IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_2 = 12,
+	/* End FEATURE_IPA_TEST_PER_SIM */
+	/* GSI UT channel SW->IPA */
+	IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_1 = 13,
+	/* GSI UT channel IPA->SW */
+	IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_1 = 14,
+	/* GSI UT channel SW->IPA */
+	IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_2 = 15,
+	/* GSI UT channel IPA->SW */
+	IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_2 = 16,
+	IPA_HW_Q6_PIPE_TOTAL
+};
+
+/* System BAM Endpoints between Q6 & IPA */
+enum ipa_hw_q6_gsi_ev_e { /* In Sdx24 0..11 */
+	/* DL Pipe IPA->Q6 */
+	IPA_HW_Q6_DL_PRODUCER_PIPE_GSI_EV = 0,
+	/* UL Pipe IPA->Q6 */
+	IPA_HW_Q6_UL_PRODUCER_PIPE_GSI_EV = 1,
+	/* DL Pipe Q6->IPA */
+	//IPA_HW_Q6_DL_CONSUMER_PIPE_GSI_EV = 2,
+	/* CTL Pipe Q6->IPA */
+	//IPA_HW_Q6_CTL_CONSUMER_PIPE_GSI_EV = 3,
+	/*  Q6 -> IPA,  LTE DL Optimized path */
+	//IPA_HW_Q6_LTE_DL_CONSUMER_PIPE_GSI_EV = 4,
+	/* LWA DL(Wifi to Q6) */
+	//IPA_HW_Q6_LWA_DL_PRODUCER_PIPE_GSI_EV = 5,
+	/* Diag status pipe IPA->Q6 */
+	//IPA_HW_Q6_DIAG_STATUS_PRODUCER_PIPE_GSI_EV = 6,
+	/* Used only when FEATURE_IPA_TEST_PER_SIM is ON */
+	/* SIM Pipe IPA->Sim */
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_0_GSI_EV = 2,
+	/* SIM Pipe Sim->IPA */
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_1_GSI_EV = 3,
+	/* SIM Pipe Sim->IPA */
+	IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_2_GSI_EV = 4,
+	/* SIM Pipe Sim->IPA */
+	IPA_HW_Q6_SIM_1_GSI_EV = 5,
+	IPA_HW_Q6_SIM_2_GSI_EV = 6,
+	IPA_HW_Q6_SIM_3_GSI_EV = 7,
+	IPA_HW_Q6_SIM_4_GSI_EV = 8,
+
+	IPA_HW_Q6_PIPE_GSI_EV_TOTAL
+};
+
+/*
+ * All the IRQ's supported by the IPA HW. Use this enum to set IRQ_EN
+ * register and read IRQ_STTS register
+ */
+enum ipa_hw_irq_e {
+	IPA_HW_IRQ_GSI_HWP                     = (1 << 25),
+	IPA_HW_IRQ_GSI_IPA_IF_TLV_RCVD         = (1 << 24),
+	IPA_HW_IRQ_GSI_EE_IRQ                  = (1 << 23),
+	IPA_HW_IRQ_DCMP_ERR                    = (1 << 22),
+	IPA_HW_IRQ_HWP_ERR                     = (1 << 21),
+	IPA_HW_IRQ_RED_MARKER_ABOVE            = (1 << 20),
+	IPA_HW_IRQ_YELLOW_MARKER_ABOVE         = (1 << 19),
+	IPA_HW_IRQ_RED_MARKER_BELOW            = (1 << 18),
+	IPA_HW_IRQ_YELLOW_MARKER_BELOW         = (1 << 17),
+	IPA_HW_IRQ_BAM_IDLE_IRQ                = (1 << 16),
+	IPA_HW_IRQ_TX_HOLB_DROP                = (1 << 15),
+	IPA_HW_IRQ_TX_SUSPEND                  = (1 << 14),
+	IPA_HW_IRQ_PROC_ERR                    = (1 << 13),
+	IPA_HW_IRQ_STEP_MODE                   = (1 << 12),
+	IPA_HW_IRQ_TX_ERR                      = (1 << 11),
+	IPA_HW_IRQ_DEAGGR_ERR                  = (1 << 10),
+	IPA_HW_IRQ_RX_ERR                      = (1 << 9),
+	IPA_HW_IRQ_PROC_TO_HW_ACK_Q_NOT_EMPTY  = (1 << 8),
+	IPA_HW_IRQ_HWP_RX_CMD_Q_NOT_FULL       = (1 << 7),
+	IPA_HW_IRQ_HWP_IN_Q_NOT_EMPTY          = (1 << 6),
+	IPA_HW_IRQ_HWP_IRQ_3                   = (1 << 5),
+	IPA_HW_IRQ_HWP_IRQ_2                   = (1 << 4),
+	IPA_HW_IRQ_HWP_IRQ_1                   = (1 << 3),
+	IPA_HW_IRQ_HWP_IRQ_0                   = (1 << 2),
+	IPA_HW_IRQ_EOT_COAL                    = (1 << 1),
+	IPA_HW_IRQ_BAD_SNOC_ACCESS             = (1 << 0),
+	IPA_HW_IRQ_NONE                        = 0,
+	IPA_HW_IRQ_ALL                         = 0xFFFFFFFF
+};
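+/*
+ * Example (illustrative only): composing an IRQ_EN value from the enum
+ * above.  ipa_write_irq_en() is a hypothetical helper, not an API this
+ * header defines.
+ *
+ *   u32 irq_en = IPA_HW_IRQ_TX_SUSPEND |
+ *                IPA_HW_IRQ_PROC_ERR |
+ *                IPA_HW_IRQ_BAD_SNOC_ACCESS;
+ *
+ *   ipa_write_irq_en(irq_en);
+ */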
+
+/*
+ * All the IRQ sources supported by the IPA HW. Use this enum to set
+ * IRQ_SRCS register
+ */
+enum ipa_hw_irq_srcs_e {
+	IPA_HW_IRQ_SRCS_PIPE_0  = (1 << IPA_HW_PIPE_ID_0),
+	IPA_HW_IRQ_SRCS_PIPE_1  = (1 << IPA_HW_PIPE_ID_1),
+	IPA_HW_IRQ_SRCS_PIPE_2  = (1 << IPA_HW_PIPE_ID_2),
+	IPA_HW_IRQ_SRCS_PIPE_3  = (1 << IPA_HW_PIPE_ID_3),
+	IPA_HW_IRQ_SRCS_PIPE_4  = (1 << IPA_HW_PIPE_ID_4),
+	IPA_HW_IRQ_SRCS_PIPE_5  = (1 << IPA_HW_PIPE_ID_5),
+	IPA_HW_IRQ_SRCS_PIPE_6  = (1 << IPA_HW_PIPE_ID_6),
+	IPA_HW_IRQ_SRCS_PIPE_7  = (1 << IPA_HW_PIPE_ID_7),
+	IPA_HW_IRQ_SRCS_PIPE_8  = (1 << IPA_HW_PIPE_ID_8),
+	IPA_HW_IRQ_SRCS_PIPE_9  = (1 << IPA_HW_PIPE_ID_9),
+	IPA_HW_IRQ_SRCS_PIPE_10 = (1 << IPA_HW_PIPE_ID_10),
+	IPA_HW_IRQ_SRCS_PIPE_11 = (1 << IPA_HW_PIPE_ID_11),
+	IPA_HW_IRQ_SRCS_PIPE_12 = (1 << IPA_HW_PIPE_ID_12),
+	IPA_HW_IRQ_SRCS_PIPE_13 = (1 << IPA_HW_PIPE_ID_13),
+	IPA_HW_IRQ_SRCS_PIPE_14 = (1 << IPA_HW_PIPE_ID_14),
+	IPA_HW_IRQ_SRCS_PIPE_15 = (1 << IPA_HW_PIPE_ID_15),
+	IPA_HW_IRQ_SRCS_PIPE_16 = (1 << IPA_HW_PIPE_ID_16),
+	IPA_HW_IRQ_SRCS_PIPE_17 = (1 << IPA_HW_PIPE_ID_17),
+	IPA_HW_IRQ_SRCS_PIPE_18 = (1 << IPA_HW_PIPE_ID_18),
+	IPA_HW_IRQ_SRCS_PIPE_19 = (1 << IPA_HW_PIPE_ID_19),
+	IPA_HW_IRQ_SRCS_PIPE_20 = (1 << IPA_HW_PIPE_ID_20),
+	IPA_HW_IRQ_SRCS_PIPE_21 = (1 << IPA_HW_PIPE_ID_21),
+	IPA_HW_IRQ_SRCS_PIPE_22 = (1 << IPA_HW_PIPE_ID_22),
+	IPA_HW_IRQ_SRCS_NONE    = 0,
+	IPA_HW_IRQ_SRCS_ALL     = 0xFFFFFFFF,
+};
+
+/*
+ * Total number of channel contexts that need to be saved for APPS
+ */
+#define IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_A7          14
+
+/*
+ * Total number of channel contexts that need to be saved for Q6
+ */
+#define IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_Q6          6
+
+/*
+ * Total number of channel contexts that need to be saved for UC
+ */
+#define IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_UC          2
+
+/*
+ * Total number of event ring contexts that need to be saved for APPS
+ */
+#define IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_A7         12
+
+/*
+ * Total number of event ring contexts that need to be saved for Q6
+ */
+#define IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_Q6         4
+
+/*
+ * Total number of event ring contexts that need to be saved for UC
+ */
+#define IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_UC         1
+
+/*
+ * Total number of endpoints for which ipa_reg_save.pipes[endp_number]
+ * are not saved by default (only if ipa_cfg.gen.full_reg_trace =
+ * true). There are no extra endpoints in Stingray.
+ */
+#define IPA_HW_REG_SAVE_NUM_ENDP_EXTRA               0
+
+/*
+ * Total number of endpoints for which ipa_reg_save.pipes[endp_number]
+ * are always saved
+ */
+#define IPA_HW_REG_SAVE_NUM_ACTIVE_PIPES             23
+
+/*
+ * Macro to set the active flag for all active pipe indexed registers
+ */
+#define IPA_HW_REG_SAVE_CFG_ENTRY_PIPE_ENDP_ACTIVE() \
+	do { \
+		ipa_reg_save.ipa.pipes[0].active = true; \
+		ipa_reg_save.ipa.pipes[1].active = true; \
+		ipa_reg_save.ipa.pipes[2].active = true; \
+		ipa_reg_save.ipa.pipes[3].active = true; \
+		ipa_reg_save.ipa.pipes[4].active = true; \
+		ipa_reg_save.ipa.pipes[5].active = true; \
+		ipa_reg_save.ipa.pipes[6].active = true; \
+		ipa_reg_save.ipa.pipes[7].active = true; \
+		ipa_reg_save.ipa.pipes[8].active = true; \
+		ipa_reg_save.ipa.pipes[9].active = true; \
+		ipa_reg_save.ipa.pipes[10].active = true; \
+		ipa_reg_save.ipa.pipes[11].active = true; \
+		ipa_reg_save.ipa.pipes[12].active = true; \
+		ipa_reg_save.ipa.pipes[13].active = true; \
+		ipa_reg_save.ipa.pipes[14].active = true; \
+		ipa_reg_save.ipa.pipes[15].active = true; \
+		ipa_reg_save.ipa.pipes[16].active = true; \
+		ipa_reg_save.ipa.pipes[17].active = true; \
+		ipa_reg_save.ipa.pipes[18].active = true; \
+		ipa_reg_save.ipa.pipes[19].active = true; \
+		ipa_reg_save.ipa.pipes[20].active = true; \
+		ipa_reg_save.ipa.pipes[21].active = true; \
+		ipa_reg_save.ipa.pipes[22].active = true; \
+	} while (0)
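+/*
+ * The fully unrolled expansion above is kept as generated; an
+ * equivalent, shorter form (sketch only) would loop over the
+ * IPA_HW_REG_SAVE_NUM_ACTIVE_PIPES entries:
+ *
+ *   do { \
+ *       int i; \
+ *       for (i = 0; i < IPA_HW_REG_SAVE_NUM_ACTIVE_PIPES; i++) \
+ *           ipa_reg_save.ipa.pipes[i].active = true; \
+ *   } while (0)
+ */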
+
+
+/*
+ * Total number of rx splt cmdq's; see:
+ * ipa_rx_splt_cmdq_n_cmd[IPA_RX_SPLT_CMDQ_MAX]
+ */
+#define IPA_RX_SPLT_CMDQ_MAX 4
+
+/*
+ * Macro to define a particular register cfg entry for the remaining
+ * pipe indexed registers.  In the Stingray case we don't have extra
+ * endpoints, so it is intentionally empty
+ */
+#define IPA_HW_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(REG_NAME, VAR_NAME)  \
+	{ 0, 0 }
+
+/*
+ * Macro to set the active flag for all active pipe indexed registers.
+ * In the Stingray case we don't have extra endpoints, so it is
+ * intentionally empty
+ */
+#define IPA_HW_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA_ACTIVE()  \
+	do { \
+	} while (0)
+
+#endif /* #if !defined(_IPA_HW_COMMON_EX_H_) */
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hwio.h b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hwio.h
new file mode 100644
index 0000000..9ab8667
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hwio.h
@@ -0,0 +1,10768 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#if !defined(_IPA_HWIO_H_)
+#define _IPA_HWIO_H_
+/*
+ *
+ * HWIO register definitions to follow:
+ *
+ */
+#define IPA_GSI_TOP_GSI_REG_BASE (IPA_0_IPA_WRAPPER_BASE + 0x00004000)
+#define IPA_GSI_TOP_GSI_REG_BASE_PHYS (IPA_0_IPA_WRAPPER_BASE_PHYS + \
+				       0x00004000)
+#define IPA_GSI_TOP_GSI_REG_BASE_OFFS 0x00004000
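+/*
+ * Note on the layout below: each register is emitted as an
+ * _ADDR/_PHYS/_OFFS triad, i.e. the same instance offset applied to the
+ * virtual base, the physical base, and the block-relative offset, so
+ * _ADDR minus REG_BASE always equals _OFFS minus REG_BASE_OFFS.
+ */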
+#define HWIO_IPA_GSI_TOP_GSI_CFG_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+				       0x00000000)
+#define HWIO_IPA_GSI_TOP_GSI_CFG_PHYS (IPA_GSI_TOP_GSI_REG_BASE_PHYS + \
+				       0x00000000)
+#define HWIO_IPA_GSI_TOP_GSI_CFG_OFFS (IPA_GSI_TOP_GSI_REG_BASE_OFFS + \
+				       0x00000000)
+#define HWIO_IPA_GSI_TOP_GSI_CFG_RMSK 0xf3f
+#define HWIO_IPA_GSI_TOP_GSI_CFG_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_CFG_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_CFG_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_CFG_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_CFG_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_CFG_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_CFG_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_CFG_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_CFG_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_CFG_IN)
+#define HWIO_IPA_GSI_TOP_GSI_CFG_SLEEP_CLK_DIV_BMSK 0xf00
+#define HWIO_IPA_GSI_TOP_GSI_CFG_SLEEP_CLK_DIV_SHFT 0x8
+#define HWIO_IPA_GSI_TOP_GSI_CFG_BP_MTRIX_DISABLE_BMSK 0x20
+#define HWIO_IPA_GSI_TOP_GSI_CFG_BP_MTRIX_DISABLE_SHFT 0x5
+#define HWIO_IPA_GSI_TOP_GSI_CFG_GSI_PWR_CLPS_BMSK 0x10
+#define HWIO_IPA_GSI_TOP_GSI_CFG_GSI_PWR_CLPS_SHFT 0x4
+#define HWIO_IPA_GSI_TOP_GSI_CFG_UC_IS_MCS_BMSK 0x8
+#define HWIO_IPA_GSI_TOP_GSI_CFG_UC_IS_MCS_SHFT 0x3
+#define HWIO_IPA_GSI_TOP_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK 0x4
+#define HWIO_IPA_GSI_TOP_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT 0x2
+#define HWIO_IPA_GSI_TOP_GSI_CFG_MCS_ENABLE_BMSK 0x2
+#define HWIO_IPA_GSI_TOP_GSI_CFG_MCS_ENABLE_SHFT 0x1
+#define HWIO_IPA_GSI_TOP_GSI_CFG_GSI_ENABLE_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_GSI_CFG_GSI_ENABLE_SHFT 0x0
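+/*
+ * The _OUTM/_BMSK/_SHFT triplets compose into a read-modify-write of a
+ * single field.  A hedged sketch, setting SLEEP_CLK_DIV to `div`
+ * (assumes in_dword_masked() and out_dword_masked_ns() are provided by
+ * the surrounding HWIO support code, as the macros above require):
+ *
+ *   HWIO_IPA_GSI_TOP_GSI_CFG_OUTM(
+ *       HWIO_IPA_GSI_TOP_GSI_CFG_SLEEP_CLK_DIV_BMSK,
+ *       div << HWIO_IPA_GSI_TOP_GSI_CFG_SLEEP_CLK_DIV_SHFT);
+ */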
+#define HWIO_IPA_GSI_TOP_GSI_MANAGER_MCS_CODE_VER_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000008)
+#define HWIO_IPA_GSI_TOP_GSI_MANAGER_MCS_CODE_VER_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000008)
+#define HWIO_IPA_GSI_TOP_GSI_MANAGER_MCS_CODE_VER_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000008)
+#define HWIO_IPA_GSI_TOP_GSI_ZEROS_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+					 0x00000010)
+#define HWIO_IPA_GSI_TOP_GSI_ZEROS_PHYS (IPA_GSI_TOP_GSI_REG_BASE_PHYS + \
+					 0x00000010)
+#define HWIO_IPA_GSI_TOP_GSI_ZEROS_OFFS (IPA_GSI_TOP_GSI_REG_BASE_OFFS + \
+					 0x00000010)
+#define HWIO_IPA_GSI_TOP_GSI_PERIPH_BASE_ADDR_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000018)
+#define HWIO_IPA_GSI_TOP_GSI_PERIPH_BASE_ADDR_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000018)
+#define HWIO_IPA_GSI_TOP_GSI_PERIPH_BASE_ADDR_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000018)
+#define HWIO_IPA_GSI_TOP_GSI_PERIPH_BASE_ADDR_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000001c)
+#define HWIO_IPA_GSI_TOP_GSI_PERIPH_BASE_ADDR_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000001c)
+#define HWIO_IPA_GSI_TOP_GSI_PERIPH_BASE_ADDR_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000001c)
+#define HWIO_IPA_GSI_TOP_GSI_PERIPH_PENDING_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00000020)
+#define HWIO_IPA_GSI_TOP_GSI_PERIPH_PENDING_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000020)
+#define HWIO_IPA_GSI_TOP_GSI_PERIPH_PENDING_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000020)
+#define HWIO_IPA_GSI_TOP_GSI_MOQA_CFG_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+					    0x00000030)
+#define HWIO_IPA_GSI_TOP_GSI_MOQA_CFG_PHYS (IPA_GSI_TOP_GSI_REG_BASE_PHYS \
+					    + 0x00000030)
+#define HWIO_IPA_GSI_TOP_GSI_MOQA_CFG_OFFS (IPA_GSI_TOP_GSI_REG_BASE_OFFS \
+					    + 0x00000030)
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+					   0x00000038)
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_PHYS (IPA_GSI_TOP_GSI_REG_BASE_PHYS + \
+					   0x00000038)
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_OFFS (IPA_GSI_TOP_GSI_REG_BASE_OFFS + \
+					   0x00000038)
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_RMSK 0xff03
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_REE_CFG_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_REE_CFG_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_REE_CFG_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_REE_CFG_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_REE_CFG_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_REE_CFG_IN)
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_MAX_BURST_SIZE_BMSK 0xff00
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_MAX_BURST_SIZE_SHFT 0x8
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_CHANNEL_EMPTY_INT_ENABLE_BMSK 0x2
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_CHANNEL_EMPTY_INT_ENABLE_SHFT 0x1
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_MOVE_TO_ESC_CLR_MODE_TRSH_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_GSI_REE_CFG_MOVE_TO_ESC_CLR_MODE_TRSH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_CGC_CTRL_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+					    0x00000060)
+#define HWIO_IPA_GSI_TOP_GSI_CGC_CTRL_PHYS (IPA_GSI_TOP_GSI_REG_BASE_PHYS \
+					    + 0x00000060)
+#define HWIO_IPA_GSI_TOP_GSI_CGC_CTRL_OFFS (IPA_GSI_TOP_GSI_REG_BASE_OFFS \
+					    + 0x00000060)
+#define HWIO_IPA_GSI_TOP_GSI_MSI_CACHEATTR_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						 + 0x00000080)
+#define HWIO_IPA_GSI_TOP_GSI_MSI_CACHEATTR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000080)
+#define HWIO_IPA_GSI_TOP_GSI_MSI_CACHEATTR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000080)
+#define HWIO_IPA_GSI_TOP_GSI_EVENT_CACHEATTR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000084)
+#define HWIO_IPA_GSI_TOP_GSI_EVENT_CACHEATTR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000084)
+#define HWIO_IPA_GSI_TOP_GSI_EVENT_CACHEATTR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000084)
+#define HWIO_IPA_GSI_TOP_GSI_DATA_CACHEATTR_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00000088)
+#define HWIO_IPA_GSI_TOP_GSI_DATA_CACHEATTR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000088)
+#define HWIO_IPA_GSI_TOP_GSI_DATA_CACHEATTR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000088)
+#define HWIO_IPA_GSI_TOP_GSI_TRE_CACHEATTR_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						 + 0x00000090)
+#define HWIO_IPA_GSI_TOP_GSI_TRE_CACHEATTR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000090)
+#define HWIO_IPA_GSI_TOP_GSI_TRE_CACHEATTR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000090)
+#define HWIO_IPA_GSI_TOP_IC_DISABLE_CHNL_BCK_PRS_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000a0)
+#define HWIO_IPA_GSI_TOP_IC_DISABLE_CHNL_BCK_PRS_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000a0)
+#define HWIO_IPA_GSI_TOP_IC_DISABLE_CHNL_BCK_PRS_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000a0)
+#define HWIO_IPA_GSI_TOP_IC_DISABLE_CHNL_BCK_PRS_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000a4)
+#define HWIO_IPA_GSI_TOP_IC_DISABLE_CHNL_BCK_PRS_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000a4)
+#define HWIO_IPA_GSI_TOP_IC_DISABLE_CHNL_BCK_PRS_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000a4)
+#define HWIO_IPA_GSI_TOP_IC_GEN_EVNT_BCK_PRS_LSB_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000a8)
+#define HWIO_IPA_GSI_TOP_IC_GEN_EVNT_BCK_PRS_LSB_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000a8)
+#define HWIO_IPA_GSI_TOP_IC_GEN_EVNT_BCK_PRS_LSB_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000a8)
+#define HWIO_IPA_GSI_TOP_IC_GEN_EVNT_BCK_PRS_MSB_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000ac)
+#define HWIO_IPA_GSI_TOP_IC_GEN_EVNT_BCK_PRS_MSB_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000ac)
+#define HWIO_IPA_GSI_TOP_IC_GEN_EVNT_BCK_PRS_MSB_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000ac)
+#define HWIO_IPA_GSI_TOP_IC_GEN_INT_BCK_PRS_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000b0)
+#define HWIO_IPA_GSI_TOP_IC_GEN_INT_BCK_PRS_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000b0)
+#define HWIO_IPA_GSI_TOP_IC_GEN_INT_BCK_PRS_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000b0)
+#define HWIO_IPA_GSI_TOP_IC_GEN_INT_BCK_PRS_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000b4)
+#define HWIO_IPA_GSI_TOP_IC_GEN_INT_BCK_PRS_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000b4)
+#define HWIO_IPA_GSI_TOP_IC_GEN_INT_BCK_PRS_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000b4)
+#define HWIO_IPA_GSI_TOP_IC_STOP_INT_MOD_BCK_PRS_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000b8)
+#define HWIO_IPA_GSI_TOP_IC_STOP_INT_MOD_BCK_PRS_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000b8)
+#define HWIO_IPA_GSI_TOP_IC_STOP_INT_MOD_BCK_PRS_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000b8)
+#define HWIO_IPA_GSI_TOP_IC_STOP_INT_MOD_BCK_PRS_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000bc)
+#define HWIO_IPA_GSI_TOP_IC_STOP_INT_MOD_BCK_PRS_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000bc)
+#define HWIO_IPA_GSI_TOP_IC_STOP_INT_MOD_BCK_PRS_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000bc)
+#define HWIO_IPA_GSI_TOP_IC_PROCESS_DESC_BCK_PRS_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000c0)
+#define HWIO_IPA_GSI_TOP_IC_PROCESS_DESC_BCK_PRS_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000c0)
+#define HWIO_IPA_GSI_TOP_IC_PROCESS_DESC_BCK_PRS_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000c0)
+#define HWIO_IPA_GSI_TOP_IC_PROCESS_DESC_BCK_PRS_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000c4)
+#define HWIO_IPA_GSI_TOP_IC_PROCESS_DESC_BCK_PRS_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000c4)
+#define HWIO_IPA_GSI_TOP_IC_PROCESS_DESC_BCK_PRS_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000c4)
+#define HWIO_IPA_GSI_TOP_IC_TLV_STOP_BCK_PRS_LSB_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000c8)
+#define HWIO_IPA_GSI_TOP_IC_TLV_STOP_BCK_PRS_LSB_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000c8)
+#define HWIO_IPA_GSI_TOP_IC_TLV_STOP_BCK_PRS_LSB_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000c8)
+#define HWIO_IPA_GSI_TOP_IC_TLV_STOP_BCK_PRS_MSB_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000cc)
+#define HWIO_IPA_GSI_TOP_IC_TLV_STOP_BCK_PRS_MSB_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000cc)
+#define HWIO_IPA_GSI_TOP_IC_TLV_STOP_BCK_PRS_MSB_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000cc)
+#define HWIO_IPA_GSI_TOP_IC_TLV_RESET_BCK_PRS_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000d0)
+#define HWIO_IPA_GSI_TOP_IC_TLV_RESET_BCK_PRS_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000d0)
+#define HWIO_IPA_GSI_TOP_IC_TLV_RESET_BCK_PRS_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000d0)
+#define HWIO_IPA_GSI_TOP_IC_TLV_RESET_BCK_PRS_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000d4)
+#define HWIO_IPA_GSI_TOP_IC_TLV_RESET_BCK_PRS_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000d4)
+#define HWIO_IPA_GSI_TOP_IC_TLV_RESET_BCK_PRS_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000d4)
+#define HWIO_IPA_GSI_TOP_IC_RGSTR_TIMER_BCK_PRS_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000d8)
+#define HWIO_IPA_GSI_TOP_IC_RGSTR_TIMER_BCK_PRS_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000d8)
+#define HWIO_IPA_GSI_TOP_IC_RGSTR_TIMER_BCK_PRS_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000d8)
+#define HWIO_IPA_GSI_TOP_IC_RGSTR_TIMER_BCK_PRS_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000dc)
+#define HWIO_IPA_GSI_TOP_IC_RGSTR_TIMER_BCK_PRS_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000dc)
+#define HWIO_IPA_GSI_TOP_IC_RGSTR_TIMER_BCK_PRS_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000dc)
+#define HWIO_IPA_GSI_TOP_IC_READ_BCK_PRS_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000e0)
+#define HWIO_IPA_GSI_TOP_IC_READ_BCK_PRS_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000e0)
+#define HWIO_IPA_GSI_TOP_IC_READ_BCK_PRS_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000e0)
+#define HWIO_IPA_GSI_TOP_IC_READ_BCK_PRS_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000e4)
+#define HWIO_IPA_GSI_TOP_IC_READ_BCK_PRS_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000e4)
+#define HWIO_IPA_GSI_TOP_IC_READ_BCK_PRS_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000e4)
+#define HWIO_IPA_GSI_TOP_IC_WRITE_BCK_PRS_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000e8)
+#define HWIO_IPA_GSI_TOP_IC_WRITE_BCK_PRS_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000e8)
+#define HWIO_IPA_GSI_TOP_IC_WRITE_BCK_PRS_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000e8)
+#define HWIO_IPA_GSI_TOP_IC_WRITE_BCK_PRS_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000ec)
+#define HWIO_IPA_GSI_TOP_IC_WRITE_BCK_PRS_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000ec)
+#define HWIO_IPA_GSI_TOP_IC_WRITE_BCK_PRS_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000ec)
+#define HWIO_IPA_GSI_TOP_IC_UCONTROLLER_GPR_BCK_PRS_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000f0)
+#define HWIO_IPA_GSI_TOP_IC_UCONTROLLER_GPR_BCK_PRS_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000f0)
+#define HWIO_IPA_GSI_TOP_IC_UCONTROLLER_GPR_BCK_PRS_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000f0)
+#define HWIO_IPA_GSI_TOP_IC_UCONTROLLER_GPR_BCK_PRS_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000000f4)
+#define HWIO_IPA_GSI_TOP_IC_UCONTROLLER_GPR_BCK_PRS_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000000f4)
+#define HWIO_IPA_GSI_TOP_IC_UCONTROLLER_GPR_BCK_PRS_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000000f4)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_REE_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						 + 0x00000100)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_REE_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000100)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_REE_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000100)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_EVT_ENG_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000104)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_EVT_ENG_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000104)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_EVT_ENG_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000104)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_INT_ENG_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000108)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_INT_ENG_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000108)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_INT_ENG_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000108)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_CSR_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						 + 0x0000010c)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_CSR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000010c)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_CSR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000010c)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_TLV_ENG_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000110)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_TLV_ENG_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000110)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_TLV_ENG_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000110)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_TIMER_ENG_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000114)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_TIMER_ENG_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000114)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_TIMER_ENG_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000114)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_DB_ENG_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000118)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_DB_ENG_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000118)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_DB_ENG_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000118)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_RD_WR_ENG_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000011c)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_RD_WR_ENG_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000011c)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_RD_WR_ENG_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000011c)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_UCONTROLLER_ENG_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000120)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_UCONTROLLER_ENG_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000120)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_UCONTROLLER_ENG_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000120)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_SDMA_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00000124)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_SDMA_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000124)
+#define HWIO_IPA_GSI_TOP_IC_INT_WEIGHT_SDMA_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000124)
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_CFG_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+					    0x0000003c)
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_CFG_PHYS (IPA_GSI_TOP_GSI_REG_BASE_PHYS \
+					    + 0x0000003c)
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_CFG_OFFS (IPA_GSI_TOP_GSI_REG_BASE_OFFS \
+					    + 0x0000003c)
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_CACHEATTR_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00000094)
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_CACHEATTR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000094)
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_CACHEATTR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000094)
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_SG_IOVEC_LSB_n_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000140 + 0x8 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_SG_IOVEC_LSB_n_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000140 + 0x8 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_SG_IOVEC_LSB_n_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000140 + 0x8 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_SG_IOVEC_MSB_n_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000144 + 0x8 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_SG_IOVEC_MSB_n_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000144 + 0x8 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_SDMA_SG_IOVEC_MSB_n_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000144 + 0x8 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_MANAGER_EE_QOS_n_ADDR(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000300 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_MANAGER_EE_QOS_n_PHYS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000300 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_MANAGER_EE_QOS_n_OFFS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000300 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000200)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000200)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000200)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_IN \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_ADDR,	\
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_INM(m) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_ADDR,	\
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_ADDR,	\
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_OUTM(m, \
+							       v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_ADDR,	\
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_IN)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_SHRAM_PTR_BMSK \
+	0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_SHRAM_PTR_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000204)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000204)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000204)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_IN \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_ADDR,	\
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_INM(m) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_ADDR,	\
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_ADDR,	\
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_OUTM(m, \
+							       v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_ADDR,	\
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_IN)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_SHRAM_PTR_BMSK \
+	0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_SHRAM_PTR_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000208)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000208)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000208)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_IN \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_INM(m) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_OUT(v) \
+	out_dword(HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_ADDR, \
+		  v)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_OUTM(m, \
+								 v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_IN)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_SHRAM_PTR_BMSK \
+	0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_SHRAM_PTR_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000020c)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000020c)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000020c)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_IN \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_INM(m) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_OUT(v) \
+	out_dword(HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_ADDR, \
+		  v)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_OUTM(m, \
+								 v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_IN)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_SHRAM_PTR_BMSK \
+	0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_SHRAM_PTR_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000240)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000240)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000240)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_IN \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_INM(m) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_OUT(v) \
+	out_dword(HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_ADDR, \
+		  v)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_OUTM(m, \
+								v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_IN)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_SHRAM_PTR_BMSK \
+	0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_SHRAM_PTR_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000244)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000244)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000244)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_IN \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_INM(m) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_OUT(v) \
+	out_dword(HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_ADDR, \
+		  v)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_OUTM(m, \
+								 v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_IN)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_SHRAM_PTR_BMSK \
+	0xffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_SHRAM_PTR_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_MCS_SCRATCH_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000248)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_MCS_SCRATCH_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000248)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_MCS_SCRATCH_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000248)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_MCS_SCRATCH1_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000024c)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_MCS_SCRATCH1_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000024c)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_MCS_SCRATCH1_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000024c)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_MCS_SCRATCH2_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000250)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_MCS_SCRATCH2_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000250)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_MCS_SCRATCH2_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000250)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_VP_TRANS_TABLE_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000254)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_VP_TRANS_TABLE_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000254)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_VP_TRANS_TABLE_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000254)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_VP_TRANS_TABLE_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000258)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_VP_TRANS_TABLE_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000258)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EV_VP_TRANS_TABLE_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000258)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_USER_INFO_DATA_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000025c)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_USER_INFO_DATA_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000025c)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_USER_INFO_DATA_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000025c)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_CMD_FIFO_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000260)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_CMD_FIFO_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000260)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_EE_CMD_FIFO_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000260)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CMD_FIFO_BASE_ADDR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000264)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CMD_FIFO_BASE_ADDR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000264)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CMD_FIFO_BASE_ADDR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000264)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000400)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000400)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000400)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_OUTM(m, \
+						  v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000404)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000404)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000404)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_INM(m) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_ADDR, \
+			m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_OUTM(m, \
+							  v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TLV_CH_NOT_FULL_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000408)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TLV_CH_NOT_FULL_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000408)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TLV_CH_NOT_FULL_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000408)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00000418)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000418)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000418)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_IN in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x0000041c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000041c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000041c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_IN in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000420)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000420)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000420)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_OUTM(m, \
+						  v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000424)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000424)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000424)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_ADDR,	\
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_ADDR,	\
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_ADDR,	\
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_OUTM(m, \
+						       v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_ADDR,	\
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000428)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000428)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000428)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_OUTM(m, \
+						    v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000042c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000042c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000042c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_INM(m) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_ADDR, \
+			m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_OUTM(m, \
+							  v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000430)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000430)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000430)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000434)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000434)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000434)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000438)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000438)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000438)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000043c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000043c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000043c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_IN in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_OUTM(m, \
+							 v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000440)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000440)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000440)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_INM(m) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_ADDR, \
+			m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_OUTM(m, \
+							  v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000444)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000444)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000444)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_IN in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_OUTM(m, \
+							 v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000448)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000448)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000448)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_INM(m) in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_OUTM(m,	\
+						     v)	\
+	out_dword_masked_ns(HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_ADDR, \
+			    m, \
+			    v, \
+			    HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000044c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000044c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000044c)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_INM(m) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_ADDR, \
+			m)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_OUT(v) out_dword(	\
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_OUTM(m, \
+							   v) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_IN)
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_SDMA_INT_n_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000450 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_SDMA_INT_n_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000450 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_SDMA_INT_n_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000450 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_ADDR(n) (IPA_GSI_TOP_GSI_REG_BASE \
+						 + 0x0001b000 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0001b000 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0001b000 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_MAXn 8191
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INI(n) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_ADDR(n), \
+		HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INMI(n, mask) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_OUTI(n, val) \
+		out_dword( \
+			HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_ADDR(n), \
+			val)
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_OUTMI(n, mask, val) \
+		out_dword_masked_ns( \
+			HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_ADDR(n), \
+			mask, \
+			val, \
+			HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INI(n))
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_ADDR(n) (IPA_GSI_TOP_GSI_REG_BASE + \
+					      0x00002000 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00002000 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00002000 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_MAXn 1343
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_n_ADDR(n), \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_n_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_n_ADDR(n), \
+		val)
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_OUTMI(n, mask, \
+					   val) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_n_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_GSI_SHRAM_n_INI(n))
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_SHRAM_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_SHRAM_SHFT 0x0
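+/*
+ * Indexed registers (e.g. GSI_INST_RAM_n and GSI_SHRAM_n above) take
+ * the instance index in _ADDR(n) (base + stride * n), bound it with
+ * _MAXn, and use _INI/_INMI/_OUTI/_OUTMI as the indexed accessor
+ * forms.  The two-dimensional EE_n/CH_k tables below add a second
+ * index and use _INI2/_INMI2/_OUTI2/_OUTMI2 accordingly.
+ */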
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_ADDR(n, \
+							 k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00003800 + 0x80 * (n) + 0x4 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_PHYS(n, \
+							 k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00003800 + 0x80 * (n) + \
+		0x4 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_OFFS(n, \
+							 k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00003800 + 0x80 * (n) + \
+		0x4 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_RMSK 0x3f
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_MAXn 2
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_MAXk 22
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_INI2(n, \
+							 k) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_ADDR( \
+				n, \
+				k), \
+			HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_INMI2(n, k,	\
+							  mask)	\
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_ADDR(n, k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_OUTI2(n, k,	\
+							  val) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_ADDR(n, \
+								 k), \
+		val)
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_OUTMI2(n, k, mask, \
+							   val)	\
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_ADDR(n, k),	\
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_VALID_BMSK 0x20
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_VALID_SHFT 0x5
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_PHY_CH_BMSK 0x1f
+#define HWIO_IPA_GSI_TOP_GSI_MAP_EE_n_CH_k_VP_TABLE_PHY_CH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+						0x00001000)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001000)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001000)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_RMSK 0xf00ff
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_ADDR,	\
+		HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_ADDR,	\
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_ADDR,	\
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_ADDR,	\
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_IN)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_HW_EVENTS_SEL_BMSK 0xf0000
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_HW_EVENTS_SEL_SHFT 0x10
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_BMSK 0xff
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_ZEROS_FVAL 0x0
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_0_FVAL 0x1
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_1_FVAL 0x2
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_2_FVAL 0x3
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_3_FVAL 0x4
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_4_FVAL 0x5
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_DB_ENG_FVAL 0x9
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_0_FVAL 0xb
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_1_FVAL 0xc
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_2_FVAL 0xd
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_3_FVAL 0xe
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_4_FVAL 0xf
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_5_FVAL 0x10
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_6_FVAL 0x11
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_7_FVAL 0x12
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_0_FVAL 0x13
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_1_FVAL 0x14
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_2_FVAL 0x15
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_3_FVAL 0x16
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_4_FVAL 0x17
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_EVE_5_FVAL 0x18
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IE_0_FVAL 0x1b
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IE_1_FVAL 0x1c
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IE_2_FVAL 0x1d
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_0_FVAL 0x1f
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_1_FVAL 0x20
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_2_FVAL 0x21
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_3_FVAL 0x22
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_4_FVAL 0x23
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MOQA_0_FVAL 0x27
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MOQA_1_FVAL 0x28
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MOQA_2_FVAL 0x29
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MOQA_3_FVAL 0x2a
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_TMR_0_FVAL 0x2b
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_TMR_1_FVAL 0x2c
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_TMR_2_FVAL 0x2d
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_TMR_3_FVAL 0x2e
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_RD_WR_0_FVAL \
+	0x33
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_RD_WR_1_FVAL \
+	0x34
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_RD_WR_2_FVAL \
+	0x35
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_RD_WR_3_FVAL \
+	0x36
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_CSR_FVAL 0x3a
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_SDMA_0_FVAL 0x3c
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_SMDA_1_FVAL 0x3d
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_CSR_1_FVAL 0x3e
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_CSR_2_FVAL 0x3f
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_MCS_5_FVAL 0x40
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_IC_5_FVAL 0x41
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_CSR_3_FVAL 0x42
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_TLV_0_FVAL 0x43
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_REE_8_FVAL 0x44
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+						0x00001008)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001008)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001008)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_ADDR,	\
+		HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_ADDR,	\
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_GSI_TESTBUS_REG_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_TEST_BUS_REG_GSI_TESTBUS_REG_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00001010)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001010)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001010)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_RMSK 0x1fff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_IN in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_SDMA_BUSY_BMSK 0x1000
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_SDMA_BUSY_SHFT 0xc
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_IC_BUSY_BMSK 0x800
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_IC_BUSY_SHFT 0xb
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_UC_BUSY_BMSK 0x400
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_UC_BUSY_SHFT 0xa
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_DBG_CNT_BUSY_BMSK 0x200
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_DBG_CNT_BUSY_SHFT 0x9
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_DB_ENG_BUSY_BMSK 0x100
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_DB_ENG_BUSY_SHFT 0x8
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_REE_PWR_CLPS_BUSY_BMSK 0x80
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_REE_PWR_CLPS_BUSY_SHFT 0x7
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_INT_ENG_BUSY_BMSK 0x40
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_INT_ENG_BUSY_SHFT 0x6
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_EV_ENG_BUSY_BMSK 0x20
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_EV_ENG_BUSY_SHFT 0x5
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_RD_WR_BUSY_BMSK 0x10
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_RD_WR_BUSY_SHFT 0x4
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_TIMER_BUSY_BMSK 0x8
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_TIMER_BUSY_SHFT 0x3
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_MCS_BUSY_BMSK 0x4
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_MCS_BUSY_SHFT 0x2
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_REE_BUSY_BMSK 0x2
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_REE_BUSY_SHFT 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_CSR_BUSY_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_CSR_BUSY_SHFT 0x0
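+/*
+ * Field extraction composes the accessors above; a minimal,
+ * illustrative sketch for sampling the MCS busy bit (assuming the
+ * standard in_dword_masked() read semantics):
+ *
+ *   u32 mcs_busy = (HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_IN &
+ *           HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_MCS_BUSY_BMSK) >>
+ *          HWIO_IPA_GSI_TOP_GSI_DEBUG_BUSY_REG_MCS_BUSY_SHFT;
+ */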
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001014)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001014)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001014)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_CHID_BIT_MAP_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING_CHID_BIT_MAP_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001018)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001018)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001018)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_CHID_BIT_MAP_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING_CHID_BIT_MAP_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_ADDR (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000101c)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000101c)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000101c)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_CHID_BIT_MAP_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING_CHID_BIT_MAP_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTER_CFGn_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001200 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTER_CFGn_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001200 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTER_CFGn_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001200 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001240 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001240 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001240 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_MAXn 7
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_ADDR(n), \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_COUNTER_VALUE_BMSK 0xffff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_COUNTERn_COUNTER_VALUE_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001040)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001040)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001040)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_OUT(v) out_dword(	\
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_OUTM(m, \
+						   v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_IN)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00001044)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001044)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001044)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_RMSK 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_IN in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_OUT(v) out_dword( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_ADDR, \
+		v)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_IN)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_MCS_STALL_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_STALL_MCS_STALL_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001048)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001048)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001048)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_RMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_INM(m) in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_IRAM_PTR_BMSK 0xfff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG_IRAM_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_SEL_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001050)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_SEL_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001050)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_SEL_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001050)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_CLR_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001058)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_CLR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001058)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_CLR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001058)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001060)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001060)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001060)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_RMSK 0x1ffff01
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_IN in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ADDR, \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_INM(m) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_SAVED_BMSK \
+	0x1000000
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_SAVED_SHFT 0x18
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_MID_BMSK \
+	0xff0000
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_MID_SHFT 0x10
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_TID_BMSK 0xff00
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_TID_SHFT 0x8
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_WRITE_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_WRITE_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_0_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001064)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_0_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001064)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_0_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001064)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_1_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001068)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_1_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001068)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_1_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001068)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_2_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000106c)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_2_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000106c)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_2_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000106c)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001070 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001070 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001070 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_WRITE_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001080 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_WRITE_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001080 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_WRITE_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001080 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001100 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001100 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001100 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_MAXn 31
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_INI(n) in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_ADDR(n), \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_INMI(n,	\
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_ADDR( \
+			n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_RF_REG_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SW_RF_n_READ_RF_REG_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_CH_k_VP_TABLE_ADDR(n, \
+							   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001400 + 0x80 * (n) + 0x4 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_CH_k_VP_TABLE_PHYS(n, \
+							   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001400 + 0x80 * (n) + \
+		0x4 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_CH_k_VP_TABLE_OFFS(n, \
+							   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001400 + 0x80 * (n) + \
+		0x4 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_ADDR(n, \
+							   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001600 + 0x80 * (n) + 0x4 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_PHYS(n, \
+							   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001600 + 0x80 * (n) + \
+		0x4 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_OFFS(n, \
+							   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001600 + 0x80 * (n) + \
+		0x4 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_RMSK 0x3f
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_MAXn 3
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_MAXk 19
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_INI2(n, \
+							   k) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_RMSK)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_INMI2(n, k, \
+							    mask) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_ADDR(n, k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_VALID_BMSK 0x20
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_VALID_SHFT 0x5
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_PHY_EV_CH_BMSK 0x1f
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_EE_n_EV_k_VP_TABLE_PHY_EV_CH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SDMA_TRANS_DB_n_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00001800 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SDMA_TRANS_DB_n_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00001800 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_DEBUG_SDMA_TRANS_DB_n_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00001800 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_UC_SRC_IRQ_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+					      0x00000500)
+#define HWIO_IPA_GSI_TOP_GSI_UC_SRC_IRQ_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000500)
+#define HWIO_IPA_GSI_TOP_GSI_UC_SRC_IRQ_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000500)
+#define HWIO_IPA_GSI_TOP_GSI_UC_SRC_IRQ_MSK_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00000504)
+#define HWIO_IPA_GSI_TOP_GSI_UC_SRC_IRQ_MSK_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000504)
+#define HWIO_IPA_GSI_TOP_GSI_UC_SRC_IRQ_MSK_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000504)
+#define HWIO_IPA_GSI_TOP_GSI_UC_SRC_IRQ_CLR_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00000508)
+#define HWIO_IPA_GSI_TOP_GSI_UC_SRC_IRQ_CLR_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000508)
+#define HWIO_IPA_GSI_TOP_GSI_UC_SRC_IRQ_CLR_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000508)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_ARGS_n_ADDR(n) (IPA_GSI_TOP_GSI_REG_BASE \
+						 + 0x0000050c + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_ACC_ARGS_n_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000050c + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_ACC_ARGS_n_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000050c + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_ACC_ROUTINE_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+					       0x00000524)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_ROUTINE_PHYS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000524)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_ROUTINE_OFFS (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000524)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_GO_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+					  0x00000528)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_GO_PHYS (IPA_GSI_TOP_GSI_REG_BASE_PHYS + \
+					  0x00000528)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_GO_OFFS (IPA_GSI_TOP_GSI_REG_BASE_OFFS + \
+					  0x00000528)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_2_UC_MCS_STTS_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000052c)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_2_UC_MCS_STTS_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000052c)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_2_UC_MCS_STTS_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000052c)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_2_UC_MCS_RET_VAL_LSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000530)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_2_UC_MCS_RET_VAL_LSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000530)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_2_UC_MCS_RET_VAL_LSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000530)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_2_UC_MCS_RET_VAL_MSB_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000534)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_2_UC_MCS_RET_VAL_MSB_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000534)
+#define HWIO_IPA_GSI_TOP_GSI_ACC_2_UC_MCS_RET_VAL_MSB_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000534)
+#define HWIO_IPA_GSI_TOP_GSI_IC_2_UC_MCS_VLD_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000538)
+#define HWIO_IPA_GSI_TOP_GSI_IC_2_UC_MCS_VLD_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000538)
+#define HWIO_IPA_GSI_TOP_GSI_IC_2_UC_MCS_VLD_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000538)
+#define HWIO_IPA_GSI_TOP_GSI_IC_2_UC_MCS_PC_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x0000053c)
+#define HWIO_IPA_GSI_TOP_GSI_IC_2_UC_MCS_PC_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000053c)
+#define HWIO_IPA_GSI_TOP_GSI_IC_2_UC_MCS_PC_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000053c)
+#define HWIO_IPA_GSI_TOP_GSI_IC_2_UC_MCS_ARGS_n_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000540 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_IC_2_UC_MCS_ARGS_n_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000540 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_IC_2_UC_MCS_ARGS_n_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000540 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_UC_TLV_IN_VLD_ADDR (IPA_GSI_TOP_GSI_REG_BASE \
+						 + 0x00000558)
+#define HWIO_IPA_GSI_TOP_GSI_UC_TLV_IN_VLD_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000558)
+#define HWIO_IPA_GSI_TOP_GSI_UC_TLV_IN_VLD_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000558)
+#define HWIO_IPA_GSI_TOP_GSI_UC_TLV_IN_ROUTINE_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000055c)
+#define HWIO_IPA_GSI_TOP_GSI_UC_TLV_IN_ROUTINE_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000055c)
+#define HWIO_IPA_GSI_TOP_GSI_UC_TLV_IN_ROUTINE_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000055c)
+#define HWIO_IPA_GSI_TOP_GSI_UC_TLV_IN_ARGS_n_ADDR(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00000560 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_UC_TLV_IN_ARGS_n_PHYS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00000560 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_GSI_UC_TLV_IN_ARGS_n_OFFS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00000560 + 0x4 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f000 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f000 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f000 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_RMSK 0xfff7ffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_OUTMI2(n, k, mask, \
+						      val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK \
+	0xff000000
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT 0x18
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK 0xf00000
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT 0x14
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_NOT_ALLOCATED_FVAL \
+	0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_ALLOCATED_FVAL 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_STARTED_FVAL 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_STOPPED_FVAL 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_STOP_IN_PROC_FVAL \
+	0x4
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_ERROR_FVAL 0xf
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_BMSK 0x7c000
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_SHFT 0xe
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_BMSK	\
+	0x2000
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_SHFT	\
+	0xd
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHID_BMSK 0x1f00
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHID_SHFT 0x8
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_EE_BMSK 0xf0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_EE_SHFT 0x4
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_BMSK 0x8
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_SHFT 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_INBOUND_FVAL 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_OUTBOUND_FVAL	\
+	0x1
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_BMSK 0x7
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MHI_FVAL	\
+	0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_XHCI_FVAL \
+	0x1
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_GPI_FVAL	\
+	0x2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_XDCI_FVAL \
+	0x3
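+/*
+ * Illustrative sketch (not from the original header): reading the
+ * channel state for EE index n, channel k with the macros above.
+ *
+ *	u32 ctx0 = HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_INI2(n, k);
+ *	u32 state = (ctx0 &
+ *		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >>
+ *		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
+ *	bool started = (state ==
+ *		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_STARTED_FVAL);
+ */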
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f004 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f004 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f004 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_OUTMI2(n, k, mask, \
+						      val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_BMSK 0xffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f008 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f008 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f008 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_OUTMI2(n, k, mask, \
+						      val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f00c + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f00c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f00c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_OUTMI2(n, k, mask, \
+						      val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f010 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f010 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f010 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_OUTMI2(n, k, mask, \
+						      val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_READ_PTR_LSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_4_READ_PTR_LSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f014 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f014 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f014 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_READ_PTR_MSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_5_READ_PTR_MSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f018 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f018 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f018 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_WRITE_PTR_LSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_6_WRITE_PTR_LSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f01c + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f01c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f01c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_WRITE_PTR_MSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_CNTXT_7_WRITE_PTR_MSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ADDR(n, \
+							      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f054 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_PHYS(n, \
+							      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f054 + 0x4000 * \
+		(n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OFFS(n, \
+							      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f054 + 0x4000 * \
+		(n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_INI2(n, \
+							      k) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_INMI2(n, k, \
+							       mask) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ADDR(n, k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OUTI2(n, k, \
+							       val) \
+	out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OUTMI2(n, \
+								k, \
+								mask, \
+								val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_READ_PTR_BMSK \
+	0xffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_READ_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ADDR(n, \
+							       k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f058 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_PHYS(n, \
+							       k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f058 + 0x4000 * \
+		(n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OFFS(n, \
+							       k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f058 + 0x4000 * \
+		(n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_INI2(n, \
+							       k) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ADDR(n, \
+								       k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_INMI2(n, k, \
+								mask) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ADDR(n, \
+								       k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OUTI2(n, k, \
+								val) \
+	out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ADDR(n, \
+								       k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OUTMI2(n, \
+								 k, \
+								 mask, \
+								 val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_ADDR(n, \
+								       k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_INI2(n, \
+								       k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RE_INTR_DB_BMSK \
+	0xffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RE_INTR_DB_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_ADDR(n, \
+						k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f05c + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_PHYS(n, \
+						k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f05c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_OFFS(n, \
+						k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f05c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_RMSK 0xff3f0f
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_INMI2(n, k, \
+						 mask) in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_ADDR(n, \
+							k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_OUTMI2(n, k, mask, \
+						  val) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_ADDR(n, \
+							k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_BMSK \
+	0xff0000
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_SHFT 0x10
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_BMSK 0x3c00
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SHFT 0xa
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_ESCAPE_BUF_ONLY_FVAL \
+	0x1
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SMART_PRE_FETCH_FVAL \
+	0x2
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_FREE_PRE_FETCH_FVAL \
+	0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK 0x200
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT 0x9
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK 0x100
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT 0x8
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_ONE_PREFETCH_SEG_FVAL \
+	0x0
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_TWO_PREFETCH_SEG_FVAL \
+	0x1
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK 0xf
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT 0x0
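+/*
+ * Illustrative sketch (not from the original header): a masked
+ * read-modify-write of a single QOS field. OUTMI2 reads the register
+ * back through INI2, merges the masked value and writes the result, so
+ * only the WRR weight bits change here.
+ *
+ *	HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_OUTMI2(n, k,
+ *		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK,
+ *		weight << HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT);
+ */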
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_ADDR(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f060 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_PHYS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f060 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_OFFS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f060 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_INI2(n, \
+						      k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_ADDR(n, \
+							      k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_INMI2(n, k, \
+						       mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_ADDR( \
+				n, \
+				k), mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_OUTI2(n, k, \
+						       val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_ADDR(n, \
+							      k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_OUTMI2(n, k, mask, \
+							val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_SCRATCH_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_0_SCRATCH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_ADDR(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f064 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_PHYS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f064 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_OFFS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f064 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_INI2(n, \
+						      k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_ADDR(n, \
+							      k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_INMI2(n, k, \
+						       mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_ADDR( \
+				n, \
+				k), mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_OUTI2(n, k, \
+						       val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_ADDR(n, \
+							      k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_OUTMI2(n, k, mask, \
+							val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_SCRATCH_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_1_SCRATCH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_ADDR(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f068 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_PHYS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f068 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_OFFS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f068 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_INI2(n, \
+						      k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_ADDR(n, \
+							      k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_INMI2(n, k, \
+						       mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_ADDR( \
+				n, \
+				k), mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_OUTI2(n, k, \
+						       val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_ADDR(n, \
+							      k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_OUTMI2(n, k, mask, \
+							val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_SCRATCH_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_2_SCRATCH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_ADDR(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f06c + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_PHYS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f06c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_OFFS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f06c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_MAXk 22
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_INI2(n, \
+						      k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_ADDR(n, \
+							      k), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_INMI2(n, k, \
+						       mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_ADDR( \
+				n, \
+				k), mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_OUTI2(n, k, \
+						       val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_ADDR(n, \
+							      k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_OUTMI2(n, k, mask, \
+							val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_SCRATCH_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_SCRATCH_3_SCRATCH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_DB_ENG_WRITE_PTR_ADDR(n,	\
+							     k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000f070 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_DB_ENG_WRITE_PTR_PHYS(n,	\
+							     k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000f070 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_DB_ENG_WRITE_PTR_OFFS(n,	\
+							     k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000f070 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
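+/*
+ * The event-channel (EV_CH) context block below mirrors the channel
+ * context layout above: per-EE stride 0x4000 from offset 0x10000 and
+ * per-event-channel stride 0x80, with k bounded at 19 (MAXk) instead
+ * of 22.
+ */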
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_ADDR(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010000 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_PHYS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010000 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_OFFS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010000 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_RMSK 0xfff1ffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_OUTMI2(n, k, mask, \
+						     val) \
+	out_dword_masked_ns(HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_ADDR(	\
+				    n, \
+				    k), mask, val, \
+			    HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_INI2(n, \
+								       k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK 0xff000000
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT 0x18
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK 0xf00000
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT 0x14
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHSTATE_NOT_ALLOCATED_FVAL \
+	0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHSTATE_ALLOCATED_FVAL 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_INTYPE_BMSK 0x10000
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_INTYPE_SHFT 0x10
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_INTYPE_MSI_FVAL 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_INTYPE_IRQ_FVAL 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_EVCHID_BMSK 0xff00
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_EVCHID_SHFT 0x8
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_EE_BMSK 0xf0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_EE_SHFT 0x4
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHTYPE_BMSK 0xf
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHTYPE_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHTYPE_MHI_EV_FVAL 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHTYPE_XHCI_EV_FVAL 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHTYPE_GPI_EV_FVAL 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_0_CHTYPE_XDCI_FVAL 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_ADDR(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010004 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_PHYS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010004 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_OFFS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010004 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_RMSK 0xffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_OUTMI2(n, k, mask, \
+						     val) \
+	out_dword_masked_ns(HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_ADDR(	\
+				    n, \
+				    k), mask, val, \
+			    HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_INI2(n, \
+								       k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_BMSK 0xffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_ADDR(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010008 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_PHYS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010008 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_OFFS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010008 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_OUTMI2(n, k, mask, \
+						     val) \
+	out_dword_masked_ns(HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_ADDR(	\
+				    n, \
+				    k), mask, val, \
+			    HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_INI2(n, \
+								       k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_ADDR(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0001000c + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_PHYS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0001000c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_OFFS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0001000c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_OUTMI2(n, k, mask, \
+						     val) \
+	out_dword_masked_ns(HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_ADDR(	\
+				    n, \
+				    k), mask, val, \
+			    HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_INI2(n, \
+								       k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_ADDR(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010010 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_PHYS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010010 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_OFFS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010010 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_OUTMI2(n, k, mask, \
+						     val) \
+	out_dword_masked_ns(HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_ADDR(	\
+				    n, \
+				    k), mask, val, \
+			    HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_INI2(n, \
+								       k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_READ_PTR_LSB_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_4_READ_PTR_LSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_ADDR(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010014 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_PHYS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010014 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_OFFS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010014 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_READ_PTR_MSB_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_5_READ_PTR_MSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_ADDR(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010018 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_PHYS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010018 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_OFFS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010018 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_WRITE_PTR_LSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_6_WRITE_PTR_LSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_ADDR(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0001001c + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_PHYS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0001001c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_OFFS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0001001c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_WRITE_PTR_MSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_7_WRITE_PTR_MSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_ADDR(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010020 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_PHYS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010020 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_OFFS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010020 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_OUTMI2(n, k, mask, \
+						     val) \
+	out_dword_masked_ns(HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_ADDR(	\
+				    n, \
+				    k), mask, val, \
+			    HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_INI2(n, \
+								       k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_INT_MOD_CNT_BMSK 0xff000000
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_INT_MOD_CNT_SHFT 0x18
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_INT_MODC_BMSK 0xff0000
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_INT_MODC_SHFT 0x10
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_INT_MODT_BMSK 0xffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_8_INT_MODT_SHFT 0x0
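+/*
+ * Judging by the field names above, EV_CH_k_CNTXT_8 appears to pack
+ * the event interrupt moderation parameters (INT_MOD_CNT, INT_MODC,
+ * INT_MODT) into one register; the units are not spelled out in this
+ * header.
+ */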
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_ADDR(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010024 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_PHYS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010024 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_OFFS(n, \
+						   k) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010024 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_INMI2(n, k, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_ADDR(n, \
+							   k), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_OUTMI2(n, k, mask, \
+						     val) \
+	out_dword_masked_ns(HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_ADDR(	\
+				    n, \
+				    k), mask, val, \
+			    HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_INI2(n, \
+								       k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_INTVEC_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_9_INTVEC_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010028 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010028 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010028 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_OUTMI2(n, k, mask, \
+						      val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0001002c + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0001002c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0001002c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_OUTMI2(n, k, mask, \
+						      val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010030 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010030 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010030 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_OUTMI2(n, k, mask, \
+						      val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_ADDR(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010034 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_PHYS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010034 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_OFFS(n, \
+						    k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010034 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_INMI2(n, k, \
+						     mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_ADDR(n, \
+							    k),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_OUTMI2(n, k, mask, \
+						      val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_ADDR(n,	\
+						     k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00010048 + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_PHYS(n,	\
+						     k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00010048 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_OFFS(n,	\
+						     k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00010048 + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_INMI2(n, k, \
+						      mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_ADDR( \
+				n, \
+				k), mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_OUTMI2(n, k, mask, \
+						       val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_SCRATCH_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_0_SCRATCH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_ADDR(n,	\
+						     k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0001004c + 0x4000 * (n) + \
+		0x80 * \
+		(k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_PHYS(n,	\
+						     k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0001004c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_OFFS(n,	\
+						     k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0001004c + 0x4000 * \
+		(n) + \
+		0x80 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_MAXk 19
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_INI2(n, k) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_ADDR(n, k), \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_INMI2(n, k, \
+						      mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_ADDR( \
+				n, \
+				k), mask)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_OUTI2(n, k, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_ADDR(n, k), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_OUTMI2(n, k, mask, \
+						       val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_ADDR(n, k), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_INI2(n, k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_SCRATCH_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_SCRATCH_1_SCRATCH_SHFT 0x0
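+/*
+ * Doorbell registers follow. Note the tighter per-channel stride in
+ * their address macros: 0x8 per k, rather than the 0x80 used by the
+ * context blocks above.
+ */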
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_DOORBELL_0_ADDR(n, \
+						       k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00011000 + 0x4000 * (n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_DOORBELL_0_PHYS(n, \
+						       k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00011000 + 0x4000 * \
+		(n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_DOORBELL_0_OFFS(n, \
+						       k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00011000 + 0x4000 * \
+		(n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_DOORBELL_1_ADDR(n, \
+						       k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00011004 + 0x4000 * (n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_DOORBELL_1_PHYS(n, \
+						       k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00011004 + 0x4000 * \
+		(n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_k_DOORBELL_1_OFFS(n, \
+						       k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00011004 + 0x4000 * \
+		(n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_DOORBELL_0_ADDR(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00011100 + 0x4000 * (n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_DOORBELL_0_PHYS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00011100 + 0x4000 * \
+		(n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_DOORBELL_0_OFFS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00011100 + 0x4000 * \
+		(n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_DOORBELL_1_ADDR(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00011104 + 0x4000 * (n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_DOORBELL_1_PHYS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00011104 + 0x4000 * \
+		(n) + \
+		0x8 * (k))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_k_DOORBELL_1_OFFS(n, \
+						      k) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00011104 + 0x4000 * \
+		(n) + \
+		0x8 * (k))
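+/*
+ * Per-EE top-level registers (status, channel/event commands, HW
+ * parameters, versions and IRQ context) start at offset 0x12000 below,
+ * keeping the same 0x4000 per-EE stride.
+ */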
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_ADDR(n) (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00012000 + 0x4000 * \
+						  (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012000 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012000 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_RMSK 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_INMI(n, mask) in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_ENABLED_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_STATUS_ENABLED_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_CMD_ADDR(n) (IPA_GSI_TOP_GSI_REG_BASE \
+						  + 0x00012008 + 0x4000 * \
+						  (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_CMD_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012008 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_CH_CMD_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012008 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_CMD_ADDR(n) (IPA_GSI_TOP_GSI_REG_BASE \
+						 + 0x00012010 + 0x4000 * \
+						 (n))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_CMD_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012010 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_EV_CH_CMD_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012010 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_EE_GENERIC_CMD_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012018 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_EE_GENERIC_CMD_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012018 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_EE_GENERIC_CMD_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012018 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_0_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012038 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_0_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012038 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_0_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012038 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_1_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0001203c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_1_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0001203c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_1_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0001203c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_2_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012040 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_2_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012040 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_2_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012040 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_SW_VERSION_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012044 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_SW_VERSION_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012044 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_SW_VERSION_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012044 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_MCS_CODE_VER_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012048 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_MCS_CODE_VER_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012048 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_MCS_CODE_VER_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012048 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_3_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0001204c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_3_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0001204c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_GSI_HW_PARAM_3_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0001204c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012080 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012080 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012080 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_RMSK 0x7f
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_GENERAL_BMSK 0x40
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_GENERAL_SHFT 0x6
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_BMSK 0x20
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_SHFT 0x5
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_BMSK 0x10
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_SHFT 0x4
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_IEOB_BMSK 0x8
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_IEOB_SHFT 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_BMSK 0x4
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_SHFT 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_BMSK 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_SHFT 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012088 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012088 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012088 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_RMSK 0x7f
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_INMI(n, \
+						      mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_ADDR( \
+				n), \
+			mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_OUTI(n, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_ADDR(n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_OUTMI(n, mask,	\
+						       val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_INI(n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_GENERAL_BMSK 0x40
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_GENERAL_SHFT 0x6
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_EV_CTRL_BMSK \
+	0x20
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_EV_CTRL_SHFT 0x5
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_CH_CTRL_BMSK \
+	0x10
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_CH_CTRL_SHFT 0x4
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_IEOB_BMSK 0x8
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_IEOB_SHFT 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_GLOB_EE_BMSK 0x4
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_GLOB_EE_SHFT 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL_BMSK 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL_SHFT 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL_SHFT 0x0
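+/*
+ * The CNTXT_SRC_* registers that follow come in raw/mask/clear triads:
+ * one bit per channel (or event ring) in the bitmap, with the _MSK
+ * variant gating which bits may raise the interrupt and the _CLR
+ * variant acknowledging them (the _CLR registers are write-only,
+ * ATTR 0x2).
+ */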
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012090 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012090 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012090 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_INMI(n, \
+							mask) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012094 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012094 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012094 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_INMI(n, \
+						       mask) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012098 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012098 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012098 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_RMSK 0x7fffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_INI(n) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ADDR( \
+			n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_INMI(n, \
+							    mask) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ADDR(n),	\
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OUTI(n, \
+							    val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OUTMI(n, mask, \
+							     val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_ADDR(n),	\
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_INI(n))
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_BMSK \
+	0x7fffff
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ADDR(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0001209c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_PHYS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0001209c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0001209c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_RMSK 0xfffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_INI(n) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ADDR( \
+				n), \
+			HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_INMI(n, \
+							   mask) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OUTI(n, \
+							   val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OUTMI(n, mask, \
+							    val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_INI(n))
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK \
+	0xfffff
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000120a0 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000120a0 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000120a0 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_ATTR 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OUTI(n, \
+							    val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_ADDR(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000120a4 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_PHYS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000120a4 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000120a4 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_ATTR 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_OUTI(n, \
+							   val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000120b0 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000120b0 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000120b0 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_INMI(n, \
+						      mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_ADDR( \
+				n), \
+			mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_EV_CH_BIT_MAP_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_EV_CH_BIT_MAP_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000120b8 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000120b8 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000120b8 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_RMSK 0xfffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_INI(n) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ADDR( \
+				n), \
+			HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_INMI(n, \
+							  mask)	\
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OUTI(n, \
+							  val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OUTMI(n, mask, \
+							   val)	\
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_INI(n))
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK \
+	0xfffff
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT \
+	0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x000120c0 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x000120c0 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x000120c0 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_ATTR 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OUTI(n, \
+							  val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_EV_CH_BIT_MAP_BMSK	\
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_EV_CH_BIT_MAP_SHFT	\
+	0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012100 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012100 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012100 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_RMSK 0xf
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_INMI(n, \
+						       mask) \
+	in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT3_BMSK 0x8
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT3_SHFT 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT2_BMSK 0x4
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT2_SHFT 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT1_BMSK 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT1_SHFT 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_EN_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012108 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_EN_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012108 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012108 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_CLR_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012110 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_CLR_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012110 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GLOB_IRQ_CLR_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012110 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012118 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012118 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012118 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_RMSK 0xf
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_ATTR 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_INMI(n, \
+						      mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_ADDR( \
+				n), \
+			mask)
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_GSI_MCS_STACK_OVRFLOW_BMSK \
+	0x8
+#define	\
+	HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_GSI_MCS_STACK_OVRFLOW_SHFT \
+	0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_GSI_CMD_FIFO_OVRFLOW_BMSK \
+	0x4
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_GSI_CMD_FIFO_OVRFLOW_SHFT \
+	0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BUS_ERROR_BMSK 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BUS_ERROR_SHFT 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BREAK_POINT_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BREAK_POINT_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_EN_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012120 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_EN_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012120 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_EN_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012120 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_CLR_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012128 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_CLR_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012128 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_GSI_IRQ_CLR_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012128 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012180 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012180 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012180 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_RMSK 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_OUTI(n, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_ADDR(n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_OUTMI(n, mask, \
+						 val) out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_INI(n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_INTYPE_BMSK 0x1
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_INTYPE_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_INTYPE_MSI_FVAL 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INTSET_INTYPE_IRQ_FVAL 0x1
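+/*
+ * INTSET.INTYPE selects the interrupt delivery mode per EE (0 = MSI,
+ * 1 = IRQ line, per the _FVAL encodings above); the MSI_BASE_LSB/MSB
+ * pair below supplies the 64-bit MSI target address, presumably used
+ * when MSI mode is selected.
+ */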
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012188 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012188 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012188 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_INMI(n, \
+						      mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_ADDR( \
+				n), \
+			mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_OUTI(n, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_ADDR(n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_OUTMI(n, mask,	\
+						       val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_INI(n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_MSI_ADDR_LSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_LSB_MSI_ADDR_LSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0001218c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0001218c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0001218c + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_INMI(n, \
+						      mask) \
+	in_dword_masked(HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_ADDR( \
+				n), \
+			mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_OUTI(n, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_ADDR(n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_OUTMI(n, mask,	\
+						       val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_INI(n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_MSI_ADDR_MSB_BMSK \
+	0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_MSI_BASE_MSB_MSI_ADDR_MSB_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INT_VEC_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012190 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INT_VEC_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012190 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_INT_VEC_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012190 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_ADDR(n) (IPA_GSI_TOP_GSI_REG_BASE \
+						 + 0x00012200 + 0x4000 * \
+						 (n))
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012200 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012200 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_INI(n) in_dword_masked(	\
+		HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_ADDR(n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_OUTI(n, val) out_dword(	\
+		HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_ADDR(n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_OUTMI(n, mask, \
+					      val) out_dword_masked_ns(	\
+		HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_INI(n))
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_ERROR_LOG_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_ERROR_LOG_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012210 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012210 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012210 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_ATTR 0x2
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_OUTI(n, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_ADDR(n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_ERROR_LOG_CLR_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_ERROR_LOG_CLR_ERROR_LOG_CLR_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_ADDR(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012400 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_PHYS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012400 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_OFFS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012400 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_INMI(n, \
+						   mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_ADDR( \
+			n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_OUTI(n, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_ADDR(n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_OUTMI(n, mask, \
+						    val) \
+	out_dword_masked_ns(HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_ADDR(	\
+				    n),	\
+			    mask, val, \
+			    HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_INI(n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_SCRATCH_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_0_SCRATCH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_ADDR(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x00012404 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_PHYS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x00012404 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_OFFS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x00012404 + 0x4000 * (n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_RMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_MAXn 2
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_ATTR 0x3
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_INI(n) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_ADDR(n), \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_RMSK)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_INMI(n, \
+						   mask) in_dword_masked( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_ADDR( \
+			n), \
+		mask)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_OUTI(n, val) out_dword( \
+		HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_ADDR(n), \
+		val)
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_OUTMI(n, mask, \
+						    val) \
+	out_dword_masked_ns(HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_ADDR(	\
+				    n),	\
+			    mask, val, \
+			    HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_INI(n))
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_SCRATCH_BMSK 0xffffffff
+#define HWIO_IPA_GSI_TOP_EE_n_CNTXT_SCRATCH_1_SCRATCH_SHFT 0x0
+#define HWIO_IPA_GSI_TOP_GSI_MCS_CFG_ADDR (IPA_GSI_TOP_GSI_REG_BASE + \
+					   0x0000b000)
+#define HWIO_IPA_GSI_TOP_GSI_MCS_CFG_PHYS (IPA_GSI_TOP_GSI_REG_BASE_PHYS + \
+					   0x0000b000)
+#define HWIO_IPA_GSI_TOP_GSI_MCS_CFG_OFFS (IPA_GSI_TOP_GSI_REG_BASE_OFFS + \
+					   0x0000b000)
+#define HWIO_IPA_GSI_TOP_GSI_TZ_FW_AUTH_LOCK_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000b008)
+#define HWIO_IPA_GSI_TOP_GSI_TZ_FW_AUTH_LOCK_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000b008)
+#define HWIO_IPA_GSI_TOP_GSI_TZ_FW_AUTH_LOCK_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000b008)
+#define HWIO_IPA_GSI_TOP_GSI_MSA_FW_AUTH_LOCK_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000b010)
+#define HWIO_IPA_GSI_TOP_GSI_MSA_FW_AUTH_LOCK_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000b010)
+#define HWIO_IPA_GSI_TOP_GSI_MSA_FW_AUTH_LOCK_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000b010)
+#define HWIO_IPA_GSI_TOP_GSI_SP_FW_AUTH_LOCK_ADDR ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000b018)
+#define HWIO_IPA_GSI_TOP_GSI_SP_FW_AUTH_LOCK_PHYS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000b018)
+#define HWIO_IPA_GSI_TOP_GSI_SP_FW_AUTH_LOCK_OFFS ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000b018)
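+/*
+ * The INTER_EE_n_* block below mirrors a subset of the per-EE command
+ * and channel/event IRQ registers at a 0x1000 stride (versus the
+ * 0x4000 stride of the EE_n_* space above); the ORIGINATOR_EE naming
+ * suggests it serves cross-execution-environment signalling.
+ */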
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_ORIGINATOR_EE_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000c000 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_ORIGINATOR_EE_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000c000 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_ORIGINATOR_EE_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000c000 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_GSI_CH_CMD_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000c008 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_GSI_CH_CMD_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000c008 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_GSI_CH_CMD_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000c008 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_EV_CH_CMD_ADDR(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000c010 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_EV_CH_CMD_PHYS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000c010 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_EV_CH_CMD_OFFS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000c010 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_GSI_CH_IRQ_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000c018 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_GSI_CH_IRQ_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000c018 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_GSI_CH_IRQ_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000c018 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_EV_CH_IRQ_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000c01c + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_EV_CH_IRQ_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000c01c + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_EV_CH_IRQ_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000c01c + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000c020 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000c020 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000c020 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_EV_CH_IRQ_MSK_ADDR(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000c024 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_EV_CH_IRQ_MSK_PHYS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000c024 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_EV_CH_IRQ_MSK_OFFS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000c024 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_ADDR(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000c028 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_PHYS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000c028 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_OFFS(n) ( \
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000c028 + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_EV_CH_IRQ_CLR_ADDR(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE + 0x0000c02c + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_EV_CH_IRQ_CLR_PHYS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_PHYS + 0x0000c02c + 0x1000 * (n))
+#define HWIO_IPA_GSI_TOP_INTER_EE_n_SRC_EV_CH_IRQ_CLR_OFFS(n) (	\
+		IPA_GSI_TOP_GSI_REG_BASE_OFFS + 0x0000c02c + 0x1000 * (n))
+#define IPA_CFG_REG_BASE (IPA_0_IPA_WRAPPER_BASE + 0x00040000)
+#define IPA_CFG_REG_BASE_PHYS (IPA_0_IPA_WRAPPER_BASE_PHYS + 0x00040000)
+#define IPA_CFG_REG_BASE_OFFS 0x00040000
+#define HWIO_IPA_COMP_HW_VERSION_ADDR (IPA_CFG_REG_BASE + 0x00000030)
+#define HWIO_IPA_COMP_HW_VERSION_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000030)
+#define HWIO_IPA_COMP_HW_VERSION_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000030)
+#define HWIO_IPA_COMP_HW_VERSION_RMSK 0xffffffff
+#define HWIO_IPA_COMP_HW_VERSION_ATTR 0x1
+#define HWIO_IPA_COMP_HW_VERSION_IN in_dword_masked( \
+		HWIO_IPA_COMP_HW_VERSION_ADDR, \
+		HWIO_IPA_COMP_HW_VERSION_RMSK)
+#define HWIO_IPA_COMP_HW_VERSION_INM(m) in_dword_masked( \
+		HWIO_IPA_COMP_HW_VERSION_ADDR, \
+		m)
+#define HWIO_IPA_COMP_HW_VERSION_MAJOR_BMSK 0xf0000000
+#define HWIO_IPA_COMP_HW_VERSION_MAJOR_SHFT 0x1c
+#define HWIO_IPA_COMP_HW_VERSION_MINOR_BMSK 0xfff0000
+#define HWIO_IPA_COMP_HW_VERSION_MINOR_SHFT 0x10
+#define HWIO_IPA_COMP_HW_VERSION_STEP_BMSK 0xffff
+#define HWIO_IPA_COMP_HW_VERSION_STEP_SHFT 0x0
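+/*
+ * Usage sketch (illustrative, not from the autogenerated source):
+ * extracting a field combines the register's _IN accessor with the
+ * field's _BMSK/_SHFT pair, e.g. for the IPA core major version:
+ *
+ *   u32 v = HWIO_IPA_COMP_HW_VERSION_IN;
+ *   u32 major = (v & HWIO_IPA_COMP_HW_VERSION_MAJOR_BMSK) >>
+ *		 HWIO_IPA_COMP_HW_VERSION_MAJOR_SHFT;
+ */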
+#define HWIO_IPA_VERSION_ADDR (IPA_CFG_REG_BASE + 0x00000034)
+#define HWIO_IPA_VERSION_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000034)
+#define HWIO_IPA_VERSION_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000034)
+#define HWIO_IPA_ENABLED_PIPES_ADDR (IPA_CFG_REG_BASE + 0x00000038)
+#define HWIO_IPA_ENABLED_PIPES_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000038)
+#define HWIO_IPA_ENABLED_PIPES_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000038)
+#define HWIO_IPA_COMP_CFG_ADDR (IPA_CFG_REG_BASE + 0x0000003c)
+#define HWIO_IPA_COMP_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000003c)
+#define HWIO_IPA_COMP_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000003c)
+#define HWIO_IPA_COMP_CFG_RMSK 0x3fffee
+#define HWIO_IPA_COMP_CFG_ATTR 0x3
+#define HWIO_IPA_COMP_CFG_IN in_dword_masked(HWIO_IPA_COMP_CFG_ADDR, \
+					     HWIO_IPA_COMP_CFG_RMSK)
+#define HWIO_IPA_COMP_CFG_INM(m) in_dword_masked(HWIO_IPA_COMP_CFG_ADDR, m)
+#define HWIO_IPA_COMP_CFG_OUT(v) out_dword(HWIO_IPA_COMP_CFG_ADDR, v)
+#define HWIO_IPA_COMP_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_COMP_CFG_ADDR,	\
+		m, \
+		v, \
+		HWIO_IPA_COMP_CFG_IN)
+#define HWIO_IPA_COMP_CFG_IPA_FULL_FLUSH_WAIT_RSC_CLOSURE_EN_BMSK 0x200000
+#define HWIO_IPA_COMP_CFG_IPA_FULL_FLUSH_WAIT_RSC_CLOSURE_EN_SHFT 0x15
+#define HWIO_IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_BMSK 0x1e0000
+#define HWIO_IPA_COMP_CFG_IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_SHFT 0x11
+#define HWIO_IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_GLOBAL_EN_BMSK 0x10000
+#define HWIO_IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_GLOBAL_EN_SHFT 0x10
+#define HWIO_IPA_COMP_CFG_GSI_MULTI_AXI_MASTERS_DIS_BMSK 0x8000
+#define HWIO_IPA_COMP_CFG_GSI_MULTI_AXI_MASTERS_DIS_SHFT 0xf
+#define HWIO_IPA_COMP_CFG_GSI_SNOC_CNOC_LOOP_PROTECTION_DISABLE_BMSK \
+	0x4000
+#define HWIO_IPA_COMP_CFG_GSI_SNOC_CNOC_LOOP_PROTECTION_DISABLE_SHFT 0xe
+#define HWIO_IPA_COMP_CFG_GEN_QMB_0_SNOC_CNOC_LOOP_PROTECTION_DISABLE_BMSK \
+	0x2000
+#define HWIO_IPA_COMP_CFG_GEN_QMB_0_SNOC_CNOC_LOOP_PROTECTION_DISABLE_SHFT \
+	0xd
+#define HWIO_IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_WR_DIS_BMSK 0x1000
+#define HWIO_IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_WR_DIS_SHFT 0xc
+#define HWIO_IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_WR_DIS_BMSK 0x800
+#define HWIO_IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_WR_DIS_SHFT 0xb
+#define HWIO_IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_RD_DIS_BMSK 0x400
+#define HWIO_IPA_COMP_CFG_GEN_QMB_1_MULTI_INORDER_RD_DIS_SHFT 0xa
+#define HWIO_IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_RD_DIS_BMSK 0x200
+#define HWIO_IPA_COMP_CFG_GEN_QMB_0_MULTI_INORDER_RD_DIS_SHFT 0x9
+#define HWIO_IPA_COMP_CFG_GSI_MULTI_INORDER_WR_DIS_BMSK 0x100
+#define HWIO_IPA_COMP_CFG_GSI_MULTI_INORDER_WR_DIS_SHFT 0x8
+#define HWIO_IPA_COMP_CFG_GSI_MULTI_INORDER_RD_DIS_BMSK 0x80
+#define HWIO_IPA_COMP_CFG_GSI_MULTI_INORDER_RD_DIS_SHFT 0x7
+#define HWIO_IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_PROD_EN_BMSK 0x40
+#define HWIO_IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_PROD_EN_SHFT 0x6
+#define HWIO_IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_CONS_EN_BMSK 0x20
+#define HWIO_IPA_COMP_CFG_IPA_QMB_SELECT_BY_ADDRESS_CONS_EN_SHFT 0x5
+#define HWIO_IPA_COMP_CFG_GEN_QMB_1_SNOC_BYPASS_DIS_BMSK 0x8
+#define HWIO_IPA_COMP_CFG_GEN_QMB_1_SNOC_BYPASS_DIS_SHFT 0x3
+#define HWIO_IPA_COMP_CFG_GEN_QMB_0_SNOC_BYPASS_DIS_BMSK 0x4
+#define HWIO_IPA_COMP_CFG_GEN_QMB_0_SNOC_BYPASS_DIS_SHFT 0x2
+#define HWIO_IPA_COMP_CFG_GSI_SNOC_BYPASS_DIS_BMSK 0x2
+#define HWIO_IPA_COMP_CFG_GSI_SNOC_BYPASS_DIS_SHFT 0x1
+#define HWIO_IPA_CLKON_CFG_ADDR (IPA_CFG_REG_BASE + 0x00000044)
+#define HWIO_IPA_CLKON_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000044)
+#define HWIO_IPA_CLKON_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000044)
+#define HWIO_IPA_ROUTE_ADDR (IPA_CFG_REG_BASE + 0x00000048)
+#define HWIO_IPA_ROUTE_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000048)
+#define HWIO_IPA_ROUTE_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000048)
+#define HWIO_IPA_ROUTE_RMSK 0x13fffff
+#define HWIO_IPA_ROUTE_ATTR 0x3
+#define HWIO_IPA_ROUTE_IN in_dword_masked(HWIO_IPA_ROUTE_ADDR, \
+					  HWIO_IPA_ROUTE_RMSK)
+#define HWIO_IPA_ROUTE_INM(m) in_dword_masked(HWIO_IPA_ROUTE_ADDR, m)
+#define HWIO_IPA_ROUTE_OUT(v) out_dword(HWIO_IPA_ROUTE_ADDR, v)
+#define HWIO_IPA_ROUTE_OUTM(m, v) out_dword_masked_ns(HWIO_IPA_ROUTE_ADDR, \
+						      m, \
+						      v, \
+						      HWIO_IPA_ROUTE_IN)
+#define HWIO_IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_BMSK 0x1000000
+#define HWIO_IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_SHFT 0x18
+#define HWIO_IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK 0x3e0000
+#define HWIO_IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT 0x11
+#define HWIO_IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK 0x1ff80
+#define HWIO_IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT 0x7
+#define HWIO_IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK 0x40
+#define HWIO_IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT 0x6
+#define HWIO_IPA_ROUTE_ROUTE_DEF_PIPE_BMSK 0x3e
+#define HWIO_IPA_ROUTE_ROUTE_DEF_PIPE_SHFT 0x1
+#define HWIO_IPA_ROUTE_ROUTE_DIS_BMSK 0x1
+#define HWIO_IPA_ROUTE_ROUTE_DIS_SHFT 0x0
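+/*
+ * Usage sketch (illustrative): the _OUTM helper performs a
+ * read-modify-write via out_dword_masked_ns(), so a single field can
+ * be updated without disturbing the rest of the register, e.g.:
+ *
+ *   HWIO_IPA_ROUTE_OUTM(HWIO_IPA_ROUTE_ROUTE_DEF_PIPE_BMSK,
+ *		def_pipe << HWIO_IPA_ROUTE_ROUTE_DEF_PIPE_SHFT);
+ *
+ * where def_pipe is a caller-supplied pipe index (hypothetical name).
+ */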
+#define HWIO_IPA_FILTER_ADDR (IPA_CFG_REG_BASE + 0x0000004c)
+#define HWIO_IPA_FILTER_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000004c)
+#define HWIO_IPA_FILTER_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000004c)
+#define HWIO_IPA_MASTER_PRIORITY_ADDR (IPA_CFG_REG_BASE + 0x00000050)
+#define HWIO_IPA_MASTER_PRIORITY_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000050)
+#define HWIO_IPA_MASTER_PRIORITY_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000050)
+#define HWIO_IPA_SHARED_MEM_SIZE_ADDR (IPA_CFG_REG_BASE + 0x00000054)
+#define HWIO_IPA_SHARED_MEM_SIZE_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000054)
+#define HWIO_IPA_SHARED_MEM_SIZE_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000054)
+#define HWIO_IPA_NAT_TIMER_ADDR (IPA_CFG_REG_BASE + 0x00000058)
+#define HWIO_IPA_NAT_TIMER_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000058)
+#define HWIO_IPA_NAT_TIMER_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000058)
+#define HWIO_IPA_TAG_TIMER_ADDR (IPA_CFG_REG_BASE + 0x00000060)
+#define HWIO_IPA_TAG_TIMER_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000060)
+#define HWIO_IPA_TAG_TIMER_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000060)
+#define HWIO_IPA_FRAG_RULES_CLR_ADDR (IPA_CFG_REG_BASE + 0x0000006c)
+#define HWIO_IPA_FRAG_RULES_CLR_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000006c)
+#define HWIO_IPA_FRAG_RULES_CLR_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000006c)
+#define HWIO_IPA_PROC_IPH_CFG_ADDR (IPA_CFG_REG_BASE + 0x00000070)
+#define HWIO_IPA_PROC_IPH_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000070)
+#define HWIO_IPA_PROC_IPH_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000070)
+#define HWIO_IPA_PROC_IPH_CFG_RMSK 0x1ff0ff7
+#define HWIO_IPA_PROC_IPH_CFG_ATTR 0x3
+#define HWIO_IPA_PROC_IPH_CFG_IN in_dword_masked( \
+		HWIO_IPA_PROC_IPH_CFG_ADDR, \
+		HWIO_IPA_PROC_IPH_CFG_RMSK)
+#define HWIO_IPA_PROC_IPH_CFG_INM(m) in_dword_masked( \
+		HWIO_IPA_PROC_IPH_CFG_ADDR, \
+		m)
+#define HWIO_IPA_PROC_IPH_CFG_OUT(v) out_dword(HWIO_IPA_PROC_IPH_CFG_ADDR, \
+					       v)
+#define HWIO_IPA_PROC_IPH_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_PROC_IPH_CFG_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_PROC_IPH_CFG_IN)
+#define HWIO_IPA_PROC_IPH_CFG_D_DCPH_MULTI_ENGINE_DISABLE_BMSK 0x1000000
+#define HWIO_IPA_PROC_IPH_CFG_D_DCPH_MULTI_ENGINE_DISABLE_SHFT 0x18
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PKT_PARSER_PROTOCOL_STOP_VALUE_BMSK \
+	0xff0000
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PKT_PARSER_PROTOCOL_STOP_VALUE_SHFT 0x10
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PKT_PARSER_IHL_TO_2ND_FRAG_EN_BMSK 0x800
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PKT_PARSER_IHL_TO_2ND_FRAG_EN_SHFT 0xb
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PKT_PARSER_PROTOCOL_STOP_DEST_BMSK 0x400
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PKT_PARSER_PROTOCOL_STOP_DEST_SHFT 0xa
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PKT_PARSER_PROTOCOL_STOP_HOP_BMSK 0x200
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PKT_PARSER_PROTOCOL_STOP_HOP_SHFT 0x9
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PKT_PARSER_PROTOCOL_STOP_ENABLE_BMSK \
+	0x100
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PKT_PARSER_PROTOCOL_STOP_ENABLE_SHFT 0x8
+#define HWIO_IPA_PROC_IPH_CFG_FTCH_DCPH_OVERLAP_ENABLE_BMSK 0x80
+#define HWIO_IPA_PROC_IPH_CFG_FTCH_DCPH_OVERLAP_ENABLE_SHFT 0x7
+#define HWIO_IPA_PROC_IPH_CFG_PIPESTAGE_OVERLAP_DISABLE_BMSK 0x40
+#define HWIO_IPA_PROC_IPH_CFG_PIPESTAGE_OVERLAP_DISABLE_SHFT 0x6
+#define HWIO_IPA_PROC_IPH_CFG_STATUS_FROM_IPH_FRST_ALWAYS_BMSK 0x10
+#define HWIO_IPA_PROC_IPH_CFG_STATUS_FROM_IPH_FRST_ALWAYS_SHFT 0x4
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PIPELINING_DISABLE_BMSK 0x4
+#define HWIO_IPA_PROC_IPH_CFG_IPH_PIPELINING_DISABLE_SHFT 0x2
+#define HWIO_IPA_PROC_IPH_CFG_IPH_THRESHOLD_BMSK 0x3
+#define HWIO_IPA_PROC_IPH_CFG_IPH_THRESHOLD_SHFT 0x0
+#define HWIO_IPA_QSB_MAX_WRITES_ADDR (IPA_CFG_REG_BASE + 0x00000074)
+#define HWIO_IPA_QSB_MAX_WRITES_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000074)
+#define HWIO_IPA_QSB_MAX_WRITES_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000074)
+#define HWIO_IPA_QSB_MAX_READS_ADDR (IPA_CFG_REG_BASE + 0x00000078)
+#define HWIO_IPA_QSB_MAX_READS_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000078)
+#define HWIO_IPA_QSB_MAX_READS_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000078)
+#define HWIO_IPA_QSB_OUTSTANDING_COUNTER_ADDR (IPA_CFG_REG_BASE + \
+					       0x0000007c)
+#define HWIO_IPA_QSB_OUTSTANDING_COUNTER_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					       0x0000007c)
+#define HWIO_IPA_QSB_OUTSTANDING_COUNTER_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					       0x0000007c)
+#define HWIO_IPA_QSB_OUTSTANDING_BEATS_COUNTER_ADDR (IPA_CFG_REG_BASE +	\
+						     0x00000080)
+#define HWIO_IPA_QSB_OUTSTANDING_BEATS_COUNTER_PHYS (IPA_CFG_REG_BASE_PHYS \
+						     + 0x00000080)
+#define HWIO_IPA_QSB_OUTSTANDING_BEATS_COUNTER_OFFS (IPA_CFG_REG_BASE_OFFS \
+						     + 0x00000080)
+#define HWIO_IPA_QSB_READ_CFG_ADDR (IPA_CFG_REG_BASE + 0x00000084)
+#define HWIO_IPA_QSB_READ_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000084)
+#define HWIO_IPA_QSB_READ_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000084)
+#define HWIO_IPA_DPL_TIMER_LSB_ADDR (IPA_CFG_REG_BASE + 0x00000088)
+#define HWIO_IPA_DPL_TIMER_LSB_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000088)
+#define HWIO_IPA_DPL_TIMER_LSB_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000088)
+#define HWIO_IPA_DPL_TIMER_LSB_RMSK 0xffffffff
+#define HWIO_IPA_DPL_TIMER_LSB_ATTR 0x3
+#define HWIO_IPA_DPL_TIMER_LSB_IN in_dword_masked( \
+		HWIO_IPA_DPL_TIMER_LSB_ADDR, \
+		HWIO_IPA_DPL_TIMER_LSB_RMSK)
+#define HWIO_IPA_DPL_TIMER_LSB_INM(m) in_dword_masked( \
+		HWIO_IPA_DPL_TIMER_LSB_ADDR, \
+		m)
+#define HWIO_IPA_DPL_TIMER_LSB_OUT(v) out_dword( \
+		HWIO_IPA_DPL_TIMER_LSB_ADDR, \
+		v)
+#define HWIO_IPA_DPL_TIMER_LSB_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_DPL_TIMER_LSB_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_DPL_TIMER_LSB_IN)
+#define HWIO_IPA_DPL_TIMER_LSB_TOD_LSB_BMSK 0xffffffff
+#define HWIO_IPA_DPL_TIMER_LSB_TOD_LSB_SHFT 0x0
+#define HWIO_IPA_DPL_TIMER_MSB_ADDR (IPA_CFG_REG_BASE + 0x0000008c)
+#define HWIO_IPA_DPL_TIMER_MSB_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000008c)
+#define HWIO_IPA_DPL_TIMER_MSB_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000008c)
+#define HWIO_IPA_DPL_TIMER_MSB_RMSK 0x8000ffff
+#define HWIO_IPA_DPL_TIMER_MSB_ATTR 0x3
+#define HWIO_IPA_DPL_TIMER_MSB_IN in_dword_masked( \
+		HWIO_IPA_DPL_TIMER_MSB_ADDR, \
+		HWIO_IPA_DPL_TIMER_MSB_RMSK)
+#define HWIO_IPA_DPL_TIMER_MSB_INM(m) in_dword_masked( \
+		HWIO_IPA_DPL_TIMER_MSB_ADDR, \
+		m)
+#define HWIO_IPA_DPL_TIMER_MSB_OUT(v) out_dword( \
+		HWIO_IPA_DPL_TIMER_MSB_ADDR, \
+		v)
+#define HWIO_IPA_DPL_TIMER_MSB_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_DPL_TIMER_MSB_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_DPL_TIMER_MSB_IN)
+#define HWIO_IPA_DPL_TIMER_MSB_TIMER_EN_BMSK 0x80000000
+#define HWIO_IPA_DPL_TIMER_MSB_TIMER_EN_SHFT 0x1f
+#define HWIO_IPA_DPL_TIMER_MSB_TOD_MSB_BMSK 0xffff
+#define HWIO_IPA_DPL_TIMER_MSB_TOD_MSB_SHFT 0x0
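+/*
+ * The IPA_STATE_* registers from here on are read-only (ATTR 0x1)
+ * idle/empty status bits for the internal pipeline stages (TX wrapper,
+ * fetchers, ACL, coalescing master, and so on), presumably polled for
+ * debug and quiescence checks.
+ */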
+#define HWIO_IPA_STATE_TX_WRAPPER_ADDR (IPA_CFG_REG_BASE + 0x00000090)
+#define HWIO_IPA_STATE_TX_WRAPPER_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000090)
+#define HWIO_IPA_STATE_TX_WRAPPER_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000090)
+#define HWIO_IPA_STATE_TX_WRAPPER_RMSK 0x1e01ffff
+#define HWIO_IPA_STATE_TX_WRAPPER_ATTR 0x1
+#define HWIO_IPA_STATE_TX_WRAPPER_IN in_dword_masked( \
+		HWIO_IPA_STATE_TX_WRAPPER_ADDR,	\
+		HWIO_IPA_STATE_TX_WRAPPER_RMSK)
+#define HWIO_IPA_STATE_TX_WRAPPER_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_TX_WRAPPER_ADDR,	\
+		m)
+#define HWIO_IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_BMSK 0x1e000000
+#define HWIO_IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_SHFT 0x19
+#define HWIO_IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_BMSK 0x10000
+#define HWIO_IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_SHFT 0x10
+#define HWIO_IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_BMSK 0x8000
+#define HWIO_IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_SHFT 0xf
+#define HWIO_IPA_STATE_TX_WRAPPER_COAL_DIRECT_DMA_BMSK 0x6000
+#define HWIO_IPA_STATE_TX_WRAPPER_COAL_DIRECT_DMA_SHFT 0xd
+#define HWIO_IPA_STATE_TX_WRAPPER_NLO_DIRECT_DMA_BMSK 0x1800
+#define HWIO_IPA_STATE_TX_WRAPPER_NLO_DIRECT_DMA_SHFT 0xb
+#define HWIO_IPA_STATE_TX_WRAPPER_PKT_DROP_CNT_IDLE_BMSK 0x400
+#define HWIO_IPA_STATE_TX_WRAPPER_PKT_DROP_CNT_IDLE_SHFT 0xa
+#define HWIO_IPA_STATE_TX_WRAPPER_TRNSEQ_FORCE_VALID_BMSK 0x200
+#define HWIO_IPA_STATE_TX_WRAPPER_TRNSEQ_FORCE_VALID_SHFT 0x9
+#define HWIO_IPA_STATE_TX_WRAPPER_MBIM_DIRECT_DMA_BMSK 0x180
+#define HWIO_IPA_STATE_TX_WRAPPER_MBIM_DIRECT_DMA_SHFT 0x7
+#define HWIO_IPA_STATE_TX_WRAPPER_IPA_MBIM_PKT_FMS_IDLE_BMSK 0x40
+#define HWIO_IPA_STATE_TX_WRAPPER_IPA_MBIM_PKT_FMS_IDLE_SHFT 0x6
+#define HWIO_IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_TOGGLE_IDLE_BMSK 0x20
+#define HWIO_IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_TOGGLE_IDLE_SHFT 0x5
+#define HWIO_IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_BMSK 0x10
+#define HWIO_IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_SHFT 0x4
+#define HWIO_IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_BMSK 0x8
+#define HWIO_IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_SHFT 0x3
+#define HWIO_IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_BMSK 0x4
+#define HWIO_IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_SHFT 0x2
+#define HWIO_IPA_STATE_TX_WRAPPER_TX1_IDLE_BMSK 0x2
+#define HWIO_IPA_STATE_TX_WRAPPER_TX1_IDLE_SHFT 0x1
+#define HWIO_IPA_STATE_TX_WRAPPER_TX0_IDLE_BMSK 0x1
+#define HWIO_IPA_STATE_TX_WRAPPER_TX0_IDLE_SHFT 0x0
+#define HWIO_IPA_STATE_TX1_ADDR (IPA_CFG_REG_BASE + 0x00000094)
+#define HWIO_IPA_STATE_TX1_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000094)
+#define HWIO_IPA_STATE_TX1_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000094)
+#define HWIO_IPA_STATE_TX1_RMSK 0xffffffff
+#define HWIO_IPA_STATE_TX1_ATTR 0x1
+#define HWIO_IPA_STATE_TX1_IN in_dword_masked(HWIO_IPA_STATE_TX1_ADDR, \
+					      HWIO_IPA_STATE_TX1_RMSK)
+#define HWIO_IPA_STATE_TX1_INM(m) in_dword_masked(HWIO_IPA_STATE_TX1_ADDR, \
+						  m)
+#define HWIO_IPA_STATE_TX1_SUSPEND_REQ_EMPTY_BMSK 0x80000000
+#define HWIO_IPA_STATE_TX1_SUSPEND_REQ_EMPTY_SHFT 0x1f
+#define HWIO_IPA_STATE_TX1_LAST_CMD_PIPE_BMSK 0x7c000000
+#define HWIO_IPA_STATE_TX1_LAST_CMD_PIPE_SHFT 0x1a
+#define HWIO_IPA_STATE_TX1_CS_SNIF_IDLE_BMSK 0x2000000
+#define HWIO_IPA_STATE_TX1_CS_SNIF_IDLE_SHFT 0x19
+#define HWIO_IPA_STATE_TX1_SUSPEND_EMPTY_BMSK 0x1000000
+#define HWIO_IPA_STATE_TX1_SUSPEND_EMPTY_SHFT 0x18
+#define HWIO_IPA_STATE_TX1_RSRCREL_IDLE_BMSK 0x800000
+#define HWIO_IPA_STATE_TX1_RSRCREL_IDLE_SHFT 0x17
+#define HWIO_IPA_STATE_TX1_HOLB_MASK_IDLE_BMSK 0x400000
+#define HWIO_IPA_STATE_TX1_HOLB_MASK_IDLE_SHFT 0x16
+#define HWIO_IPA_STATE_TX1_HOLB_IDLE_BMSK 0x200000
+#define HWIO_IPA_STATE_TX1_HOLB_IDLE_SHFT 0x15
+#define HWIO_IPA_STATE_TX1_ALIGNER_EMPTY_BMSK 0x100000
+#define HWIO_IPA_STATE_TX1_ALIGNER_EMPTY_SHFT 0x14
+#define HWIO_IPA_STATE_TX1_PF_EMPTY_BMSK 0x80000
+#define HWIO_IPA_STATE_TX1_PF_EMPTY_SHFT 0x13
+#define HWIO_IPA_STATE_TX1_PF_IDLE_BMSK 0x40000
+#define HWIO_IPA_STATE_TX1_PF_IDLE_SHFT 0x12
+#define HWIO_IPA_STATE_TX1_DMAW_LAST_OUTSD_IDLE_BMSK 0x20000
+#define HWIO_IPA_STATE_TX1_DMAW_LAST_OUTSD_IDLE_SHFT 0x11
+#define HWIO_IPA_STATE_TX1_DMAW_IDLE_BMSK 0x10000
+#define HWIO_IPA_STATE_TX1_DMAW_IDLE_SHFT 0x10
+#define HWIO_IPA_STATE_TX1_AR_IDLE_BMSK 0x8000
+#define HWIO_IPA_STATE_TX1_AR_IDLE_SHFT 0xf
+#define HWIO_IPA_STATE_TX1_TX_CMD_BRESP_INJ_IDLE_BMSK 0x4000
+#define HWIO_IPA_STATE_TX1_TX_CMD_BRESP_INJ_IDLE_SHFT 0xe
+#define HWIO_IPA_STATE_TX1_TX_CMD_BRESP_ALOC_IDLE_BMSK 0x2000
+#define HWIO_IPA_STATE_TX1_TX_CMD_BRESP_ALOC_IDLE_SHFT 0xd
+#define HWIO_IPA_STATE_TX1_TX_CMD_SNIF_IDLE_BMSK 0x1000
+#define HWIO_IPA_STATE_TX1_TX_CMD_SNIF_IDLE_SHFT 0xc
+#define HWIO_IPA_STATE_TX1_TX_CMD_TRNSEQ_IDLE_BMSK 0x800
+#define HWIO_IPA_STATE_TX1_TX_CMD_TRNSEQ_IDLE_SHFT 0xb
+#define HWIO_IPA_STATE_TX1_TX_CMD_MAIN_IDLE_BMSK 0x400
+#define HWIO_IPA_STATE_TX1_TX_CMD_MAIN_IDLE_SHFT 0xa
+#define HWIO_IPA_STATE_TX1_PA_PUB_CNT_EMPTY_BMSK 0x200
+#define HWIO_IPA_STATE_TX1_PA_PUB_CNT_EMPTY_SHFT 0x9
+#define HWIO_IPA_STATE_TX1_PA_RST_IDLE_BMSK 0x100
+#define HWIO_IPA_STATE_TX1_PA_RST_IDLE_SHFT 0x8
+#define HWIO_IPA_STATE_TX1_PA_CTX_IDLE_BMSK 0x80
+#define HWIO_IPA_STATE_TX1_PA_CTX_IDLE_SHFT 0x7
+#define HWIO_IPA_STATE_TX1_PA_IDLE_BMSK 0x40
+#define HWIO_IPA_STATE_TX1_PA_IDLE_SHFT 0x6
+#define HWIO_IPA_STATE_TX1_ARBIT_TYPE_BMSK 0x38
+#define HWIO_IPA_STATE_TX1_ARBIT_TYPE_SHFT 0x3
+#define HWIO_IPA_STATE_TX1_FLOPPED_ARBIT_TYPE_BMSK 0x7
+#define HWIO_IPA_STATE_TX1_FLOPPED_ARBIT_TYPE_SHFT 0x0
+#define HWIO_IPA_STATE_FETCHER_ADDR (IPA_CFG_REG_BASE + 0x00000098)
+#define HWIO_IPA_STATE_FETCHER_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000098)
+#define HWIO_IPA_STATE_FETCHER_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000098)
+#define HWIO_IPA_STATE_FETCHER_RMSK 0xfffff
+#define HWIO_IPA_STATE_FETCHER_ATTR 0x1
+#define HWIO_IPA_STATE_FETCHER_IN in_dword_masked( \
+		HWIO_IPA_STATE_FETCHER_ADDR, \
+		HWIO_IPA_STATE_FETCHER_RMSK)
+#define HWIO_IPA_STATE_FETCHER_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_FETCHER_ADDR, \
+		m)
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_IMM_CMD_EXEC_STATE_IDLE_BMSK \
+	0x80000
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_IMM_CMD_EXEC_STATE_IDLE_SHFT 0x13
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_DMAR_SLOT_STATE_IDLE_BMSK 0x7f000
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_DMAR_SLOT_STATE_IDLE_SHFT 0xc
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_DMAR_STATE_IDLE_BMSK 0xfe0
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_DMAR_STATE_IDLE_SHFT 0x5
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_FTCH_CMPLT_STATE_IDLE_BMSK 0x10
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_FTCH_CMPLT_STATE_IDLE_SHFT 0x4
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_FTCH_IMM_STATE_IDLE_BMSK 0x8
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_FTCH_IMM_STATE_IDLE_SHFT 0x3
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_FTCH_PKT_STATE_IDLE_BMSK 0x4
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_FTCH_PKT_STATE_IDLE_SHFT 0x2
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_FTCH_ALLOC_STATE_IDLE_BMSK 0x2
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_FTCH_ALLOC_STATE_IDLE_SHFT 0x1
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_FTCH_STATE_IDLE_BMSK 0x1
+#define HWIO_IPA_STATE_FETCHER_IPA_HPS_FTCH_STATE_IDLE_SHFT 0x0
+#define HWIO_IPA_STATE_FETCHER_MASK_0_ADDR (IPA_CFG_REG_BASE + 0x0000009c)
+#define HWIO_IPA_STATE_FETCHER_MASK_0_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					    0x0000009c)
+#define HWIO_IPA_STATE_FETCHER_MASK_0_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					    0x0000009c)
+#define HWIO_IPA_STATE_FETCHER_MASK_0_RMSK 0xffffffff
+#define HWIO_IPA_STATE_FETCHER_MASK_0_ATTR 0x1
+#define HWIO_IPA_STATE_FETCHER_MASK_0_IN in_dword_masked( \
+		HWIO_IPA_STATE_FETCHER_MASK_0_ADDR, \
+		HWIO_IPA_STATE_FETCHER_MASK_0_RMSK)
+#define HWIO_IPA_STATE_FETCHER_MASK_0_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_FETCHER_MASK_0_ADDR, \
+		m)
+#define	\
+	HWIO_IPA_STATE_FETCHER_MASK_0_MASK_QUEUE_NO_RESOURCES_HPS_DMAR_BMSK \
+	0xff000000
+#define	\
+	HWIO_IPA_STATE_FETCHER_MASK_0_MASK_QUEUE_NO_RESOURCES_HPS_DMAR_SHFT \
+	0x18
+#define HWIO_IPA_STATE_FETCHER_MASK_0_MASK_QUEUE_NO_RESOURCES_CONTEXT_BMSK \
+	0xff0000
+#define HWIO_IPA_STATE_FETCHER_MASK_0_MASK_QUEUE_NO_RESOURCES_CONTEXT_SHFT \
+	0x10
+#define HWIO_IPA_STATE_FETCHER_MASK_0_MASK_QUEUE_IMM_EXEC_BMSK 0xff00
+#define HWIO_IPA_STATE_FETCHER_MASK_0_MASK_QUEUE_IMM_EXEC_SHFT 0x8
+#define HWIO_IPA_STATE_FETCHER_MASK_0_MASK_QUEUE_DMAR_USES_QUEUE_BMSK 0xff
+#define HWIO_IPA_STATE_FETCHER_MASK_0_MASK_QUEUE_DMAR_USES_QUEUE_SHFT 0x0
+#define HWIO_IPA_STATE_FETCHER_MASK_1_ADDR (IPA_CFG_REG_BASE + 0x000000cc)
+#define HWIO_IPA_STATE_FETCHER_MASK_1_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					    0x000000cc)
+#define HWIO_IPA_STATE_FETCHER_MASK_1_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					    0x000000cc)
+#define HWIO_IPA_STATE_FETCHER_MASK_1_RMSK 0xffffffff
+#define HWIO_IPA_STATE_FETCHER_MASK_1_ATTR 0x1
+#define HWIO_IPA_STATE_FETCHER_MASK_1_IN in_dword_masked( \
+		HWIO_IPA_STATE_FETCHER_MASK_1_ADDR, \
+		HWIO_IPA_STATE_FETCHER_MASK_1_RMSK)
+#define HWIO_IPA_STATE_FETCHER_MASK_1_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_FETCHER_MASK_1_ADDR, \
+		m)
+#define HWIO_IPA_STATE_FETCHER_MASK_1_MASK_QUEUE_NO_SPACE_DPL_FIFO_BMSK	\
+	0xff000000
+#define HWIO_IPA_STATE_FETCHER_MASK_1_MASK_QUEUE_NO_SPACE_DPL_FIFO_SHFT	\
+	0x18
+#define HWIO_IPA_STATE_FETCHER_MASK_1_MASK_QUEUE_STEP_MODE_BMSK 0xff0000
+#define HWIO_IPA_STATE_FETCHER_MASK_1_MASK_QUEUE_STEP_MODE_SHFT 0x10
+#define HWIO_IPA_STATE_FETCHER_MASK_1_MASK_QUEUE_ARB_LOCK_BMSK 0xff00
+#define HWIO_IPA_STATE_FETCHER_MASK_1_MASK_QUEUE_ARB_LOCK_SHFT 0x8
+#define	\
+	HWIO_IPA_STATE_FETCHER_MASK_1_MASK_QUEUE_NO_RESOURCES_ACK_ENTRY_BMSK \
+	0xff
+#define	\
+	HWIO_IPA_STATE_FETCHER_MASK_1_MASK_QUEUE_NO_RESOURCES_ACK_ENTRY_SHFT \
+	0x0
+#define HWIO_IPA_STATE_DPL_FIFO_ADDR (IPA_CFG_REG_BASE + 0x000000d0)
+#define HWIO_IPA_STATE_DPL_FIFO_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000d0)
+#define HWIO_IPA_STATE_DPL_FIFO_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000d0)
+#define HWIO_IPA_STATE_DPL_FIFO_RMSK 0x7
+#define HWIO_IPA_STATE_DPL_FIFO_ATTR 0x1
+#define HWIO_IPA_STATE_DPL_FIFO_IN in_dword_masked( \
+		HWIO_IPA_STATE_DPL_FIFO_ADDR, \
+		HWIO_IPA_STATE_DPL_FIFO_RMSK)
+#define HWIO_IPA_STATE_DPL_FIFO_INM(m) in_dword_masked(	\
+		HWIO_IPA_STATE_DPL_FIFO_ADDR, \
+		m)
+#define HWIO_IPA_STATE_DPL_FIFO_POP_FSM_STATE_BMSK 0x7
+#define HWIO_IPA_STATE_DPL_FIFO_POP_FSM_STATE_SHFT 0x0
+#define HWIO_IPA_STATE_COAL_MASTER_ADDR (IPA_CFG_REG_BASE + 0x000000d4)
+#define HWIO_IPA_STATE_COAL_MASTER_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					 0x000000d4)
+#define HWIO_IPA_STATE_COAL_MASTER_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					 0x000000d4)
+#define HWIO_IPA_STATE_COAL_MASTER_RMSK 0xffffffff
+#define HWIO_IPA_STATE_COAL_MASTER_ATTR 0x1
+#define HWIO_IPA_STATE_COAL_MASTER_IN in_dword_masked( \
+		HWIO_IPA_STATE_COAL_MASTER_ADDR, \
+		HWIO_IPA_STATE_COAL_MASTER_RMSK)
+#define HWIO_IPA_STATE_COAL_MASTER_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_COAL_MASTER_ADDR, \
+		m)
+#define HWIO_IPA_STATE_COAL_MASTER_VP_TIMER_EXPIRED_BMSK 0xf0000000
+#define HWIO_IPA_STATE_COAL_MASTER_VP_TIMER_EXPIRED_SHFT 0x1c
+#define HWIO_IPA_STATE_COAL_MASTER_LRU_VP_BMSK 0xf000000
+#define HWIO_IPA_STATE_COAL_MASTER_LRU_VP_SHFT 0x18
+#define HWIO_IPA_STATE_COAL_MASTER_INIT_VP_FSM_STATE_BMSK 0xf00000
+#define HWIO_IPA_STATE_COAL_MASTER_INIT_VP_FSM_STATE_SHFT 0x14
+#define HWIO_IPA_STATE_COAL_MASTER_CHECK_FIT_FSM_STATE_BMSK 0xf0000
+#define HWIO_IPA_STATE_COAL_MASTER_CHECK_FIT_FSM_STATE_SHFT 0x10
+#define HWIO_IPA_STATE_COAL_MASTER_HASH_CALC_FSM_STATE_BMSK 0xf000
+#define HWIO_IPA_STATE_COAL_MASTER_HASH_CALC_FSM_STATE_SHFT 0xc
+#define HWIO_IPA_STATE_COAL_MASTER_FIND_OPEN_FSM_STATE_BMSK 0xf00
+#define HWIO_IPA_STATE_COAL_MASTER_FIND_OPEN_FSM_STATE_SHFT 0x8
+#define HWIO_IPA_STATE_COAL_MASTER_MAIN_FSM_STATE_BMSK 0xf0
+#define HWIO_IPA_STATE_COAL_MASTER_MAIN_FSM_STATE_SHFT 0x4
+#define HWIO_IPA_STATE_COAL_MASTER_VP_VLD_BMSK 0xf
+#define HWIO_IPA_STATE_COAL_MASTER_VP_VLD_SHFT 0x0
+#define HWIO_IPA_STATE_DFETCHER_ADDR (IPA_CFG_REG_BASE + 0x000000a0)
+#define HWIO_IPA_STATE_DFETCHER_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000a0)
+#define HWIO_IPA_STATE_DFETCHER_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000a0)
+#define HWIO_IPA_STATE_DFETCHER_RMSK 0x3f3f3
+#define HWIO_IPA_STATE_DFETCHER_ATTR 0x1
+#define HWIO_IPA_STATE_DFETCHER_IN in_dword_masked( \
+		HWIO_IPA_STATE_DFETCHER_ADDR, \
+		HWIO_IPA_STATE_DFETCHER_RMSK)
+#define HWIO_IPA_STATE_DFETCHER_INM(m) in_dword_masked(	\
+		HWIO_IPA_STATE_DFETCHER_ADDR, \
+		m)
+#define HWIO_IPA_STATE_DFETCHER_IPA_DPS_DMAR_SLOT_STATE_IDLE_BMSK 0x3f000
+#define HWIO_IPA_STATE_DFETCHER_IPA_DPS_DMAR_SLOT_STATE_IDLE_SHFT 0xc
+#define HWIO_IPA_STATE_DFETCHER_IPA_DPS_DMAR_STATE_IDLE_BMSK 0x3f0
+#define HWIO_IPA_STATE_DFETCHER_IPA_DPS_DMAR_STATE_IDLE_SHFT 0x4
+#define HWIO_IPA_STATE_DFETCHER_IPA_DPS_FTCH_CMPLT_STATE_IDLE_BMSK 0x2
+#define HWIO_IPA_STATE_DFETCHER_IPA_DPS_FTCH_CMPLT_STATE_IDLE_SHFT 0x1
+#define HWIO_IPA_STATE_DFETCHER_IPA_DPS_FTCH_PKT_STATE_IDLE_BMSK 0x1
+#define HWIO_IPA_STATE_DFETCHER_IPA_DPS_FTCH_PKT_STATE_IDLE_SHFT 0x0
+#define HWIO_IPA_STATE_ACL_ADDR (IPA_CFG_REG_BASE + 0x000000a4)
+#define HWIO_IPA_STATE_ACL_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000a4)
+#define HWIO_IPA_STATE_ACL_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000a4)
+#define HWIO_IPA_STATE_ACL_RMSK 0xffcffff
+#define HWIO_IPA_STATE_ACL_ATTR 0x1
+#define HWIO_IPA_STATE_ACL_IN in_dword_masked(HWIO_IPA_STATE_ACL_ADDR, \
+					      HWIO_IPA_STATE_ACL_RMSK)
+#define HWIO_IPA_STATE_ACL_INM(m) in_dword_masked(HWIO_IPA_STATE_ACL_ADDR, \
+						  m)
+#define HWIO_IPA_STATE_ACL_IPA_HPS_COAL_MASTER_ACTIVE_BMSK 0x8000000
+#define HWIO_IPA_STATE_ACL_IPA_HPS_COAL_MASTER_ACTIVE_SHFT 0x1b
+#define HWIO_IPA_STATE_ACL_IPA_HPS_COAL_MASTER_EMPTY_BMSK 0x4000000
+#define HWIO_IPA_STATE_ACL_IPA_HPS_COAL_MASTER_EMPTY_SHFT 0x1a
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_2ND_ACTIVE_BMSK 0x2000000
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_2ND_ACTIVE_SHFT 0x19
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_2ND_EMPTY_BMSK 0x1000000
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_2ND_EMPTY_SHFT 0x18
+#define HWIO_IPA_STATE_ACL_IPA_DPS_SEQUENCER_IDLE_BMSK 0x800000
+#define HWIO_IPA_STATE_ACL_IPA_DPS_SEQUENCER_IDLE_SHFT 0x17
+#define HWIO_IPA_STATE_ACL_IPA_HPS_SEQUENCER_IDLE_BMSK 0x400000
+#define HWIO_IPA_STATE_ACL_IPA_HPS_SEQUENCER_IDLE_SHFT 0x16
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_2_ACTIVE_BMSK 0x200000
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_2_ACTIVE_SHFT 0x15
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_2_EMPTY_BMSK 0x100000
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_2_EMPTY_SHFT 0x14
+#define HWIO_IPA_STATE_ACL_IPA_DPS_DISPATCHER_ACTIVE_BMSK 0x80000
+#define HWIO_IPA_STATE_ACL_IPA_DPS_DISPATCHER_ACTIVE_SHFT 0x13
+#define HWIO_IPA_STATE_ACL_IPA_DPS_DISPATCHER_EMPTY_BMSK 0x40000
+#define HWIO_IPA_STATE_ACL_IPA_DPS_DISPATCHER_EMPTY_SHFT 0x12
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_ACTIVE_BMSK 0x8000
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_ACTIVE_SHFT 0xf
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_EMPTY_BMSK 0x4000
+#define HWIO_IPA_STATE_ACL_IPA_DPS_D_DCPH_EMPTY_SHFT 0xe
+#define HWIO_IPA_STATE_ACL_IPA_HPS_ENQUEUER_ACTIVE_BMSK 0x2000
+#define HWIO_IPA_STATE_ACL_IPA_HPS_ENQUEUER_ACTIVE_SHFT 0xd
+#define HWIO_IPA_STATE_ACL_IPA_HPS_ENQUEUER_EMPTY_BMSK 0x1000
+#define HWIO_IPA_STATE_ACL_IPA_HPS_ENQUEUER_EMPTY_SHFT 0xc
+#define HWIO_IPA_STATE_ACL_IPA_HPS_UCP_ACTIVE_BMSK 0x800
+#define HWIO_IPA_STATE_ACL_IPA_HPS_UCP_ACTIVE_SHFT 0xb
+#define HWIO_IPA_STATE_ACL_IPA_HPS_UCP_EMPTY_BMSK 0x400
+#define HWIO_IPA_STATE_ACL_IPA_HPS_UCP_EMPTY_SHFT 0xa
+#define HWIO_IPA_STATE_ACL_IPA_HPS_HDRI_ACTIVE_BMSK 0x200
+#define HWIO_IPA_STATE_ACL_IPA_HPS_HDRI_ACTIVE_SHFT 0x9
+#define HWIO_IPA_STATE_ACL_IPA_HPS_HDRI_EMPTY_BMSK 0x100
+#define HWIO_IPA_STATE_ACL_IPA_HPS_HDRI_EMPTY_SHFT 0x8
+#define HWIO_IPA_STATE_ACL_IPA_HPS_ROUTER_ACTIVE_BMSK 0x80
+#define HWIO_IPA_STATE_ACL_IPA_HPS_ROUTER_ACTIVE_SHFT 0x7
+#define HWIO_IPA_STATE_ACL_IPA_HPS_ROUTER_EMPTY_BMSK 0x40
+#define HWIO_IPA_STATE_ACL_IPA_HPS_ROUTER_EMPTY_SHFT 0x6
+#define HWIO_IPA_STATE_ACL_IPA_HPS_FILTER_NAT_ACTIVE_BMSK 0x20
+#define HWIO_IPA_STATE_ACL_IPA_HPS_FILTER_NAT_ACTIVE_SHFT 0x5
+#define HWIO_IPA_STATE_ACL_IPA_HPS_FILTER_NAT_EMPTY_BMSK 0x10
+#define HWIO_IPA_STATE_ACL_IPA_HPS_FILTER_NAT_EMPTY_SHFT 0x4
+#define HWIO_IPA_STATE_ACL_IPA_HPS_PKT_PARSER_ACTIVE_BMSK 0x8
+#define HWIO_IPA_STATE_ACL_IPA_HPS_PKT_PARSER_ACTIVE_SHFT 0x3
+#define HWIO_IPA_STATE_ACL_IPA_HPS_PKT_PARSER_EMPTY_BMSK 0x4
+#define HWIO_IPA_STATE_ACL_IPA_HPS_PKT_PARSER_EMPTY_SHFT 0x2
+#define HWIO_IPA_STATE_ACL_IPA_HPS_H_DCPH_ACTIVE_BMSK 0x2
+#define HWIO_IPA_STATE_ACL_IPA_HPS_H_DCPH_ACTIVE_SHFT 0x1
+#define HWIO_IPA_STATE_ACL_IPA_HPS_H_DCPH_EMPTY_BMSK 0x1
+#define HWIO_IPA_STATE_ACL_IPA_HPS_H_DCPH_EMPTY_SHFT 0x0
+#define HWIO_IPA_STATE_GSI_TLV_ADDR (IPA_CFG_REG_BASE + 0x000000b8)
+#define HWIO_IPA_STATE_GSI_TLV_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000b8)
+#define HWIO_IPA_STATE_GSI_TLV_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000b8)
+#define HWIO_IPA_STATE_GSI_TLV_RMSK 0x1
+#define HWIO_IPA_STATE_GSI_TLV_ATTR 0x1
+#define HWIO_IPA_STATE_GSI_TLV_IN in_dword_masked( \
+		HWIO_IPA_STATE_GSI_TLV_ADDR, \
+		HWIO_IPA_STATE_GSI_TLV_RMSK)
+#define HWIO_IPA_STATE_GSI_TLV_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_GSI_TLV_ADDR, \
+		m)
+#define HWIO_IPA_STATE_GSI_TLV_IPA_GSI_TOGGLE_FSM_IDLE_BMSK 0x1
+#define HWIO_IPA_STATE_GSI_TLV_IPA_GSI_TOGGLE_FSM_IDLE_SHFT 0x0
+#define HWIO_IPA_STATE_GSI_AOS_ADDR (IPA_CFG_REG_BASE + 0x000000bc)
+#define HWIO_IPA_STATE_GSI_AOS_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000bc)
+#define HWIO_IPA_STATE_GSI_AOS_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000bc)
+#define HWIO_IPA_STATE_GSI_AOS_RMSK 0x1
+#define HWIO_IPA_STATE_GSI_AOS_ATTR 0x1
+#define HWIO_IPA_STATE_GSI_AOS_IN in_dword_masked( \
+		HWIO_IPA_STATE_GSI_AOS_ADDR, \
+		HWIO_IPA_STATE_GSI_AOS_RMSK)
+#define HWIO_IPA_STATE_GSI_AOS_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_GSI_AOS_ADDR, \
+		m)
+#define HWIO_IPA_STATE_GSI_AOS_IPA_GSI_AOS_FSM_IDLE_BMSK 0x1
+#define HWIO_IPA_STATE_GSI_AOS_IPA_GSI_AOS_FSM_IDLE_SHFT 0x0
+#define HWIO_IPA_STATE_GSI_IF_ADDR (IPA_CFG_REG_BASE + 0x000000c0)
+#define HWIO_IPA_STATE_GSI_IF_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000c0)
+#define HWIO_IPA_STATE_GSI_IF_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000c0)
+#define HWIO_IPA_STATE_GSI_IF_RMSK 0xff
+#define HWIO_IPA_STATE_GSI_IF_ATTR 0x1
+#define HWIO_IPA_STATE_GSI_IF_IN in_dword_masked( \
+		HWIO_IPA_STATE_GSI_IF_ADDR, \
+		HWIO_IPA_STATE_GSI_IF_RMSK)
+#define HWIO_IPA_STATE_GSI_IF_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_GSI_IF_ADDR, \
+		m)
+#define HWIO_IPA_STATE_GSI_IF_IPA_GSI_PROD_FSM_TX_1_BMSK 0xf0
+#define HWIO_IPA_STATE_GSI_IF_IPA_GSI_PROD_FSM_TX_1_SHFT 0x4
+#define HWIO_IPA_STATE_GSI_IF_IPA_GSI_PROD_FSM_TX_0_BMSK 0xf
+#define HWIO_IPA_STATE_GSI_IF_IPA_GSI_PROD_FSM_TX_0_SHFT 0x0
+#define HWIO_IPA_STATE_GSI_SKIP_ADDR (IPA_CFG_REG_BASE + 0x000000c4)
+#define HWIO_IPA_STATE_GSI_SKIP_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000c4)
+#define HWIO_IPA_STATE_GSI_SKIP_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000c4)
+#define HWIO_IPA_STATE_GSI_SKIP_RMSK 0x3
+#define HWIO_IPA_STATE_GSI_SKIP_ATTR 0x1
+#define HWIO_IPA_STATE_GSI_SKIP_IN in_dword_masked( \
+		HWIO_IPA_STATE_GSI_SKIP_ADDR, \
+		HWIO_IPA_STATE_GSI_SKIP_RMSK)
+#define HWIO_IPA_STATE_GSI_SKIP_INM(m) in_dword_masked(	\
+		HWIO_IPA_STATE_GSI_SKIP_ADDR, \
+		m)
+#define HWIO_IPA_STATE_GSI_SKIP_IPA_GSI_SKIP_FSM_BMSK 0x3
+#define HWIO_IPA_STATE_GSI_SKIP_IPA_GSI_SKIP_FSM_SHFT 0x0
+#define HWIO_IPA_STATE_GSI_IF_CONS_ADDR (IPA_CFG_REG_BASE + 0x000000c8)
+#define HWIO_IPA_STATE_GSI_IF_CONS_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					 0x000000c8)
+#define HWIO_IPA_STATE_GSI_IF_CONS_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					 0x000000c8)
+#define HWIO_IPA_STATE_GSI_IF_CONS_RMSK 0x7ffffff
+#define HWIO_IPA_STATE_GSI_IF_CONS_ATTR 0x1
+#define HWIO_IPA_STATE_GSI_IF_CONS_IN in_dword_masked( \
+		HWIO_IPA_STATE_GSI_IF_CONS_ADDR, \
+		HWIO_IPA_STATE_GSI_IF_CONS_RMSK)
+#define HWIO_IPA_STATE_GSI_IF_CONS_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_GSI_IF_CONS_ADDR, \
+		m)
+#define	\
+	HWIO_IPA_STATE_GSI_IF_CONS_IPA_STATE_GSI_IF_CONS_RX_REQ_NO_ZERO_BMSK \
+	0x7fe0000
+#define	\
+	HWIO_IPA_STATE_GSI_IF_CONS_IPA_STATE_GSI_IF_CONS_RX_REQ_NO_ZERO_SHFT \
+	0x11
+#define HWIO_IPA_STATE_GSI_IF_CONS_IPA_STATE_GSI_IF_CONS_RX_REQ_BMSK \
+	0x1ff80
+#define HWIO_IPA_STATE_GSI_IF_CONS_IPA_STATE_GSI_IF_CONS_RX_REQ_SHFT 0x7
+#define HWIO_IPA_STATE_GSI_IF_CONS_IPA_STATE_GSI_IF_CONS_CACHE_VLD_BMSK	\
+	0x7e
+#define HWIO_IPA_STATE_GSI_IF_CONS_IPA_STATE_GSI_IF_CONS_CACHE_VLD_SHFT	\
+	0x1
+#define HWIO_IPA_STATE_GSI_IF_CONS_IPA_STATE_GSI_IF_CONS_STATE_BMSK 0x1
+#define HWIO_IPA_STATE_GSI_IF_CONS_IPA_STATE_GSI_IF_CONS_STATE_SHFT 0x0
+#define HWIO_IPA_STATE_ADDR (IPA_CFG_REG_BASE + 0x000000a8)
+#define HWIO_IPA_STATE_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000a8)
+#define HWIO_IPA_STATE_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000a8)
+#define HWIO_IPA_STATE_RMSK 0xf7ffffff
+#define HWIO_IPA_STATE_ATTR 0x1
+#define HWIO_IPA_STATE_IN in_dword_masked(HWIO_IPA_STATE_ADDR, \
+					  HWIO_IPA_STATE_RMSK)
+#define HWIO_IPA_STATE_INM(m) in_dword_masked(HWIO_IPA_STATE_ADDR, m)
+#define HWIO_IPA_STATE_IPA_UC_RX_HND_CMDQ_EMPTY_BMSK 0x80000000
+#define HWIO_IPA_STATE_IPA_UC_RX_HND_CMDQ_EMPTY_SHFT 0x1f
+#define HWIO_IPA_STATE_IPA_DPS_TX_EMPTY_BMSK 0x40000000
+#define HWIO_IPA_STATE_IPA_DPS_TX_EMPTY_SHFT 0x1e
+#define HWIO_IPA_STATE_IPA_HPS_DPS_EMPTY_BMSK 0x20000000
+#define HWIO_IPA_STATE_IPA_HPS_DPS_EMPTY_SHFT 0x1d
+#define HWIO_IPA_STATE_IPA_RX_HPS_EMPTY_BMSK 0x10000000
+#define HWIO_IPA_STATE_IPA_RX_HPS_EMPTY_SHFT 0x1c
+#define HWIO_IPA_STATE_IPA_RX_SPLT_CMDQ_EMPTY_BMSK 0x7800000
+#define HWIO_IPA_STATE_IPA_RX_SPLT_CMDQ_EMPTY_SHFT 0x17
+#define HWIO_IPA_STATE_IPA_TX_COMMANDER_CMDQ_EMPTY_BMSK 0x400000
+#define HWIO_IPA_STATE_IPA_TX_COMMANDER_CMDQ_EMPTY_SHFT 0x16
+#define HWIO_IPA_STATE_IPA_RX_ACKQ_EMPTY_BMSK 0x200000
+#define HWIO_IPA_STATE_IPA_RX_ACKQ_EMPTY_SHFT 0x15
+#define HWIO_IPA_STATE_IPA_UC_ACKQ_EMPTY_BMSK 0x100000
+#define HWIO_IPA_STATE_IPA_UC_ACKQ_EMPTY_SHFT 0x14
+#define HWIO_IPA_STATE_IPA_TX_ACKQ_EMPTY_BMSK 0x80000
+#define HWIO_IPA_STATE_IPA_TX_ACKQ_EMPTY_SHFT 0x13
+#define HWIO_IPA_STATE_IPA_NTF_TX_EMPTY_BMSK 0x40000
+#define HWIO_IPA_STATE_IPA_NTF_TX_EMPTY_SHFT 0x12
+#define HWIO_IPA_STATE_IPA_FULL_IDLE_BMSK 0x20000
+#define HWIO_IPA_STATE_IPA_FULL_IDLE_SHFT 0x11
+#define HWIO_IPA_STATE_IPA_PROD_BRESP_IDLE_BMSK 0x10000
+#define HWIO_IPA_STATE_IPA_PROD_BRESP_IDLE_SHFT 0x10
+#define HWIO_IPA_STATE_IPA_PROD_ACKMNGR_STATE_IDLE_BMSK 0x8000
+#define HWIO_IPA_STATE_IPA_PROD_ACKMNGR_STATE_IDLE_SHFT 0xf
+#define HWIO_IPA_STATE_IPA_PROD_ACKMNGR_DB_EMPTY_BMSK 0x4000
+#define HWIO_IPA_STATE_IPA_PROD_ACKMNGR_DB_EMPTY_SHFT 0xe
+#define HWIO_IPA_STATE_IPA_TX_ACKQ_FULL_BMSK 0x2000
+#define HWIO_IPA_STATE_IPA_TX_ACKQ_FULL_SHFT 0xd
+#define HWIO_IPA_STATE_IPA_ACKMNGR_STATE_IDLE_BMSK 0x1000
+#define HWIO_IPA_STATE_IPA_ACKMNGR_STATE_IDLE_SHFT 0xc
+#define HWIO_IPA_STATE_IPA_ACKMNGR_DB_EMPTY_BMSK 0x800
+#define HWIO_IPA_STATE_IPA_ACKMNGR_DB_EMPTY_SHFT 0xb
+#define HWIO_IPA_STATE_IPA_RSRC_STATE_IDLE_BMSK 0x400
+#define HWIO_IPA_STATE_IPA_RSRC_STATE_IDLE_SHFT 0xa
+#define HWIO_IPA_STATE_IPA_RSRC_MNGR_DB_EMPTY_BMSK 0x200
+#define HWIO_IPA_STATE_IPA_RSRC_MNGR_DB_EMPTY_SHFT 0x9
+#define HWIO_IPA_STATE_MBIM_AGGR_IDLE_BMSK 0x100
+#define HWIO_IPA_STATE_MBIM_AGGR_IDLE_SHFT 0x8
+#define HWIO_IPA_STATE_AGGR_IDLE_BMSK 0x80
+#define HWIO_IPA_STATE_AGGR_IDLE_SHFT 0x7
+#define HWIO_IPA_STATE_IPA_NOC_IDLE_BMSK 0x40
+#define HWIO_IPA_STATE_IPA_NOC_IDLE_SHFT 0x6
+#define HWIO_IPA_STATE_IPA_STATUS_SNIFFER_IDLE_BMSK 0x20
+#define HWIO_IPA_STATE_IPA_STATUS_SNIFFER_IDLE_SHFT 0x5
+#define HWIO_IPA_STATE_BAM_GSI_IDLE_BMSK 0x10
+#define HWIO_IPA_STATE_BAM_GSI_IDLE_SHFT 0x4
+#define HWIO_IPA_STATE_DPL_FIFO_IDLE_BMSK 0x8
+#define HWIO_IPA_STATE_DPL_FIFO_IDLE_SHFT 0x3
+#define HWIO_IPA_STATE_TX_IDLE_BMSK 0x4
+#define HWIO_IPA_STATE_TX_IDLE_SHFT 0x2
+#define HWIO_IPA_STATE_RX_IDLE_BMSK 0x2
+#define HWIO_IPA_STATE_RX_IDLE_SHFT 0x1
+#define HWIO_IPA_STATE_RX_WAIT_BMSK 0x1
+#define HWIO_IPA_STATE_RX_WAIT_SHFT 0x0
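+/*
+ * Reading a single status field is a mask-and-shift on the _INM() value;
+ * a minimal sketch (illustrative only, not generated content):
+ *
+ *	bool rx_idle = HWIO_IPA_STATE_INM(HWIO_IPA_STATE_RX_IDLE_BMSK) >>
+ *		       HWIO_IPA_STATE_RX_IDLE_SHFT;
+ */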
+#define HWIO_IPA_STATE_RX_ACTIVE_ADDR (IPA_CFG_REG_BASE + 0x000000ac)
+#define HWIO_IPA_STATE_RX_ACTIVE_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000ac)
+#define HWIO_IPA_STATE_RX_ACTIVE_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000ac)
+#define HWIO_IPA_STATE_RX_ACTIVE_RMSK 0x1fff
+#define HWIO_IPA_STATE_RX_ACTIVE_ATTR 0x1
+#define HWIO_IPA_STATE_RX_ACTIVE_IN in_dword_masked( \
+		HWIO_IPA_STATE_RX_ACTIVE_ADDR, \
+		HWIO_IPA_STATE_RX_ACTIVE_RMSK)
+#define HWIO_IPA_STATE_RX_ACTIVE_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_RX_ACTIVE_ADDR, \
+		m)
+#define HWIO_IPA_STATE_RX_ACTIVE_ENDPOINTS_BMSK 0x1fff
+#define HWIO_IPA_STATE_RX_ACTIVE_ENDPOINTS_SHFT 0x0
+#define HWIO_IPA_STATE_TX0_ADDR (IPA_CFG_REG_BASE + 0x000000b0)
+#define HWIO_IPA_STATE_TX0_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000b0)
+#define HWIO_IPA_STATE_TX0_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000b0)
+#define HWIO_IPA_STATE_TX0_RMSK 0xfffffff
+#define HWIO_IPA_STATE_TX0_ATTR 0x1
+#define HWIO_IPA_STATE_TX0_IN in_dword_masked(HWIO_IPA_STATE_TX0_ADDR, \
+					      HWIO_IPA_STATE_TX0_RMSK)
+#define HWIO_IPA_STATE_TX0_INM(m) in_dword_masked(HWIO_IPA_STATE_TX0_ADDR, \
+						  m)
+#define HWIO_IPA_STATE_TX0_LAST_CMD_PIPE_BMSK 0xf800000
+#define HWIO_IPA_STATE_TX0_LAST_CMD_PIPE_SHFT 0x17
+#define HWIO_IPA_STATE_TX0_CS_SNIF_IDLE_BMSK 0x400000
+#define HWIO_IPA_STATE_TX0_CS_SNIF_IDLE_SHFT 0x16
+#define HWIO_IPA_STATE_TX0_SUSPEND_EMPTY_BMSK 0x200000
+#define HWIO_IPA_STATE_TX0_SUSPEND_EMPTY_SHFT 0x15
+#define HWIO_IPA_STATE_TX0_RSRCREL_IDLE_BMSK 0x100000
+#define HWIO_IPA_STATE_TX0_RSRCREL_IDLE_SHFT 0x14
+#define HWIO_IPA_STATE_TX0_HOLB_MASK_IDLE_BMSK 0x80000
+#define HWIO_IPA_STATE_TX0_HOLB_MASK_IDLE_SHFT 0x13
+#define HWIO_IPA_STATE_TX0_HOLB_IDLE_BMSK 0x40000
+#define HWIO_IPA_STATE_TX0_HOLB_IDLE_SHFT 0x12
+#define HWIO_IPA_STATE_TX0_ALIGNER_EMPTY_BMSK 0x20000
+#define HWIO_IPA_STATE_TX0_ALIGNER_EMPTY_SHFT 0x11
+#define HWIO_IPA_STATE_TX0_PF_EMPTY_BMSK 0x10000
+#define HWIO_IPA_STATE_TX0_PF_EMPTY_SHFT 0x10
+#define HWIO_IPA_STATE_TX0_PF_IDLE_BMSK 0x8000
+#define HWIO_IPA_STATE_TX0_PF_IDLE_SHFT 0xf
+#define HWIO_IPA_STATE_TX0_DMAW_LAST_OUTSD_IDLE_BMSK 0x4000
+#define HWIO_IPA_STATE_TX0_DMAW_LAST_OUTSD_IDLE_SHFT 0xe
+#define HWIO_IPA_STATE_TX0_DMAW_IDLE_BMSK 0x2000
+#define HWIO_IPA_STATE_TX0_DMAW_IDLE_SHFT 0xd
+#define HWIO_IPA_STATE_TX0_AR_IDLE_BMSK 0x1000
+#define HWIO_IPA_STATE_TX0_AR_IDLE_SHFT 0xc
+#define HWIO_IPA_STATE_TX0_TX_CMD_BRESP_INJ_IDLE_BMSK 0x800
+#define HWIO_IPA_STATE_TX0_TX_CMD_BRESP_INJ_IDLE_SHFT 0xb
+#define HWIO_IPA_STATE_TX0_TX_CMD_BRESP_ALOC_IDLE_BMSK 0x400
+#define HWIO_IPA_STATE_TX0_TX_CMD_BRESP_ALOC_IDLE_SHFT 0xa
+#define HWIO_IPA_STATE_TX0_TX_CMD_SNIF_IDLE_BMSK 0x200
+#define HWIO_IPA_STATE_TX0_TX_CMD_SNIF_IDLE_SHFT 0x9
+#define HWIO_IPA_STATE_TX0_TX_CMD_TRNSEQ_IDLE_BMSK 0x100
+#define HWIO_IPA_STATE_TX0_TX_CMD_TRNSEQ_IDLE_SHFT 0x8
+#define HWIO_IPA_STATE_TX0_TX_CMD_MAIN_IDLE_BMSK 0x80
+#define HWIO_IPA_STATE_TX0_TX_CMD_MAIN_IDLE_SHFT 0x7
+#define HWIO_IPA_STATE_TX0_PA_PUB_CNT_EMPTY_BMSK 0x40
+#define HWIO_IPA_STATE_TX0_PA_PUB_CNT_EMPTY_SHFT 0x6
+#define HWIO_IPA_STATE_TX0_PA_CTX_IDLE_BMSK 0x20
+#define HWIO_IPA_STATE_TX0_PA_CTX_IDLE_SHFT 0x5
+#define HWIO_IPA_STATE_TX0_PA_IDLE_BMSK 0x10
+#define HWIO_IPA_STATE_TX0_PA_IDLE_SHFT 0x4
+#define HWIO_IPA_STATE_TX0_NEXT_ARBIT_TYPE_BMSK 0xc
+#define HWIO_IPA_STATE_TX0_NEXT_ARBIT_TYPE_SHFT 0x2
+#define HWIO_IPA_STATE_TX0_LAST_ARBIT_TYPE_BMSK 0x3
+#define HWIO_IPA_STATE_TX0_LAST_ARBIT_TYPE_SHFT 0x0
+#define HWIO_IPA_STATE_AGGR_ACTIVE_ADDR (IPA_CFG_REG_BASE + 0x000000b4)
+#define HWIO_IPA_STATE_AGGR_ACTIVE_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					 0x000000b4)
+#define HWIO_IPA_STATE_AGGR_ACTIVE_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					 0x000000b4)
+#define HWIO_IPA_STATE_AGGR_ACTIVE_RMSK 0x7fffffff
+#define HWIO_IPA_STATE_AGGR_ACTIVE_ATTR 0x1
+#define HWIO_IPA_STATE_AGGR_ACTIVE_IN in_dword_masked( \
+		HWIO_IPA_STATE_AGGR_ACTIVE_ADDR, \
+		HWIO_IPA_STATE_AGGR_ACTIVE_RMSK)
+#define HWIO_IPA_STATE_AGGR_ACTIVE_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_AGGR_ACTIVE_ADDR, \
+		m)
+#define HWIO_IPA_STATE_AGGR_ACTIVE_ENDPOINTS_BMSK 0x7fffffff
+#define HWIO_IPA_STATE_AGGR_ACTIVE_ENDPOINTS_SHFT 0x0
+#define HWIO_IPA_GENERIC_RAM_ARBITER_PRIORITY_ADDR (IPA_CFG_REG_BASE + \
+						    0x000000d8)
+#define HWIO_IPA_GENERIC_RAM_ARBITER_PRIORITY_PHYS (IPA_CFG_REG_BASE_PHYS \
+						    + 0x000000d8)
+#define HWIO_IPA_GENERIC_RAM_ARBITER_PRIORITY_OFFS (IPA_CFG_REG_BASE_OFFS \
+						    + 0x000000d8)
+#define HWIO_IPA_STATE_NLO_AGGR_ADDR (IPA_CFG_REG_BASE + 0x000000dc)
+#define HWIO_IPA_STATE_NLO_AGGR_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000000dc)
+#define HWIO_IPA_STATE_NLO_AGGR_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000000dc)
+#define HWIO_IPA_STATE_NLO_AGGR_RMSK 0xffffffff
+#define HWIO_IPA_STATE_NLO_AGGR_ATTR 0x1
+#define HWIO_IPA_STATE_NLO_AGGR_IN in_dword_masked( \
+		HWIO_IPA_STATE_NLO_AGGR_ADDR, \
+		HWIO_IPA_STATE_NLO_AGGR_RMSK)
+#define HWIO_IPA_STATE_NLO_AGGR_INM(m) in_dword_masked(	\
+		HWIO_IPA_STATE_NLO_AGGR_ADDR, \
+		m)
+#define HWIO_IPA_STATE_NLO_AGGR_NLO_AGGR_STATE_BMSK 0xffffffff
+#define HWIO_IPA_STATE_NLO_AGGR_NLO_AGGR_STATE_SHFT 0x0
+#define HWIO_IPA_STATE_COAL_MASTER_1_ADDR (IPA_CFG_REG_BASE + 0x000000e0)
+#define HWIO_IPA_STATE_COAL_MASTER_1_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					   0x000000e0)
+#define HWIO_IPA_STATE_COAL_MASTER_1_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					   0x000000e0)
+#define HWIO_IPA_STATE_COAL_MASTER_1_RMSK 0x3fffffff
+#define HWIO_IPA_STATE_COAL_MASTER_1_ATTR 0x1
+#define HWIO_IPA_STATE_COAL_MASTER_1_IN in_dword_masked( \
+		HWIO_IPA_STATE_COAL_MASTER_1_ADDR, \
+		HWIO_IPA_STATE_COAL_MASTER_1_RMSK)
+#define HWIO_IPA_STATE_COAL_MASTER_1_INM(m) in_dword_masked( \
+		HWIO_IPA_STATE_COAL_MASTER_1_ADDR, \
+		m)
+#define HWIO_IPA_STATE_COAL_MASTER_1_ARBITER_STATE_BMSK 0x3c000000
+#define HWIO_IPA_STATE_COAL_MASTER_1_ARBITER_STATE_SHFT 0x1a
+#define HWIO_IPA_STATE_COAL_MASTER_1_CHECK_FIT_FSM_STATE_BMSK 0x3c00000
+#define HWIO_IPA_STATE_COAL_MASTER_1_CHECK_FIT_FSM_STATE_SHFT 0x16
+#define HWIO_IPA_STATE_COAL_MASTER_1_CHECK_FIT_RD_CTX_LINE_BMSK 0x3f0000
+#define HWIO_IPA_STATE_COAL_MASTER_1_CHECK_FIT_RD_CTX_LINE_SHFT 0x10
+#define HWIO_IPA_STATE_COAL_MASTER_1_INIT_VP_FSM_STATE_BMSK 0xf000
+#define HWIO_IPA_STATE_COAL_MASTER_1_INIT_VP_FSM_STATE_SHFT 0xc
+#define HWIO_IPA_STATE_COAL_MASTER_1_INIT_VP_RD_PKT_LINE_BMSK 0xfc0
+#define HWIO_IPA_STATE_COAL_MASTER_1_INIT_VP_RD_PKT_LINE_SHFT 0x6
+#define HWIO_IPA_STATE_COAL_MASTER_1_INIT_VP_WR_CTX_LINE_BMSK 0x3f
+#define HWIO_IPA_STATE_COAL_MASTER_1_INIT_VP_WR_CTX_LINE_SHFT 0x0
+#define HWIO_IPA_YELLOW_MARKER_BELOW_ADDR (IPA_CFG_REG_BASE + 0x00000110)
+#define HWIO_IPA_YELLOW_MARKER_BELOW_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					   0x00000110)
+#define HWIO_IPA_YELLOW_MARKER_BELOW_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					   0x00000110)
+#define HWIO_IPA_YELLOW_MARKER_BELOW_EN_ADDR (IPA_CFG_REG_BASE + \
+					      0x00000114)
+#define HWIO_IPA_YELLOW_MARKER_BELOW_EN_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x00000114)
+#define HWIO_IPA_YELLOW_MARKER_BELOW_EN_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x00000114)
+#define HWIO_IPA_YELLOW_MARKER_BELOW_CLR_ADDR (IPA_CFG_REG_BASE + \
+					       0x00000118)
+#define HWIO_IPA_YELLOW_MARKER_BELOW_CLR_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					       0x00000118)
+#define HWIO_IPA_YELLOW_MARKER_BELOW_CLR_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					       0x00000118)
+#define HWIO_IPA_RED_MARKER_BELOW_ADDR (IPA_CFG_REG_BASE + 0x0000011c)
+#define HWIO_IPA_RED_MARKER_BELOW_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000011c)
+#define HWIO_IPA_RED_MARKER_BELOW_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000011c)
+#define HWIO_IPA_RED_MARKER_BELOW_EN_ADDR (IPA_CFG_REG_BASE + 0x00000120)
+#define HWIO_IPA_RED_MARKER_BELOW_EN_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					   0x00000120)
+#define HWIO_IPA_RED_MARKER_BELOW_EN_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					   0x00000120)
+#define HWIO_IPA_RED_MARKER_BELOW_CLR_ADDR (IPA_CFG_REG_BASE + 0x00000124)
+#define HWIO_IPA_RED_MARKER_BELOW_CLR_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					    0x00000124)
+#define HWIO_IPA_RED_MARKER_BELOW_CLR_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					    0x00000124)
+#define HWIO_IPA_YELLOW_MARKER_SHADOW_ADDR (IPA_CFG_REG_BASE + 0x00000128)
+#define HWIO_IPA_YELLOW_MARKER_SHADOW_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					    0x00000128)
+#define HWIO_IPA_YELLOW_MARKER_SHADOW_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					    0x00000128)
+#define HWIO_IPA_RED_MARKER_SHADOW_ADDR (IPA_CFG_REG_BASE + 0x0000012c)
+#define HWIO_IPA_RED_MARKER_SHADOW_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					 0x0000012c)
+#define HWIO_IPA_RED_MARKER_SHADOW_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					 0x0000012c)
+#define HWIO_IPA_YELLOW_MARKER_ABOVE_ADDR (IPA_CFG_REG_BASE + 0x00000130)
+#define HWIO_IPA_YELLOW_MARKER_ABOVE_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					   0x00000130)
+#define HWIO_IPA_YELLOW_MARKER_ABOVE_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					   0x00000130)
+#define HWIO_IPA_YELLOW_MARKER_ABOVE_EN_ADDR (IPA_CFG_REG_BASE + \
+					      0x00000134)
+#define HWIO_IPA_YELLOW_MARKER_ABOVE_EN_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x00000134)
+#define HWIO_IPA_YELLOW_MARKER_ABOVE_EN_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x00000134)
+#define HWIO_IPA_YELLOW_MARKER_ABOVE_CLR_ADDR (IPA_CFG_REG_BASE + \
+					       0x00000138)
+#define HWIO_IPA_YELLOW_MARKER_ABOVE_CLR_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					       0x00000138)
+#define HWIO_IPA_YELLOW_MARKER_ABOVE_CLR_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					       0x00000138)
+#define HWIO_IPA_RED_MARKER_ABOVE_ADDR (IPA_CFG_REG_BASE + 0x0000013c)
+#define HWIO_IPA_RED_MARKER_ABOVE_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000013c)
+#define HWIO_IPA_RED_MARKER_ABOVE_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000013c)
+#define HWIO_IPA_RED_MARKER_ABOVE_EN_ADDR (IPA_CFG_REG_BASE + 0x00000140)
+#define HWIO_IPA_RED_MARKER_ABOVE_EN_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					   0x00000140)
+#define HWIO_IPA_RED_MARKER_ABOVE_EN_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					   0x00000140)
+#define HWIO_IPA_RED_MARKER_ABOVE_CLR_ADDR (IPA_CFG_REG_BASE + 0x00000144)
+#define HWIO_IPA_RED_MARKER_ABOVE_CLR_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					    0x00000144)
+#define HWIO_IPA_RED_MARKER_ABOVE_CLR_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					    0x00000144)
+#define HWIO_IPA_FILT_ROUT_HASH_EN_ADDR (IPA_CFG_REG_BASE + 0x00000148)
+#define HWIO_IPA_FILT_ROUT_HASH_EN_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					 0x00000148)
+#define HWIO_IPA_FILT_ROUT_HASH_EN_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					 0x00000148)
+#define HWIO_IPA_FILT_ROUT_HASH_EN_RMSK 0x1111
+#define HWIO_IPA_FILT_ROUT_HASH_EN_ATTR 0x3
+#define HWIO_IPA_FILT_ROUT_HASH_EN_IN in_dword_masked( \
+		HWIO_IPA_FILT_ROUT_HASH_EN_ADDR, \
+		HWIO_IPA_FILT_ROUT_HASH_EN_RMSK)
+#define HWIO_IPA_FILT_ROUT_HASH_EN_INM(m) in_dword_masked( \
+		HWIO_IPA_FILT_ROUT_HASH_EN_ADDR, \
+		m)
+#define HWIO_IPA_FILT_ROUT_HASH_EN_OUT(v) out_dword( \
+		HWIO_IPA_FILT_ROUT_HASH_EN_ADDR, \
+		v)
+#define HWIO_IPA_FILT_ROUT_HASH_EN_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_FILT_ROUT_HASH_EN_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_FILT_ROUT_HASH_EN_IN)
+#define HWIO_IPA_FILT_ROUT_HASH_EN_IPV4_FILTER_HASH_EN_BMSK 0x1000
+#define HWIO_IPA_FILT_ROUT_HASH_EN_IPV4_FILTER_HASH_EN_SHFT 0xc
+#define HWIO_IPA_FILT_ROUT_HASH_EN_IPV4_ROUTER_HASH_EN_BMSK 0x100
+#define HWIO_IPA_FILT_ROUT_HASH_EN_IPV4_ROUTER_HASH_EN_SHFT 0x8
+#define HWIO_IPA_FILT_ROUT_HASH_EN_IPV6_FILTER_HASH_EN_BMSK 0x10
+#define HWIO_IPA_FILT_ROUT_HASH_EN_IPV6_FILTER_HASH_EN_SHFT 0x4
+#define HWIO_IPA_FILT_ROUT_HASH_EN_IPV6_ROUTER_HASH_EN_BMSK 0x1
+#define HWIO_IPA_FILT_ROUT_HASH_EN_IPV6_ROUTER_HASH_EN_SHFT 0x0
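+/*
+ * For read/write registers such as this one (_ATTR 0x3), _OUTM() does a
+ * masked read-modify-write via out_dword_masked_ns(), so one enable bit
+ * can be changed without disturbing the others; an illustrative sketch:
+ *
+ *	HWIO_IPA_FILT_ROUT_HASH_EN_OUTM(
+ *		HWIO_IPA_FILT_ROUT_HASH_EN_IPV4_FILTER_HASH_EN_BMSK,
+ *		1 << HWIO_IPA_FILT_ROUT_HASH_EN_IPV4_FILTER_HASH_EN_SHFT);
+ */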
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_ADDR (IPA_CFG_REG_BASE + 0x0000014c)
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					    0x0000014c)
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					    0x0000014c)
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_RMSK 0x1111
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_ATTR 0x2
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_OUT(v) out_dword(	\
+		HWIO_IPA_FILT_ROUT_HASH_FLUSH_ADDR, \
+		v)
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_IPV4_FILTER_HASH_FLUSH_BMSK 0x1000
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_IPV4_FILTER_HASH_FLUSH_SHFT 0xc
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_IPV4_ROUTER_HASH_FLUSH_BMSK 0x100
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_IPV4_ROUTER_HASH_FLUSH_SHFT 0x8
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_IPV6_FILTER_HASH_FLUSH_BMSK 0x10
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_IPV6_FILTER_HASH_FLUSH_SHFT 0x4
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_IPV6_ROUTER_HASH_FLUSH_BMSK 0x1
+#define HWIO_IPA_FILT_ROUT_HASH_FLUSH_IPV6_ROUTER_HASH_FLUSH_SHFT 0x0
+#define HWIO_IPA_FILT_ROUT_CFG_ADDR (IPA_CFG_REG_BASE + 0x00000150)
+#define HWIO_IPA_FILT_ROUT_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000150)
+#define HWIO_IPA_FILT_ROUT_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000150)
+#define HWIO_IPA_IPV4_FILTER_INIT_VALUES_ADDR (IPA_CFG_REG_BASE + \
+					       0x00000160)
+#define HWIO_IPA_IPV4_FILTER_INIT_VALUES_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					       0x00000160)
+#define HWIO_IPA_IPV4_FILTER_INIT_VALUES_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					       0x00000160)
+#define HWIO_IPA_IPV4_FILTER_INIT_VALUES_RMSK 0xffffffff
+#define HWIO_IPA_IPV4_FILTER_INIT_VALUES_ATTR 0x1
+#define HWIO_IPA_IPV4_FILTER_INIT_VALUES_IN in_dword_masked( \
+		HWIO_IPA_IPV4_FILTER_INIT_VALUES_ADDR, \
+		HWIO_IPA_IPV4_FILTER_INIT_VALUES_RMSK)
+#define HWIO_IPA_IPV4_FILTER_INIT_VALUES_INM(m) in_dword_masked( \
+		HWIO_IPA_IPV4_FILTER_INIT_VALUES_ADDR, \
+		m)
+#define	\
+	HWIO_IPA_IPV4_FILTER_INIT_VALUES_IP_V4_FILTER_INIT_HASHED_ADDR_BMSK \
+	0xffff
+#define	\
+	HWIO_IPA_IPV4_FILTER_INIT_VALUES_IP_V4_FILTER_INIT_HASHED_ADDR_SHFT \
+	0x0
+#define HWIO_IPA_IPV6_FILTER_INIT_VALUES_ADDR (IPA_CFG_REG_BASE + \
+					       0x00000164)
+#define HWIO_IPA_IPV6_FILTER_INIT_VALUES_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					       0x00000164)
+#define HWIO_IPA_IPV6_FILTER_INIT_VALUES_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					       0x00000164)
+#define HWIO_IPA_IPV6_FILTER_INIT_VALUES_RMSK 0xffffffff
+#define HWIO_IPA_IPV6_FILTER_INIT_VALUES_ATTR 0x1
+#define HWIO_IPA_IPV6_FILTER_INIT_VALUES_IN in_dword_masked( \
+		HWIO_IPA_IPV6_FILTER_INIT_VALUES_ADDR, \
+		HWIO_IPA_IPV6_FILTER_INIT_VALUES_RMSK)
+#define HWIO_IPA_IPV6_FILTER_INIT_VALUES_INM(m) in_dword_masked( \
+		HWIO_IPA_IPV6_FILTER_INIT_VALUES_ADDR, \
+		m)
+#define	\
+	HWIO_IPA_IPV6_FILTER_INIT_VALUES_IP_V6_FILTER_INIT_HASHED_ADDR_BMSK \
+	0xffff
+#define	\
+	HWIO_IPA_IPV6_FILTER_INIT_VALUES_IP_V6_FILTER_INIT_HASHED_ADDR_SHFT \
+	0x0
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_0_ADDR (IPA_CFG_REG_BASE + \
+					      0x00000178)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_0_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x00000178)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_0_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x00000178)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_0_MSB_ADDR (IPA_CFG_REG_BASE + \
+						  0x0000017c)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_0_MSB_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						  0x0000017c)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_0_MSB_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						  0x0000017c)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_1_ADDR (IPA_CFG_REG_BASE + \
+					      0x00000180)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_1_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x00000180)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_1_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x00000180)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_1_MSB_ADDR (IPA_CFG_REG_BASE + \
+						  0x00000184)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_1_MSB_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						  0x00000184)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_1_MSB_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						  0x00000184)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_2_ADDR (IPA_CFG_REG_BASE + \
+					      0x00000188)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_2_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x00000188)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_2_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x00000188)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_2_MSB_ADDR (IPA_CFG_REG_BASE + \
+						  0x0000018c)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_2_MSB_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						  0x0000018c)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_2_MSB_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						  0x0000018c)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_3_ADDR (IPA_CFG_REG_BASE + \
+					      0x00000190)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_3_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x00000190)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_3_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x00000190)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_3_MSB_ADDR (IPA_CFG_REG_BASE + \
+						  0x00000194)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_3_MSB_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						  0x00000194)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_3_MSB_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						  0x00000194)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_4_ADDR (IPA_CFG_REG_BASE + \
+					      0x00000198)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_4_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x00000198)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_4_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x00000198)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_5_ADDR (IPA_CFG_REG_BASE + \
+					      0x0000019c)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_5_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x0000019c)
+#define HWIO_IPA_IPV4_NAT_INIT_VALUES_5_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x0000019c)
+#define HWIO_IPA_IPV4_ROUTE_INIT_VALUES_ADDR (IPA_CFG_REG_BASE + \
+					      0x000001a0)
+#define HWIO_IPA_IPV4_ROUTE_INIT_VALUES_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x000001a0)
+#define HWIO_IPA_IPV4_ROUTE_INIT_VALUES_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x000001a0)
+#define HWIO_IPA_IPV4_ROUTE_INIT_VALUES_RMSK 0xffffffff
+#define HWIO_IPA_IPV4_ROUTE_INIT_VALUES_ATTR 0x1
+#define HWIO_IPA_IPV4_ROUTE_INIT_VALUES_IN in_dword_masked( \
+		HWIO_IPA_IPV4_ROUTE_INIT_VALUES_ADDR, \
+		HWIO_IPA_IPV4_ROUTE_INIT_VALUES_RMSK)
+#define HWIO_IPA_IPV4_ROUTE_INIT_VALUES_INM(m) in_dword_masked(	\
+		HWIO_IPA_IPV4_ROUTE_INIT_VALUES_ADDR, \
+		m)
+#define	\
+	HWIO_IPA_IPV4_ROUTE_INIT_VALUES_IP_V4_ROUTE_INIT_NON_HASHED_ADDR_BMSK \
+	0xffff0000
+#define	\
+	HWIO_IPA_IPV4_ROUTE_INIT_VALUES_IP_V4_ROUTE_INIT_NON_HASHED_ADDR_SHFT \
+	0x10
+#define HWIO_IPA_IPV4_ROUTE_INIT_VALUES_IP_V4_ROUTE_INIT_HASHED_ADDR_BMSK \
+	0xffff
+#define HWIO_IPA_IPV4_ROUTE_INIT_VALUES_IP_V4_ROUTE_INIT_HASHED_ADDR_SHFT \
+	0x0
+#define HWIO_IPA_IPV6_ROUTE_INIT_VALUES_ADDR (IPA_CFG_REG_BASE + \
+					      0x000001a4)
+#define HWIO_IPA_IPV6_ROUTE_INIT_VALUES_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x000001a4)
+#define HWIO_IPA_IPV6_ROUTE_INIT_VALUES_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x000001a4)
+#define HWIO_IPA_IPV6_ROUTE_INIT_VALUES_RMSK 0xffffffff
+#define HWIO_IPA_IPV6_ROUTE_INIT_VALUES_ATTR 0x1
+#define HWIO_IPA_IPV6_ROUTE_INIT_VALUES_IN in_dword_masked( \
+		HWIO_IPA_IPV6_ROUTE_INIT_VALUES_ADDR, \
+		HWIO_IPA_IPV6_ROUTE_INIT_VALUES_RMSK)
+#define HWIO_IPA_IPV6_ROUTE_INIT_VALUES_INM(m) in_dword_masked(	\
+		HWIO_IPA_IPV6_ROUTE_INIT_VALUES_ADDR, \
+		m)
+#define	\
+	HWIO_IPA_IPV6_ROUTE_INIT_VALUES_IP_V6_ROUTE_INIT_NON_HASHED_ADDR_BMSK \
+	0xffff0000
+#define	\
+	HWIO_IPA_IPV6_ROUTE_INIT_VALUES_IP_V6_ROUTE_INIT_NON_HASHED_ADDR_SHFT \
+	0x10
+#define HWIO_IPA_IPV6_ROUTE_INIT_VALUES_IP_V6_ROUTE_INIT_HASHED_ADDR_BMSK \
+	0xffff
+#define HWIO_IPA_IPV6_ROUTE_INIT_VALUES_IP_V6_ROUTE_INIT_HASHED_ADDR_SHFT \
+	0x0
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_0_ADDR (IPA_CFG_REG_BASE +	\
+						     0x000001a8)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_0_PHYS (IPA_CFG_REG_BASE_PHYS \
+						     + 0x000001a8)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_0_OFFS (IPA_CFG_REG_BASE_OFFS \
+						     + 0x000001a8)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_0_MSB_ADDR (IPA_CFG_REG_BASE \
+							 + 0x000001ac)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_0_MSB_PHYS ( \
+		IPA_CFG_REG_BASE_PHYS + 0x000001ac)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_0_MSB_OFFS ( \
+		IPA_CFG_REG_BASE_OFFS + 0x000001ac)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_1_ADDR (IPA_CFG_REG_BASE +	\
+						     0x000001b0)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_1_PHYS (IPA_CFG_REG_BASE_PHYS \
+						     + 0x000001b0)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_1_OFFS (IPA_CFG_REG_BASE_OFFS \
+						     + 0x000001b0)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_1_MSB_ADDR (IPA_CFG_REG_BASE \
+							 + 0x000001b4)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_1_MSB_PHYS ( \
+		IPA_CFG_REG_BASE_PHYS + 0x000001b4)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_1_MSB_OFFS ( \
+		IPA_CFG_REG_BASE_OFFS + 0x000001b4)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_2_ADDR (IPA_CFG_REG_BASE +	\
+						     0x000001b8)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_2_PHYS (IPA_CFG_REG_BASE_PHYS \
+						     + 0x000001b8)
+#define HWIO_IPA_IPV6_CONN_TRACK_INIT_VALUES_2_OFFS (IPA_CFG_REG_BASE_OFFS \
+						     + 0x000001b8)
+#define HWIO_IPA_HDR_INIT_LOCAL_VALUES_ADDR (IPA_CFG_REG_BASE + 0x000001c0)
+#define HWIO_IPA_HDR_INIT_LOCAL_VALUES_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					     0x000001c0)
+#define HWIO_IPA_HDR_INIT_LOCAL_VALUES_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					     0x000001c0)
+#define HWIO_IPA_HDR_INIT_SYSTEM_VALUES_ADDR (IPA_CFG_REG_BASE + \
+					      0x000001c4)
+#define HWIO_IPA_HDR_INIT_SYSTEM_VALUES_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					      0x000001c4)
+#define HWIO_IPA_HDR_INIT_SYSTEM_VALUES_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					      0x000001c4)
+#define HWIO_IPA_HDR_INIT_SYSTEM_VALUES_MSB_ADDR (IPA_CFG_REG_BASE + \
+						  0x000001c8)
+#define HWIO_IPA_HDR_INIT_SYSTEM_VALUES_MSB_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						  0x000001c8)
+#define HWIO_IPA_HDR_INIT_SYSTEM_VALUES_MSB_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						  0x000001c8)
+#define HWIO_IPA_IMM_CMD_ACCESS_PIPE_VALUES_ADDR (IPA_CFG_REG_BASE + \
+						  0x000001cc)
+#define HWIO_IPA_IMM_CMD_ACCESS_PIPE_VALUES_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						  0x000001cc)
+#define HWIO_IPA_IMM_CMD_ACCESS_PIPE_VALUES_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						  0x000001cc)
+#define HWIO_IPA_FRAG_VALUES_ADDR (IPA_CFG_REG_BASE + 0x000001d8)
+#define HWIO_IPA_FRAG_VALUES_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000001d8)
+#define HWIO_IPA_FRAG_VALUES_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000001d8)
+#define HWIO_IPA_BAM_ACTIVATED_PORTS_ADDR (IPA_CFG_REG_BASE + 0x000001dc)
+#define HWIO_IPA_BAM_ACTIVATED_PORTS_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					   0x000001dc)
+#define HWIO_IPA_BAM_ACTIVATED_PORTS_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					   0x000001dc)
+#define HWIO_IPA_BAM_ACTIVATED_PORTS_RMSK 0x7fffffff
+#define HWIO_IPA_BAM_ACTIVATED_PORTS_ATTR 0x1
+#define HWIO_IPA_BAM_ACTIVATED_PORTS_IN in_dword_masked( \
+		HWIO_IPA_BAM_ACTIVATED_PORTS_ADDR, \
+		HWIO_IPA_BAM_ACTIVATED_PORTS_RMSK)
+#define HWIO_IPA_BAM_ACTIVATED_PORTS_INM(m) in_dword_masked( \
+		HWIO_IPA_BAM_ACTIVATED_PORTS_ADDR, \
+		m)
+#define HWIO_IPA_BAM_ACTIVATED_PORTS_ENDPOINTS_BMSK 0x7fffffff
+#define HWIO_IPA_BAM_ACTIVATED_PORTS_ENDPOINTS_SHFT 0x0
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_ADDR (IPA_CFG_REG_BASE + \
+					       0x000001e0)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					       0x000001e0)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					       0x000001e0)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_RMSK 0xffffffff
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_ATTR 0x3
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_IN in_dword_masked( \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_ADDR, \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_RMSK)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_INM(m) in_dword_masked( \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_ADDR, \
+		m)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_OUT(v) out_dword( \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_ADDR, \
+		v)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_IN)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_ADDR_BMSK 0xfffffff8
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_ADDR_SHFT 0x3
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_ZERO_BMSK 0x7
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_ZERO_SHFT 0x0
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_ADDR (IPA_CFG_REG_BASE + \
+						   0x000001e4)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						   0x000001e4)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						   0x000001e4)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_RMSK 0xffffffff
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_ATTR 0x3
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_IN in_dword_masked( \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_ADDR, \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_RMSK)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_INM(m) in_dword_masked( \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_ADDR, \
+		m)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_OUT(v) out_dword( \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_ADDR, \
+		v)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_OUTM(m, \
+						  v) out_dword_masked_ns( \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_IN)
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_SYS_PKT_PROC_CNTXT_BASE_MSB_ADDR_SHFT 0x0
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_ADDR (IPA_CFG_REG_BASE + \
+						 0x000001e8)
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						 0x000001e8)
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						 0x000001e8)
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_RMSK 0x3ffff
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_ATTR 0x3
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_IN in_dword_masked( \
+		HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_ADDR, \
+		HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_RMSK)
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_INM(m) in_dword_masked( \
+		HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_ADDR, \
+		m)
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_OUT(v) out_dword( \
+		HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_ADDR, \
+		v)
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_IN)
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_ADDR_BMSK 0x3fff8
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_ADDR_SHFT 0x3
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_ZERO_BMSK 0x7
+#define HWIO_IPA_LOCAL_PKT_PROC_CNTXT_BASE_ZERO_SHFT 0x0
+#define HWIO_IPA_AGGR_FORCE_CLOSE_ADDR (IPA_CFG_REG_BASE + 0x000001ec)
+#define HWIO_IPA_AGGR_FORCE_CLOSE_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000001ec)
+#define HWIO_IPA_AGGR_FORCE_CLOSE_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000001ec)
+#define HWIO_IPA_SCND_FRAG_VALUES_ADDR (IPA_CFG_REG_BASE + 0x000001f4)
+#define HWIO_IPA_SCND_FRAG_VALUES_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000001f4)
+#define HWIO_IPA_SCND_FRAG_VALUES_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000001f4)
+#define HWIO_IPA_TX_CFG_ADDR (IPA_CFG_REG_BASE + 0x000001fc)
+#define HWIO_IPA_TX_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000001fc)
+#define HWIO_IPA_TX_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000001fc)
+#define HWIO_IPA_NAT_UC_EXTERNAL_CFG_ADDR (IPA_CFG_REG_BASE + 0x00000200)
+#define HWIO_IPA_NAT_UC_EXTERNAL_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					   0x00000200)
+#define HWIO_IPA_NAT_UC_EXTERNAL_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					   0x00000200)
+#define HWIO_IPA_NAT_UC_LOCAL_CFG_ADDR (IPA_CFG_REG_BASE + 0x00000204)
+#define HWIO_IPA_NAT_UC_LOCAL_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000204)
+#define HWIO_IPA_NAT_UC_LOCAL_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000204)
+#define HWIO_IPA_NAT_UC_SHARED_CFG_ADDR (IPA_CFG_REG_BASE + 0x00000208)
+#define HWIO_IPA_NAT_UC_SHARED_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					 0x00000208)
+#define HWIO_IPA_NAT_UC_SHARED_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					 0x00000208)
+#define HWIO_IPA_RAM_INTLV_CFG_ADDR (IPA_CFG_REG_BASE + 0x0000020c)
+#define HWIO_IPA_RAM_INTLV_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000020c)
+#define HWIO_IPA_RAM_INTLV_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000020c)
+#define HWIO_IPA_FLAVOR_0_ADDR (IPA_CFG_REG_BASE + 0x00000210)
+#define HWIO_IPA_FLAVOR_0_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000210)
+#define HWIO_IPA_FLAVOR_0_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000210)
+#define HWIO_IPA_FLAVOR_1_ADDR (IPA_CFG_REG_BASE + 0x00000214)
+#define HWIO_IPA_FLAVOR_1_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000214)
+#define HWIO_IPA_FLAVOR_1_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000214)
+#define HWIO_IPA_FLAVOR_2_ADDR (IPA_CFG_REG_BASE + 0x00000218)
+#define HWIO_IPA_FLAVOR_2_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000218)
+#define HWIO_IPA_FLAVOR_2_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000218)
+#define HWIO_IPA_FLAVOR_3_ADDR (IPA_CFG_REG_BASE + 0x0000021c)
+#define HWIO_IPA_FLAVOR_3_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000021c)
+#define HWIO_IPA_FLAVOR_3_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000021c)
+#define HWIO_IPA_FLAVOR_4_ADDR (IPA_CFG_REG_BASE + 0x00000220)
+#define HWIO_IPA_FLAVOR_4_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000220)
+#define HWIO_IPA_FLAVOR_4_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000220)
+#define HWIO_IPA_FLAVOR_5_ADDR (IPA_CFG_REG_BASE + 0x00000224)
+#define HWIO_IPA_FLAVOR_5_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000224)
+#define HWIO_IPA_FLAVOR_5_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000224)
+#define HWIO_IPA_FLAVOR_6_ADDR (IPA_CFG_REG_BASE + 0x00000228)
+#define HWIO_IPA_FLAVOR_6_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000228)
+#define HWIO_IPA_FLAVOR_6_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000228)
+#define HWIO_IPA_FLAVOR_7_ADDR (IPA_CFG_REG_BASE + 0x0000022c)
+#define HWIO_IPA_FLAVOR_7_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000022c)
+#define HWIO_IPA_FLAVOR_7_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000022c)
+#define HWIO_IPA_CONN_TRACK_UC_EXTERNAL_CFG_ADDR (IPA_CFG_REG_BASE + \
+						  0x00000230)
+#define HWIO_IPA_CONN_TRACK_UC_EXTERNAL_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						  0x00000230)
+#define HWIO_IPA_CONN_TRACK_UC_EXTERNAL_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						  0x00000230)
+#define HWIO_IPA_CONN_TRACK_UC_LOCAL_CFG_ADDR (IPA_CFG_REG_BASE + \
+					       0x00000234)
+#define HWIO_IPA_CONN_TRACK_UC_LOCAL_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					       0x00000234)
+#define HWIO_IPA_CONN_TRACK_UC_LOCAL_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					       0x00000234)
+#define HWIO_IPA_CONN_TRACK_UC_SHARED_CFG_ADDR (IPA_CFG_REG_BASE + \
+						0x00000238)
+#define HWIO_IPA_CONN_TRACK_UC_SHARED_CFG_PHYS (IPA_CFG_REG_BASE_PHYS +	\
+						0x00000238)
+#define HWIO_IPA_CONN_TRACK_UC_SHARED_CFG_OFFS (IPA_CFG_REG_BASE_OFFS +	\
+						0x00000238)
+#define HWIO_IPA_IDLE_INDICATION_CFG_ADDR (IPA_CFG_REG_BASE + 0x00000240)
+#define HWIO_IPA_IDLE_INDICATION_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					   0x00000240)
+#define HWIO_IPA_IDLE_INDICATION_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					   0x00000240)
+#define HWIO_IPA_QTIME_TIMESTAMP_CFG_ADDR (IPA_CFG_REG_BASE + 0x0000024c)
+#define HWIO_IPA_QTIME_TIMESTAMP_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					   0x0000024c)
+#define HWIO_IPA_QTIME_TIMESTAMP_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					   0x0000024c)
+#define HWIO_IPA_TIMERS_XO_CLK_DIV_CFG_ADDR (IPA_CFG_REG_BASE + 0x00000250)
+#define HWIO_IPA_TIMERS_XO_CLK_DIV_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					     0x00000250)
+#define HWIO_IPA_TIMERS_XO_CLK_DIV_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					     0x00000250)
+#define HWIO_IPA_TIMERS_PULSE_GRAN_CFG_ADDR (IPA_CFG_REG_BASE + 0x00000254)
+#define HWIO_IPA_TIMERS_PULSE_GRAN_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					     0x00000254)
+#define HWIO_IPA_TIMERS_PULSE_GRAN_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					     0x00000254)
+#define HWIO_IPA_QTIME_SMP_ADDR (IPA_CFG_REG_BASE + 0x00000260)
+#define HWIO_IPA_QTIME_SMP_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000260)
+#define HWIO_IPA_QTIME_SMP_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000260)
+#define HWIO_IPA_QTIME_LSB_ADDR (IPA_CFG_REG_BASE + 0x00000264)
+#define HWIO_IPA_QTIME_LSB_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000264)
+#define HWIO_IPA_QTIME_LSB_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000264)
+#define HWIO_IPA_QTIME_MSB_ADDR (IPA_CFG_REG_BASE + 0x00000268)
+#define HWIO_IPA_QTIME_MSB_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00000268)
+#define HWIO_IPA_QTIME_MSB_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00000268)
+#define HWIO_IPA_SRC_RSRC_AMOUNT_REDUCE_EN_ADDR (IPA_CFG_REG_BASE + \
+						 0x00000334)
+#define HWIO_IPA_SRC_RSRC_AMOUNT_REDUCE_EN_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						 0x00000334)
+#define HWIO_IPA_SRC_RSRC_AMOUNT_REDUCE_EN_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						 0x00000334)
+#define HWIO_IPA_SRC_RSRC_AMOUNT_REDUCE_VALUES_0_ADDR (IPA_CFG_REG_BASE + \
+						       0x00000338)
+#define HWIO_IPA_SRC_RSRC_AMOUNT_REDUCE_VALUES_0_PHYS (	\
+		IPA_CFG_REG_BASE_PHYS + 0x00000338)
+#define HWIO_IPA_SRC_RSRC_AMOUNT_REDUCE_VALUES_0_OFFS (	\
+		IPA_CFG_REG_BASE_OFFS + 0x00000338)
+#define HWIO_IPA_SRC_RSRC_AMOUNT_REDUCE_VALUES_1_ADDR (IPA_CFG_REG_BASE + \
+						       0x0000033c)
+#define HWIO_IPA_SRC_RSRC_AMOUNT_REDUCE_VALUES_1_PHYS (	\
+		IPA_CFG_REG_BASE_PHYS + 0x0000033c)
+#define HWIO_IPA_SRC_RSRC_AMOUNT_REDUCE_VALUES_1_OFFS (	\
+		IPA_CFG_REG_BASE_OFFS + 0x0000033c)
+#define HWIO_IPA_DST_RSRC_AMOUNT_REDUCE_EN_ADDR (IPA_CFG_REG_BASE + \
+						 0x00000340)
+#define HWIO_IPA_DST_RSRC_AMOUNT_REDUCE_EN_PHYS (IPA_CFG_REG_BASE_PHYS + \
+						 0x00000340)
+#define HWIO_IPA_DST_RSRC_AMOUNT_REDUCE_EN_OFFS (IPA_CFG_REG_BASE_OFFS + \
+						 0x00000340)
+#define HWIO_IPA_DST_RSRC_AMOUNT_REDUCE_VALUES_0_ADDR (IPA_CFG_REG_BASE + \
+						       0x00000344)
+#define HWIO_IPA_DST_RSRC_AMOUNT_REDUCE_VALUES_0_PHYS (	\
+		IPA_CFG_REG_BASE_PHYS + 0x00000344)
+#define HWIO_IPA_DST_RSRC_AMOUNT_REDUCE_VALUES_0_OFFS (	\
+		IPA_CFG_REG_BASE_OFFS + 0x00000344)
+#define HWIO_IPA_HPS_DPS_CMDQ_RED_IRQ_MASK_ENABLE_ADDR (IPA_CFG_REG_BASE + \
+							0x00000348)
+#define HWIO_IPA_HPS_DPS_CMDQ_RED_IRQ_MASK_ENABLE_PHYS ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000348)
+#define HWIO_IPA_HPS_DPS_CMDQ_RED_IRQ_MASK_ENABLE_OFFS ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000348)
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						      0x00000400 + 0x20 * \
+						      (n))
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000400 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000400 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_RMSK 0x3f3f3f3f
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_MAXn 4
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_ATTR 0x3
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_ADDR(n), \
+		HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_RMSK)
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_ADDR(n), \
+		val)
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_OUTMI(n, mask, \
+						   val)	\
+	out_dword_masked_ns(HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_ADDR( \
+				    n),	\
+			    mask, val, \
+			    HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_INI(n))
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_SRC_RSRC_GRP_1_MAX_LIMIT_BMSK \
+	0x3f000000
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_SRC_RSRC_GRP_1_MAX_LIMIT_SHFT \
+	0x18
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_SRC_RSRC_GRP_1_MIN_LIMIT_BMSK \
+	0x3f0000
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_SRC_RSRC_GRP_1_MIN_LIMIT_SHFT \
+	0x10
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_SRC_RSRC_GRP_0_MAX_LIMIT_BMSK \
+	0x3f00
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_SRC_RSRC_GRP_0_MAX_LIMIT_SHFT \
+	0x8
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_SRC_RSRC_GRP_0_MIN_LIMIT_BMSK \
+	0x3f
+#define HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_SRC_RSRC_GRP_0_MIN_LIMIT_SHFT \
+	0x0
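+/*
+ * Register families suffixed _n_ (this one and those that follow) are
+ * indexed by resource type, valid for n in 0.._MAXn; _INI(n) and
+ * _OUTI(n, val) are the indexed read/write forms. An illustrative sketch
+ * (hypothetical limit values):
+ *
+ *	HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_OUTI(2, 0x00000808);
+ */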
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						      0x00000404 + 0x20 * \
+						      (n))
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000404 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000404 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_RMSK 0x3f3f3f3f
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_MAXn 4
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_ATTR 0x3
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_ADDR(n), \
+		HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_RMSK)
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_ADDR(n), \
+		val)
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_OUTMI(n, mask, \
+						   val)	\
+	out_dword_masked_ns(HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_ADDR( \
+				    n),	\
+			    mask, val, \
+			    HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_INI(n))
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_SRC_RSRC_GRP_3_MAX_LIMIT_BMSK \
+	0x3f000000
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_SRC_RSRC_GRP_3_MAX_LIMIT_SHFT \
+	0x18
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_SRC_RSRC_GRP_3_MIN_LIMIT_BMSK \
+	0x3f0000
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_SRC_RSRC_GRP_3_MIN_LIMIT_SHFT \
+	0x10
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_SRC_RSRC_GRP_2_MAX_LIMIT_BMSK \
+	0x3f00
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_SRC_RSRC_GRP_2_MAX_LIMIT_SHFT \
+	0x8
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_SRC_RSRC_GRP_2_MIN_LIMIT_BMSK \
+	0x3f
+#define HWIO_IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n_SRC_RSRC_GRP_2_MIN_LIMIT_SHFT \
+	0x0
+#define HWIO_IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						      0x00000408 + 0x20 * \
+						      (n))
+#define HWIO_IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000408 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000408 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_ADDR(n) ( \
+		IPA_CFG_REG_BASE + 0x00000410 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000410 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000410 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_RMSK 0x3f3f3f3f
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_MAXn 4
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_ATTR 0x1
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_INI(n) in_dword_masked( \
+		HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_ADDR(n), \
+		HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_RMSK)
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_INMI(n, \
+							mask) \
+	in_dword_masked( \
+		HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_SRC_RSRC_GRP_3_CNT_BMSK \
+	0x3f000000
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_SRC_RSRC_GRP_3_CNT_SHFT \
+	0x18
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_SRC_RSRC_GRP_2_CNT_BMSK \
+	0x3f0000
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_SRC_RSRC_GRP_2_CNT_SHFT \
+	0x10
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_SRC_RSRC_GRP_1_CNT_BMSK \
+	0x3f00
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_SRC_RSRC_GRP_1_CNT_SHFT \
+	0x8
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_SRC_RSRC_GRP_0_CNT_BMSK \
+	0x3f
+#define HWIO_IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n_SRC_RSRC_GRP_0_CNT_SHFT \
+	0x0
+#define HWIO_IPA_SRC_RSRC_GRP_4567_RSRC_TYPE_CNT_n_ADDR(n) ( \
+		IPA_CFG_REG_BASE + 0x00000414 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_4567_RSRC_TYPE_CNT_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000414 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_GRP_4567_RSRC_TYPE_CNT_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000414 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_TYPE_AMOUNT_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						 0x00000418 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_TYPE_AMOUNT_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+						 0x00000418 + 0x20 * (n))
+#define HWIO_IPA_SRC_RSRC_TYPE_AMOUNT_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+						 0x00000418 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						      0x00000500 + 0x20 * \
+						      (n))
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000500 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000500 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_RMSK 0x3f3f3f3f
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_MAXn 1
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_ATTR 0x3
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_ADDR(n), \
+		HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_RMSK)
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_ADDR(n), \
+		val)
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_OUTMI(n, mask, \
+						   val)	\
+	out_dword_masked_ns(HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_ADDR( \
+				    n),	\
+			    mask, val, \
+			    HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_INI(n))
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_DST_RSRC_GRP_1_MAX_LIMIT_BMSK \
+	0x3f000000
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_DST_RSRC_GRP_1_MAX_LIMIT_SHFT \
+	0x18
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_DST_RSRC_GRP_1_MIN_LIMIT_BMSK \
+	0x3f0000
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_DST_RSRC_GRP_1_MIN_LIMIT_SHFT \
+	0x10
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_DST_RSRC_GRP_0_MAX_LIMIT_BMSK \
+	0x3f00
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_DST_RSRC_GRP_0_MAX_LIMIT_SHFT \
+	0x8
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_DST_RSRC_GRP_0_MIN_LIMIT_BMSK \
+	0x3f
+#define HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_DST_RSRC_GRP_0_MIN_LIMIT_SHFT \
+	0x0
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						      0x00000504 + 0x20 * \
+						      (n))
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000504 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000504 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_RMSK 0x3f3f3f3f
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_MAXn 1
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_ATTR 0x3
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_ADDR(n), \
+		HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_RMSK)
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_ADDR(n), \
+		val)
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_OUTMI(n, mask, \
+						   val)	\
+	out_dword_masked_ns(HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_ADDR( \
+				    n),	\
+			    mask, val, \
+			    HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_INI(n))
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_DST_RSRC_GRP_3_MAX_LIMIT_BMSK \
+	0x3f000000
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_DST_RSRC_GRP_3_MAX_LIMIT_SHFT \
+	0x18
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_DST_RSRC_GRP_3_MIN_LIMIT_BMSK \
+	0x3f0000
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_DST_RSRC_GRP_3_MIN_LIMIT_SHFT \
+	0x10
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_DST_RSRC_GRP_2_MAX_LIMIT_BMSK \
+	0x3f00
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_DST_RSRC_GRP_2_MAX_LIMIT_SHFT \
+	0x8
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_DST_RSRC_GRP_2_MIN_LIMIT_BMSK \
+	0x3f
+#define HWIO_IPA_DST_RSRC_GRP_23_RSRC_TYPE_n_DST_RSRC_GRP_2_MIN_LIMIT_SHFT \
+	0x0
+#define HWIO_IPA_DST_RSRC_GRP_45_RSRC_TYPE_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						      0x00000508 + 0x20 * \
+						      (n))
+#define HWIO_IPA_DST_RSRC_GRP_45_RSRC_TYPE_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000508 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_45_RSRC_TYPE_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000508 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_ADDR(n) ( \
+		IPA_CFG_REG_BASE + 0x00000510 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000510 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000510 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_RMSK 0x3f3f3f3f
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_MAXn 1
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_ATTR 0x1
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_INI(n) in_dword_masked( \
+		HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_ADDR(n), \
+		HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_RMSK)
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_INMI(n, \
+							mask) \
+	in_dword_masked( \
+		HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_DST_RSRC_GRP_3_CNT_BMSK \
+	0x3f000000
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_DST_RSRC_GRP_3_CNT_SHFT \
+	0x18
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_DST_RSRC_GRP_2_CNT_BMSK \
+	0x3f0000
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_DST_RSRC_GRP_2_CNT_SHFT \
+	0x10
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_DST_RSRC_GRP_1_CNT_BMSK \
+	0x3f00
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_DST_RSRC_GRP_1_CNT_SHFT \
+	0x8
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_DST_RSRC_GRP_0_CNT_BMSK \
+	0x3f
+#define HWIO_IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n_DST_RSRC_GRP_0_CNT_SHFT \
+	0x0
+#define HWIO_IPA_DST_RSRC_GRP_4567_RSRC_TYPE_CNT_n_ADDR(n) ( \
+		IPA_CFG_REG_BASE + 0x00000514 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_4567_RSRC_TYPE_CNT_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000514 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_GRP_4567_RSRC_TYPE_CNT_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000514 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_TYPE_AMOUNT_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						 0x00000518 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_TYPE_AMOUNT_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+						 0x00000518 + 0x20 * (n))
+#define HWIO_IPA_DST_RSRC_TYPE_AMOUNT_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+						 0x00000518 + 0x20 * (n))
+#define HWIO_IPA_RSRC_GRP_CFG_ADDR (IPA_CFG_REG_BASE + 0x000005a0)
+#define HWIO_IPA_RSRC_GRP_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000005a0)
+#define HWIO_IPA_RSRC_GRP_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000005a0)
+#define HWIO_IPA_RSRC_GRP_CFG_RMSK 0x3f11f171
+#define HWIO_IPA_RSRC_GRP_CFG_ATTR 0x3
+#define HWIO_IPA_RSRC_GRP_CFG_IN in_dword_masked( \
+		HWIO_IPA_RSRC_GRP_CFG_ADDR, \
+		HWIO_IPA_RSRC_GRP_CFG_RMSK)
+#define HWIO_IPA_RSRC_GRP_CFG_INM(m) in_dword_masked( \
+		HWIO_IPA_RSRC_GRP_CFG_ADDR, \
+		m)
+#define HWIO_IPA_RSRC_GRP_CFG_OUT(v) out_dword(HWIO_IPA_RSRC_GRP_CFG_ADDR, \
+					       v)
+#define HWIO_IPA_RSRC_GRP_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_RSRC_GRP_CFG_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_RSRC_GRP_CFG_IN)
+#define HWIO_IPA_RSRC_GRP_CFG_DST_GRP_SPECIAL_INDEX_BMSK 0x3f000000
+#define HWIO_IPA_RSRC_GRP_CFG_DST_GRP_SPECIAL_INDEX_SHFT 0x18
+#define HWIO_IPA_RSRC_GRP_CFG_DST_GRP_SPECIAL_VALID_BMSK 0x100000
+#define HWIO_IPA_RSRC_GRP_CFG_DST_GRP_SPECIAL_VALID_SHFT 0x14
+#define HWIO_IPA_RSRC_GRP_CFG_DST_PIPE_SPECIAL_INDEX_BMSK 0x1f000
+#define HWIO_IPA_RSRC_GRP_CFG_DST_PIPE_SPECIAL_INDEX_SHFT 0xc
+#define HWIO_IPA_RSRC_GRP_CFG_DST_PIPE_SPECIAL_VALID_BMSK 0x100
+#define HWIO_IPA_RSRC_GRP_CFG_DST_PIPE_SPECIAL_VALID_SHFT 0x8
+#define HWIO_IPA_RSRC_GRP_CFG_SRC_GRP_SPECIAL_INDEX_BMSK 0x70
+#define HWIO_IPA_RSRC_GRP_CFG_SRC_GRP_SPECIAL_INDEX_SHFT 0x4
+#define HWIO_IPA_RSRC_GRP_CFG_SRC_GRP_SPECIAL_VALID_BMSK 0x1
+#define HWIO_IPA_RSRC_GRP_CFG_SRC_GRP_SPECIAL_VALID_SHFT 0x0
+#define HWIO_IPA_PIPELINE_DISABLE_ADDR (IPA_CFG_REG_BASE + 0x000005a8)
+#define HWIO_IPA_PIPELINE_DISABLE_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000005a8)
+#define HWIO_IPA_PIPELINE_DISABLE_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000005a8)
+#define HWIO_IPA_PIPELINE_DISABLE_RMSK 0x8
+#define HWIO_IPA_PIPELINE_DISABLE_ATTR 0x3
+#define HWIO_IPA_PIPELINE_DISABLE_IN in_dword_masked( \
+		HWIO_IPA_PIPELINE_DISABLE_ADDR,	\
+		HWIO_IPA_PIPELINE_DISABLE_RMSK)
+#define HWIO_IPA_PIPELINE_DISABLE_INM(m) in_dword_masked( \
+		HWIO_IPA_PIPELINE_DISABLE_ADDR,	\
+		m)
+#define HWIO_IPA_PIPELINE_DISABLE_OUT(v) out_dword( \
+		HWIO_IPA_PIPELINE_DISABLE_ADDR,	\
+		v)
+#define HWIO_IPA_PIPELINE_DISABLE_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_PIPELINE_DISABLE_ADDR,	\
+		m, \
+		v, \
+		HWIO_IPA_PIPELINE_DISABLE_IN)
+#define HWIO_IPA_PIPELINE_DISABLE_RX_CMDQ_SPLITTER_DIS_BMSK 0x8
+#define HWIO_IPA_PIPELINE_DISABLE_RX_CMDQ_SPLITTER_DIS_SHFT 0x3
+#define HWIO_IPA_AXI_CFG_ADDR (IPA_CFG_REG_BASE + 0x000005ac)
+#define HWIO_IPA_AXI_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x000005ac)
+#define HWIO_IPA_AXI_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x000005ac)
+#define HWIO_IPA_STAT_QUOTA_BASE_n_ADDR(n) (IPA_CFG_REG_BASE + \
+					    0x00000700 + 0x4 * (n))
+#define HWIO_IPA_STAT_QUOTA_BASE_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					    0x00000700 + 0x4 * (n))
+#define HWIO_IPA_STAT_QUOTA_BASE_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					    0x00000700 + 0x4 * (n))
+#define HWIO_IPA_STAT_QUOTA_MASK_n_ADDR(n) (IPA_CFG_REG_BASE + \
+					    0x00000708 + 0x4 * (n))
+#define HWIO_IPA_STAT_QUOTA_MASK_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					    0x00000708 + 0x4 * (n))
+#define HWIO_IPA_STAT_QUOTA_MASK_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					    0x00000708 + 0x4 * (n))
+#define HWIO_IPA_STAT_TETHERING_BASE_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						0x00000710 + 0x4 * (n))
+#define HWIO_IPA_STAT_TETHERING_BASE_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS +	\
+						0x00000710 + 0x4 * (n))
+#define HWIO_IPA_STAT_TETHERING_BASE_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS +	\
+						0x00000710 + 0x4 * (n))
+#define HWIO_IPA_STAT_TETHERING_MASK_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						0x00000718 + 0x4 * (n))
+#define HWIO_IPA_STAT_TETHERING_MASK_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS +	\
+						0x00000718 + 0x4 * (n))
+#define HWIO_IPA_STAT_TETHERING_MASK_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS +	\
+						0x00000718 + 0x4 * (n))
+#define HWIO_IPA_STAT_FILTER_IPV4_BASE_ADDR (IPA_CFG_REG_BASE + 0x00000720)
+#define HWIO_IPA_STAT_FILTER_IPV4_BASE_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					     0x00000720)
+#define HWIO_IPA_STAT_FILTER_IPV4_BASE_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					     0x00000720)
+#define HWIO_IPA_STAT_FILTER_IPV6_BASE_ADDR (IPA_CFG_REG_BASE + 0x00000724)
+#define HWIO_IPA_STAT_FILTER_IPV6_BASE_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					     0x00000724)
+#define HWIO_IPA_STAT_FILTER_IPV6_BASE_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					     0x00000724)
+#define HWIO_IPA_STAT_ROUTER_IPV4_BASE_ADDR (IPA_CFG_REG_BASE + 0x00000728)
+#define HWIO_IPA_STAT_ROUTER_IPV4_BASE_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					     0x00000728)
+#define HWIO_IPA_STAT_ROUTER_IPV4_BASE_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					     0x00000728)
+#define HWIO_IPA_STAT_ROUTER_IPV6_BASE_ADDR (IPA_CFG_REG_BASE + 0x0000072c)
+#define HWIO_IPA_STAT_ROUTER_IPV6_BASE_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					     0x0000072c)
+#define HWIO_IPA_STAT_ROUTER_IPV6_BASE_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					     0x0000072c)
+#define HWIO_IPA_STAT_DROP_CNT_BASE_n_ADDR(n) (IPA_CFG_REG_BASE + \
+					       0x00000750 + 0x4 * (n))
+#define HWIO_IPA_STAT_DROP_CNT_BASE_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					       0x00000750 + 0x4 * (n))
+#define HWIO_IPA_STAT_DROP_CNT_BASE_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					       0x00000750 + 0x4 * (n))
+#define HWIO_IPA_STAT_DROP_CNT_MASK_n_ADDR(n) (IPA_CFG_REG_BASE + \
+					       0x00000758 + 0x4 * (n))
+#define HWIO_IPA_STAT_DROP_CNT_MASK_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					       0x00000758 + 0x4 * (n))
+#define HWIO_IPA_STAT_DROP_CNT_MASK_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					       0x00000758 + 0x4 * (n))
+#define HWIO_IPA_ENDP_INIT_CTRL_n_ADDR(n) (IPA_CFG_REG_BASE + 0x00000800 + \
+					   0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CTRL_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					   0x00000800 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CTRL_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					   0x00000800 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CTRL_n_RMSK 0x3
+#define HWIO_IPA_ENDP_INIT_CTRL_n_MAXn 30
+#define HWIO_IPA_ENDP_INIT_CTRL_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_CTRL_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_CTRL_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_CTRL_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_CTRL_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_CTRL_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_CTRL_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_CTRL_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_CTRL_n_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_CTRL_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_CTRL_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK 0x2
+#define HWIO_IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT 0x1
+#define HWIO_IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK 0x1
+#define HWIO_IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_SHFT 0x0
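+/*
+ * The IPA_ENDP_INIT_* registers that follow are per-endpoint: each
+ * endpoint owns a 0x70-byte register window, so the macro argument n is
+ * the endpoint number, bounded by the corresponding _MAXn value (30 for
+ * most of these registers, 12 for a few such as the NAT, MODE and SEQ
+ * ones).
+ */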
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						0x00000804 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS +	\
+						0x00000804 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS +	\
+						0x00000804 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_RMSK 0x2
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_MAXn 30
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_CTRL_SCND_n_ADDR(n),	\
+		HWIO_IPA_ENDP_INIT_CTRL_SCND_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_CTRL_SCND_n_ADDR(n),	\
+		mask)
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_CTRL_SCND_n_ADDR(n),	\
+		val)
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_OUTMI(n, mask, \
+					     val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_CTRL_SCND_n_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_CTRL_SCND_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_ENDP_DELAY_BMSK 0x2
+#define HWIO_IPA_ENDP_INIT_CTRL_SCND_n_ENDP_DELAY_SHFT 0x1
+#define HWIO_IPA_ENDP_INIT_CFG_n_ADDR(n) (IPA_CFG_REG_BASE + 0x00000808 + \
+					  0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CFG_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					  0x00000808 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CFG_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					  0x00000808 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CFG_n_RMSK 0x17f
+#define HWIO_IPA_ENDP_INIT_CFG_n_MAXn 30
+#define HWIO_IPA_ENDP_INIT_CFG_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_CFG_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_CFG_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_CFG_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_CFG_n_INMI(n, mask) in_dword_masked(	\
+		HWIO_IPA_ENDP_INIT_CFG_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_CFG_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_CFG_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_CFG_n_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_CFG_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_CFG_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_CFG_n_GEN_QMB_MASTER_SEL_BMSK 0x100
+#define HWIO_IPA_ENDP_INIT_CFG_n_GEN_QMB_MASTER_SEL_SHFT 0x8
+#define HWIO_IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK 0x78
+#define HWIO_IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT 0x3
+#define HWIO_IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK 0x6
+#define HWIO_IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT 0x1
+#define HWIO_IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK 0x1
+#define HWIO_IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT 0x0
+#define HWIO_IPA_ENDP_INIT_NAT_n_ADDR(n) (IPA_CFG_REG_BASE + 0x0000080c + \
+					  0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_NAT_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					  0x0000080c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_NAT_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					  0x0000080c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_NAT_n_RMSK 0x3
+#define HWIO_IPA_ENDP_INIT_NAT_n_MAXn 12
+#define HWIO_IPA_ENDP_INIT_NAT_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_NAT_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_NAT_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_NAT_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_NAT_n_INMI(n, mask) in_dword_masked(	\
+		HWIO_IPA_ENDP_INIT_NAT_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_NAT_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_NAT_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_NAT_n_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_NAT_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_NAT_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK 0x3
+#define HWIO_IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT 0x0
+#define HWIO_IPA_ENDP_INIT_HDR_n_ADDR(n) (IPA_CFG_REG_BASE + 0x00000810 + \
+					  0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					  0x00000810 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					  0x00000810 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_n_RMSK 0xffffffff
+#define HWIO_IPA_ENDP_INIT_HDR_n_MAXn 30
+#define HWIO_IPA_ENDP_INIT_HDR_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_HDR_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HDR_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_HDR_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_HDR_n_INMI(n, mask) in_dword_masked(	\
+		HWIO_IPA_ENDP_INIT_HDR_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_HDR_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_HDR_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_HDR_n_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_HDR_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_HDR_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_MSB_BMSK 0xc0000000
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_MSB_SHFT 0x1e
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_LEN_MSB_BMSK 0x30000000
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_LEN_MSB_SHFT 0x1c
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_BMSK 0x8000000
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_SHFT 0x1b
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK 0x4000000
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT 0x1a
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK 0x3f00000
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT 0x14
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK 0x80000
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT 0x13
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK 0x7e000
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT 0xd
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK 0x1f80
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT 0x7
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK 0x40
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT 0x6
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK 0x3f
+#define HWIO_IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT 0x0
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_ADDR(n) (IPA_CFG_REG_BASE + \
+					      0x00000814 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					      0x00000814 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					      0x00000814 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_RMSK 0x3f3fff
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_MAXn 30
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HDR_EXT_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_HDR_EXT_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HDR_EXT_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_HDR_EXT_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_OUTMI(n, mask, \
+					   val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_HDR_EXT_n_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_HDR_EXT_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_ADDITIONAL_CONST_LEN_MSB_BMSK \
+	0x300000
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_ADDITIONAL_CONST_LEN_MSB_SHFT \
+	0x14
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_OFST_PKT_SIZE_MSB_BMSK 0xc0000
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_OFST_PKT_SIZE_MSB_SHFT 0x12
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB_BMSK \
+	0x30000
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB_SHFT \
+	0x10
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK 0x3c00
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT 0xa
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK \
+	0x3f0
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT 0x4
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK 0x8
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT 0x3
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK 0x4
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT 0x2
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK 0x2
+#define HWIO_IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT 0x1
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_ADDR(n) (IPA_CFG_REG_BASE + \
+							0x00000818 + \
+							0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000818 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000818 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_RMSK 0xffffffff
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_MAXn 30
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_ADDR(n),	\
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_INMI(n, \
+						    mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_ADDR( \
+			n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_ADDR(n),	\
+		val)
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_OUTMI(n, mask, \
+						     val) \
+	out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_ADDR(n),	\
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK \
+	0xffffffff
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT 0x0
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						   0x0000081c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+						   0x0000081c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+						   0x0000081c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_RMSK 0xffffffff
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_MAXn 12
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_OUTMI(n, mask, \
+						val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_n_ADDR(	\
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_HDR_METADATA_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_METADATA_BMSK 0xffffffff
+#define HWIO_IPA_ENDP_INIT_HDR_METADATA_n_METADATA_SHFT 0x0
+#define HWIO_IPA_ENDP_INIT_MODE_n_ADDR(n) (IPA_CFG_REG_BASE + 0x00000820 + \
+					   0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_MODE_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					   0x00000820 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_MODE_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					   0x00000820 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_MODE_n_RMSK 0x3ffff1ff
+#define HWIO_IPA_ENDP_INIT_MODE_n_MAXn 12
+#define HWIO_IPA_ENDP_INIT_MODE_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_MODE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_MODE_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_MODE_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_MODE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_MODE_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_MODE_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_MODE_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_MODE_n_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_MODE_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_MODE_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_MODE_n_PAD_EN_BMSK 0x20000000
+#define HWIO_IPA_ENDP_INIT_MODE_n_PAD_EN_SHFT 0x1d
+#define HWIO_IPA_ENDP_INIT_MODE_n_PIPE_REPLICATE_EN_BMSK 0x10000000
+#define HWIO_IPA_ENDP_INIT_MODE_n_PIPE_REPLICATE_EN_SHFT 0x1c
+#define HWIO_IPA_ENDP_INIT_MODE_n_BYTE_THRESHOLD_BMSK 0xffff000
+#define HWIO_IPA_ENDP_INIT_MODE_n_BYTE_THRESHOLD_SHFT 0xc
+#define HWIO_IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK 0x1f0
+#define HWIO_IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT 0x4
+#define HWIO_IPA_ENDP_INIT_MODE_n_DCPH_ENABLE_BMSK 0x8
+#define HWIO_IPA_ENDP_INIT_MODE_n_DCPH_ENABLE_SHFT 0x3
+#define HWIO_IPA_ENDP_INIT_MODE_n_MODE_BMSK 0x7
+#define HWIO_IPA_ENDP_INIT_MODE_n_MODE_SHFT 0x0
+#define HWIO_IPA_ENDP_INIT_AGGR_n_ADDR(n) (IPA_CFG_REG_BASE + 0x00000824 + \
+					   0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_AGGR_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					   0x00000824 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_AGGR_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					   0x00000824 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_AGGR_n_RMSK 0xdfff7ff
+#define HWIO_IPA_ENDP_INIT_AGGR_n_MAXn 30
+#define HWIO_IPA_ENDP_INIT_AGGR_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_AGGR_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_AGGR_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_AGGR_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_AGGR_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_AGGR_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_AGGR_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_AGGR_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_AGGR_n_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_AGGR_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_AGGR_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_GRAN_SEL_BMSK 0x8000000
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_GRAN_SEL_SHFT 0x1b
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK \
+	0x4000000
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT 0x1a
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK 0x1000000
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT 0x18
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK 0x800000
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT 0x17
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK 0x7e0000
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT 0x11
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK 0x1f000
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT 0xc
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK 0x7e0
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT 0x5
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK 0x1c
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT 0x2
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK 0x3
+#define HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT 0x0
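+/*
+ * Illustrative sketch (not from the generated header): a field is
+ * normally accessed by combining its _BMSK/_SHFT pair with the masked
+ * accessors above. For example, to read AGGR_EN of endpoint ep and then
+ * enable aggregation with a read-modify-write:
+ *
+ *	u32 en = HWIO_IPA_ENDP_INIT_AGGR_n_INMI(ep,
+ *			HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK) >>
+ *		HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT;
+ *
+ *	HWIO_IPA_ENDP_INIT_AGGR_n_OUTMI(ep,
+ *		HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK,
+ *		1 << HWIO_IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT);
+ *
+ * _OUTMI writes only the bits selected by the mask and preserves the
+ * rest of the register via the _INI read-back.
+ */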
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						   0x0000082c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+						   0x0000082c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+						   0x0000082c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_RMSK 0x1
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_MAXn 30
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_OUTMI(n, mask, \
+						val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_ADDR(	\
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_BMSK 0x1
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_SHFT 0x0
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						      0x00000830 + 0x70 * \
+						      (n))
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_PHYS(n) ( \
+		IPA_CFG_REG_BASE_PHYS + 0x00000830 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_OFFS(n) ( \
+		IPA_CFG_REG_BASE_OFFS + 0x00000830 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_RMSK 0x11f
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_MAXn 30
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_OUTMI(n, mask, \
+						   val)	\
+	out_dword_masked_ns(HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_ADDR( \
+				    n),	\
+			    mask, val, \
+			    HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_GRAN_SEL_BMSK 0x100
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_GRAN_SEL_SHFT 0x8
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIME_LIMIT_BMSK 0x1f
+#define HWIO_IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIME_LIMIT_SHFT 0x0
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_ADDR(n) (IPA_CFG_REG_BASE +	\
+					     0x00000834 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					     0x00000834 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					     0x00000834 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_RMSK 0xffff7fff
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_MAXn 12
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_DEAGGR_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_DEAGGR_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_DEAGGR_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_DEAGGR_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_OUTMI(n, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_DEAGGR_n_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_DEAGGR_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK 0xffff0000
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT 0x10
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_IGNORE_MIN_PKT_ERR_BMSK 0x4000
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_IGNORE_MIN_PKT_ERR_SHFT 0xe
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK 0x3f00
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT 0x8
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK 0x80
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT 0x7
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_SYSPIPE_ERR_DETECTION_BMSK 0x40
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_SYSPIPE_ERR_DETECTION_SHFT 0x6
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK 0x3f
+#define HWIO_IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT 0x0
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_ADDR(n) (IPA_CFG_REG_BASE + \
+					       0x00000838 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					       0x00000838 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					       0x00000838 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_RMSK 0x7
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_MAXn 30
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_RSRC_GRP_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_RSRC_GRP_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_RSRC_GRP_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_RSRC_GRP_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_OUTMI(n, mask, \
+					    val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_RSRC_GRP_n_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_RSRC_GRP_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK 0x7
+#define HWIO_IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT 0x0
+#define HWIO_IPA_ENDP_INIT_SEQ_n_ADDR(n) (IPA_CFG_REG_BASE + 0x0000083c + \
+					  0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_SEQ_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					  0x0000083c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_SEQ_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					  0x0000083c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_SEQ_n_RMSK 0xffff
+#define HWIO_IPA_ENDP_INIT_SEQ_n_MAXn 12
+#define HWIO_IPA_ENDP_INIT_SEQ_n_ATTR 0x3
+#define HWIO_IPA_ENDP_INIT_SEQ_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_INIT_SEQ_n_ADDR(n), \
+		HWIO_IPA_ENDP_INIT_SEQ_n_RMSK)
+#define HWIO_IPA_ENDP_INIT_SEQ_n_INMI(n, mask) in_dword_masked(	\
+		HWIO_IPA_ENDP_INIT_SEQ_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_INIT_SEQ_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_INIT_SEQ_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_INIT_SEQ_n_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_INIT_SEQ_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_INIT_SEQ_n_INI(n))
+#define HWIO_IPA_ENDP_INIT_SEQ_n_DPS_REP_SEQ_TYPE_BMSK 0xf000
+#define HWIO_IPA_ENDP_INIT_SEQ_n_DPS_REP_SEQ_TYPE_SHFT 0xc
+#define HWIO_IPA_ENDP_INIT_SEQ_n_HPS_REP_SEQ_TYPE_BMSK 0xf00
+#define HWIO_IPA_ENDP_INIT_SEQ_n_HPS_REP_SEQ_TYPE_SHFT 0x8
+#define HWIO_IPA_ENDP_INIT_SEQ_n_DPS_SEQ_TYPE_BMSK 0xf0
+#define HWIO_IPA_ENDP_INIT_SEQ_n_DPS_SEQ_TYPE_SHFT 0x4
+#define HWIO_IPA_ENDP_INIT_SEQ_n_HPS_SEQ_TYPE_BMSK 0xf
+#define HWIO_IPA_ENDP_INIT_SEQ_n_HPS_SEQ_TYPE_SHFT 0x0
+#define HWIO_IPA_ENDP_STATUS_n_ADDR(n) (IPA_CFG_REG_BASE + 0x00000840 +	\
+					0x70 * (n))
+#define HWIO_IPA_ENDP_STATUS_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS +	\
+					0x00000840 + 0x70 * (n))
+#define HWIO_IPA_ENDP_STATUS_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS +	\
+					0x00000840 + 0x70 * (n))
+#define HWIO_IPA_ENDP_STATUS_n_RMSK 0x23f
+#define HWIO_IPA_ENDP_STATUS_n_MAXn 30
+#define HWIO_IPA_ENDP_STATUS_n_ATTR 0x3
+#define HWIO_IPA_ENDP_STATUS_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_STATUS_n_ADDR(n),	\
+		HWIO_IPA_ENDP_STATUS_n_RMSK)
+#define HWIO_IPA_ENDP_STATUS_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_STATUS_n_ADDR(n),	\
+		mask)
+#define HWIO_IPA_ENDP_STATUS_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_STATUS_n_ADDR(n),	\
+		val)
+#define HWIO_IPA_ENDP_STATUS_n_OUTMI(n, mask, val) out_dword_masked_ns(	\
+		HWIO_IPA_ENDP_STATUS_n_ADDR(n),	\
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_STATUS_n_INI(n))
+#define HWIO_IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK 0x3e
+#define HWIO_IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT 0x1
+#define HWIO_IPA_ENDP_STATUS_n_STATUS_EN_BMSK 0x1
+#define HWIO_IPA_ENDP_STATUS_n_STATUS_EN_SHFT 0x0
+#define HWIO_IPA_ENDP_SRC_ID_WRITE_n_ADDR(n) (IPA_CFG_REG_BASE + \
+					      0x00000848 + 0x70 * (n))
+#define HWIO_IPA_ENDP_SRC_ID_WRITE_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					      0x00000848 + 0x70 * (n))
+#define HWIO_IPA_ENDP_SRC_ID_WRITE_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					      0x00000848 + 0x70 * (n))
+#define HWIO_IPA_ENDP_SRC_ID_READ_n_ADDR(n) (IPA_CFG_REG_BASE +	\
+					     0x0000084c + 0x70 * (n))
+#define HWIO_IPA_ENDP_SRC_ID_READ_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					     0x0000084c + 0x70 * (n))
+#define HWIO_IPA_ENDP_SRC_ID_READ_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					     0x0000084c + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CONN_TRACK_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						 0x00000850 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CONN_TRACK_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+						 0x00000850 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CONN_TRACK_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+						 0x00000850 + 0x70 * (n))
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						       0x0000085c + 0x70 * \
+						       (n))
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_PHYS(n) (	\
+		IPA_CFG_REG_BASE_PHYS + 0x0000085c + 0x70 * (n))
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_OFFS(n) (	\
+		IPA_CFG_REG_BASE_OFFS + 0x0000085c + 0x70 * (n))
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_RMSK 0x7f007f
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_MAXn 31
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ATTR 0x3
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ADDR(n), \
+		HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_RMSK)
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_INMI(n, \
+						   mask) in_dword_masked( \
+		HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ADDR( \
+			n), \
+		mask)
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_OUTMI(n, mask, \
+						    val) \
+	out_dword_masked_ns(HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ADDR(	\
+				    n),	\
+			    mask, val, \
+			    HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_INI(n))
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK \
+	0x400000
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT \
+	0x16
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK \
+	0x200000
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT \
+	0x15
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK \
+	0x100000
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT \
+	0x14
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK \
+	0x80000
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT \
+	0x13
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_ADD_BMSK \
+	0x40000
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_ADD_SHFT \
+	0x12
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_ADD_BMSK \
+	0x20000
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_ADD_SHFT \
+	0x11
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK \
+	0x10000
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT \
+	0x10
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK \
+	0x40
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT \
+	0x6
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK \
+	0x20
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT \
+	0x5
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK \
+	0x10
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT \
+	0x4
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK \
+	0x8
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT \
+	0x3
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_ADD_BMSK \
+	0x4
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_ADD_SHFT \
+	0x2
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_ADD_BMSK \
+	0x2
+#define	\
+	HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_ADD_SHFT \
+	0x1
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK \
+	0x1
+#define HWIO_IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT \
+	0x0
+#define HWIO_IPA_ENDP_YELLOW_RED_MARKER_CFG_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						       0x00000860 + 0x70 * \
+						       (n))
+#define HWIO_IPA_ENDP_YELLOW_RED_MARKER_CFG_n_PHYS(n) (	\
+		IPA_CFG_REG_BASE_PHYS + 0x00000860 + 0x70 * (n))
+#define HWIO_IPA_ENDP_YELLOW_RED_MARKER_CFG_n_OFFS(n) (	\
+		IPA_CFG_REG_BASE_OFFS + 0x00000860 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CTRL_STATUS_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						  0x00000864 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CTRL_STATUS_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+						  0x00000864 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_CTRL_STATUS_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+						  0x00000864 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_PROD_CFG_n_ADDR(n) (IPA_CFG_REG_BASE + \
+					       0x00000868 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_PROD_CFG_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS + \
+					       0x00000868 + 0x70 * (n))
+#define HWIO_IPA_ENDP_INIT_PROD_CFG_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS + \
+					       0x00000868 + 0x70 * (n))
+#define HWIO_IPA_NLO_PP_CFG1_ADDR (IPA_CFG_REG_BASE + 0x00001680)
+#define HWIO_IPA_NLO_PP_CFG1_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00001680)
+#define HWIO_IPA_NLO_PP_CFG1_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00001680)
+#define HWIO_IPA_NLO_PP_CFG1_RMSK 0x3fffffff
+#define HWIO_IPA_NLO_PP_CFG1_ATTR 0x3
+#define HWIO_IPA_NLO_PP_CFG1_IN in_dword_masked(HWIO_IPA_NLO_PP_CFG1_ADDR, \
+						HWIO_IPA_NLO_PP_CFG1_RMSK)
+#define HWIO_IPA_NLO_PP_CFG1_INM(m) in_dword_masked( \
+		HWIO_IPA_NLO_PP_CFG1_ADDR, \
+		m)
+#define HWIO_IPA_NLO_PP_CFG1_OUT(v) out_dword(HWIO_IPA_NLO_PP_CFG1_ADDR, v)
+#define HWIO_IPA_NLO_PP_CFG1_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_NLO_PP_CFG1_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_NLO_PP_CFG1_IN)
+#define HWIO_IPA_NLO_PP_CFG1_NLO_ACK_MAX_VP_BMSK 0x3f000000
+#define HWIO_IPA_NLO_PP_CFG1_NLO_ACK_MAX_VP_SHFT 0x18
+#define HWIO_IPA_NLO_PP_CFG1_NLO_STATUS_PP_BMSK 0xff0000
+#define HWIO_IPA_NLO_PP_CFG1_NLO_STATUS_PP_SHFT 0x10
+#define HWIO_IPA_NLO_PP_CFG1_NLO_DATA_PP_BMSK 0xff00
+#define HWIO_IPA_NLO_PP_CFG1_NLO_DATA_PP_SHFT 0x8
+#define HWIO_IPA_NLO_PP_CFG1_NLO_ACK_PP_BMSK 0xff
+#define HWIO_IPA_NLO_PP_CFG1_NLO_ACK_PP_SHFT 0x0
+#define HWIO_IPA_NLO_PP_CFG2_ADDR (IPA_CFG_REG_BASE + 0x00001684)
+#define HWIO_IPA_NLO_PP_CFG2_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00001684)
+#define HWIO_IPA_NLO_PP_CFG2_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00001684)
+#define HWIO_IPA_NLO_PP_CFG2_RMSK 0x7ffff
+#define HWIO_IPA_NLO_PP_CFG2_ATTR 0x3
+#define HWIO_IPA_NLO_PP_CFG2_IN in_dword_masked(HWIO_IPA_NLO_PP_CFG2_ADDR, \
+						HWIO_IPA_NLO_PP_CFG2_RMSK)
+#define HWIO_IPA_NLO_PP_CFG2_INM(m) in_dword_masked( \
+		HWIO_IPA_NLO_PP_CFG2_ADDR, \
+		m)
+#define HWIO_IPA_NLO_PP_CFG2_OUT(v) out_dword(HWIO_IPA_NLO_PP_CFG2_ADDR, v)
+#define HWIO_IPA_NLO_PP_CFG2_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_NLO_PP_CFG2_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_NLO_PP_CFG2_IN)
+#define HWIO_IPA_NLO_PP_CFG2_NLO_STATUS_BUFFER_MODE_BMSK 0x40000
+#define HWIO_IPA_NLO_PP_CFG2_NLO_STATUS_BUFFER_MODE_SHFT 0x12
+#define HWIO_IPA_NLO_PP_CFG2_NLO_DATA_BUFFER_MODE_BMSK 0x20000
+#define HWIO_IPA_NLO_PP_CFG2_NLO_DATA_BUFFER_MODE_SHFT 0x11
+#define HWIO_IPA_NLO_PP_CFG2_NLO_ACK_BUFFER_MODE_BMSK 0x10000
+#define HWIO_IPA_NLO_PP_CFG2_NLO_ACK_BUFFER_MODE_SHFT 0x10
+#define HWIO_IPA_NLO_PP_CFG2_NLO_DATA_CLOSE_PADD_BMSK 0xff00
+#define HWIO_IPA_NLO_PP_CFG2_NLO_DATA_CLOSE_PADD_SHFT 0x8
+#define HWIO_IPA_NLO_PP_CFG2_NLO_ACK_CLOSE_PADD_BMSK 0xff
+#define HWIO_IPA_NLO_PP_CFG2_NLO_ACK_CLOSE_PADD_SHFT 0x0
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_ADDR (IPA_CFG_REG_BASE + 0x00001688)
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					    0x00001688)
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					    0x00001688)
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_RMSK 0xffffffff
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_ATTR 0x3
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_IN in_dword_masked( \
+		HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_ADDR, \
+		HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_RMSK)
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_INM(m) in_dword_masked( \
+		HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_ADDR, \
+		m)
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_OUT(v) out_dword(	\
+		HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_ADDR, \
+		v)
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_IN)
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_NLO_ACK_UPPER_SIZE_BMSK 0xffff0000
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_NLO_ACK_UPPER_SIZE_SHFT 0x10
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_NLO_ACK_LOWER_SIZE_BMSK 0xffff
+#define HWIO_IPA_NLO_PP_ACK_LIMIT_CFG_NLO_ACK_LOWER_SIZE_SHFT 0x0
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_ADDR (IPA_CFG_REG_BASE + 0x0000168c)
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					     0x0000168c)
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					     0x0000168c)
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_RMSK 0xffffffff
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_ATTR 0x3
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_IN in_dword_masked( \
+		HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_ADDR, \
+		HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_RMSK)
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_INM(m) in_dword_masked( \
+		HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_ADDR, \
+		m)
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_OUT(v) out_dword( \
+		HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_ADDR, \
+		v)
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_IN)
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_NLO_DATA_UPPER_SIZE_BMSK 0xffff0000
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_NLO_DATA_UPPER_SIZE_SHFT 0x10
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_NLO_DATA_LOWER_SIZE_BMSK 0xffff
+#define HWIO_IPA_NLO_PP_DATA_LIMIT_CFG_NLO_DATA_LOWER_SIZE_SHFT 0x0
+#define HWIO_IPA_NLO_MIN_DSM_CFG_ADDR (IPA_CFG_REG_BASE + 0x00001690)
+#define HWIO_IPA_NLO_MIN_DSM_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00001690)
+#define HWIO_IPA_NLO_MIN_DSM_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00001690)
+#define HWIO_IPA_NLO_MIN_DSM_CFG_RMSK 0xffffffff
+#define HWIO_IPA_NLO_MIN_DSM_CFG_ATTR 0x3
+#define HWIO_IPA_NLO_MIN_DSM_CFG_IN in_dword_masked( \
+		HWIO_IPA_NLO_MIN_DSM_CFG_ADDR, \
+		HWIO_IPA_NLO_MIN_DSM_CFG_RMSK)
+#define HWIO_IPA_NLO_MIN_DSM_CFG_INM(m) in_dword_masked( \
+		HWIO_IPA_NLO_MIN_DSM_CFG_ADDR, \
+		m)
+#define HWIO_IPA_NLO_MIN_DSM_CFG_OUT(v) out_dword( \
+		HWIO_IPA_NLO_MIN_DSM_CFG_ADDR, \
+		v)
+#define HWIO_IPA_NLO_MIN_DSM_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_NLO_MIN_DSM_CFG_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_NLO_MIN_DSM_CFG_IN)
+#define HWIO_IPA_NLO_MIN_DSM_CFG_NLO_DATA_MIN_DSM_LEN_BMSK 0xffff0000
+#define HWIO_IPA_NLO_MIN_DSM_CFG_NLO_DATA_MIN_DSM_LEN_SHFT 0x10
+#define HWIO_IPA_NLO_MIN_DSM_CFG_NLO_ACK_MIN_DSM_LEN_BMSK 0xffff
+#define HWIO_IPA_NLO_MIN_DSM_CFG_NLO_ACK_MIN_DSM_LEN_SHFT 0x0
+#define HWIO_IPA_NLO_VP_AGGR_CFG_LSB_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						0x00001700 + 0x8 * (n))
+#define HWIO_IPA_NLO_VP_AGGR_CFG_LSB_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS +	\
+						0x00001700 + 0x8 * (n))
+#define HWIO_IPA_NLO_VP_AGGR_CFG_LSB_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS +	\
+						0x00001700 + 0x8 * (n))
+#define HWIO_IPA_NLO_VP_AGGR_CFG_MSB_n_ADDR(n) (IPA_CFG_REG_BASE + \
+						0x00001704 + 0x8 * (n))
+#define HWIO_IPA_NLO_VP_AGGR_CFG_MSB_n_PHYS(n) (IPA_CFG_REG_BASE_PHYS +	\
+						0x00001704 + 0x8 * (n))
+#define HWIO_IPA_NLO_VP_AGGR_CFG_MSB_n_OFFS(n) (IPA_CFG_REG_BASE_OFFS +	\
+						0x00001704 + 0x8 * (n))
+#define HWIO_IPA_SNIFFER_QMB_SEL_ADDR (IPA_CFG_REG_BASE + 0x00001800)
+#define HWIO_IPA_SNIFFER_QMB_SEL_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00001800)
+#define HWIO_IPA_SNIFFER_QMB_SEL_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00001800)
+#define HWIO_IPA_COAL_EVICT_LRU_ADDR (IPA_CFG_REG_BASE + 0x0000180c)
+#define HWIO_IPA_COAL_EVICT_LRU_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000180c)
+#define HWIO_IPA_COAL_EVICT_LRU_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000180c)
+#define HWIO_IPA_COAL_QMAP_CFG_ADDR (IPA_CFG_REG_BASE + 0x00001810)
+#define HWIO_IPA_COAL_QMAP_CFG_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00001810)
+#define HWIO_IPA_COAL_QMAP_CFG_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00001810)
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_ADDR (IPA_CFG_REG_BASE + 0x00001814)
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00001814)
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00001814)
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_RMSK 0x80ff00ff
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_ATTR 0x3
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_IN in_dword_masked( \
+		HWIO_IPA_NLO_VP_FLUSH_REQ_ADDR,	\
+		HWIO_IPA_NLO_VP_FLUSH_REQ_RMSK)
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_INM(m) in_dword_masked( \
+		HWIO_IPA_NLO_VP_FLUSH_REQ_ADDR,	\
+		m)
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_OUT(v) out_dword( \
+		HWIO_IPA_NLO_VP_FLUSH_REQ_ADDR,	\
+		v)
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_NLO_VP_FLUSH_REQ_ADDR,	\
+		m, \
+		v, \
+		HWIO_IPA_NLO_VP_FLUSH_REQ_IN)
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_VP_FLUSH_REQ_BMSK 0x80000000
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_VP_FLUSH_REQ_SHFT 0x1f
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_VP_FLUSH_VP_INDX_BMSK 0xff0000
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_VP_FLUSH_VP_INDX_SHFT 0x10
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_VP_FLUSH_PP_INDX_BMSK 0xff
+#define HWIO_IPA_NLO_VP_FLUSH_REQ_VP_FLUSH_PP_INDX_SHFT 0x0
+#define HWIO_IPA_NLO_VP_FLUSH_COOKIE_ADDR (IPA_CFG_REG_BASE + 0x00001818)
+#define HWIO_IPA_NLO_VP_FLUSH_COOKIE_PHYS (IPA_CFG_REG_BASE_PHYS + \
+					   0x00001818)
+#define HWIO_IPA_NLO_VP_FLUSH_COOKIE_OFFS (IPA_CFG_REG_BASE_OFFS + \
+					   0x00001818)
+#define HWIO_IPA_NLO_VP_FLUSH_COOKIE_RMSK 0xffffffff
+#define HWIO_IPA_NLO_VP_FLUSH_COOKIE_ATTR 0x1
+#define HWIO_IPA_NLO_VP_FLUSH_COOKIE_IN in_dword_masked( \
+		HWIO_IPA_NLO_VP_FLUSH_COOKIE_ADDR, \
+		HWIO_IPA_NLO_VP_FLUSH_COOKIE_RMSK)
+#define HWIO_IPA_NLO_VP_FLUSH_COOKIE_INM(m) in_dword_masked( \
+		HWIO_IPA_NLO_VP_FLUSH_COOKIE_ADDR, \
+		m)
+#define HWIO_IPA_NLO_VP_FLUSH_COOKIE_VP_FLUSH_COOKIE_BMSK 0xffffffff
+#define HWIO_IPA_NLO_VP_FLUSH_COOKIE_VP_FLUSH_COOKIE_SHFT 0x0
+#define HWIO_IPA_NLO_VP_FLUSH_ACK_ADDR (IPA_CFG_REG_BASE + 0x0000181c)
+#define HWIO_IPA_NLO_VP_FLUSH_ACK_PHYS (IPA_CFG_REG_BASE_PHYS + 0x0000181c)
+#define HWIO_IPA_NLO_VP_FLUSH_ACK_OFFS (IPA_CFG_REG_BASE_OFFS + 0x0000181c)
+#define HWIO_IPA_NLO_VP_FLUSH_ACK_RMSK 0x1
+#define HWIO_IPA_NLO_VP_FLUSH_ACK_ATTR 0x1
+#define HWIO_IPA_NLO_VP_FLUSH_ACK_IN in_dword_masked( \
+		HWIO_IPA_NLO_VP_FLUSH_ACK_ADDR,	\
+		HWIO_IPA_NLO_VP_FLUSH_ACK_RMSK)
+#define HWIO_IPA_NLO_VP_FLUSH_ACK_INM(m) in_dword_masked( \
+		HWIO_IPA_NLO_VP_FLUSH_ACK_ADDR,	\
+		m)
+#define HWIO_IPA_NLO_VP_FLUSH_ACK_VP_FLUSH_ACK_BMSK 0x1
+#define HWIO_IPA_NLO_VP_FLUSH_ACK_VP_FLUSH_ACK_SHFT 0x0
+#define HWIO_IPA_NLO_VP_DSM_OPEN_ADDR (IPA_CFG_REG_BASE + 0x00001820)
+#define HWIO_IPA_NLO_VP_DSM_OPEN_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00001820)
+#define HWIO_IPA_NLO_VP_DSM_OPEN_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00001820)
+#define HWIO_IPA_NLO_VP_DSM_OPEN_RMSK 0xffffffff
+#define HWIO_IPA_NLO_VP_DSM_OPEN_ATTR 0x1
+#define HWIO_IPA_NLO_VP_DSM_OPEN_IN in_dword_masked( \
+		HWIO_IPA_NLO_VP_DSM_OPEN_ADDR, \
+		HWIO_IPA_NLO_VP_DSM_OPEN_RMSK)
+#define HWIO_IPA_NLO_VP_DSM_OPEN_INM(m) in_dword_masked( \
+		HWIO_IPA_NLO_VP_DSM_OPEN_ADDR, \
+		m)
+#define HWIO_IPA_NLO_VP_DSM_OPEN_VP_DSM_OPEN_BMSK 0xffffffff
+#define HWIO_IPA_NLO_VP_DSM_OPEN_VP_DSM_OPEN_SHFT 0x0
+#define HWIO_IPA_NLO_VP_QBAP_OPEN_ADDR (IPA_CFG_REG_BASE + 0x00001824)
+#define HWIO_IPA_NLO_VP_QBAP_OPEN_PHYS (IPA_CFG_REG_BASE_PHYS + 0x00001824)
+#define HWIO_IPA_NLO_VP_QBAP_OPEN_OFFS (IPA_CFG_REG_BASE_OFFS + 0x00001824)
+#define HWIO_IPA_NLO_VP_QBAP_OPEN_RMSK 0xffffffff
+#define HWIO_IPA_NLO_VP_QBAP_OPEN_ATTR 0x1
+#define HWIO_IPA_NLO_VP_QBAP_OPEN_IN in_dword_masked( \
+		HWIO_IPA_NLO_VP_QBAP_OPEN_ADDR,	\
+		HWIO_IPA_NLO_VP_QBAP_OPEN_RMSK)
+#define HWIO_IPA_NLO_VP_QBAP_OPEN_INM(m) in_dword_masked( \
+		HWIO_IPA_NLO_VP_QBAP_OPEN_ADDR,	\
+		m)
+#define HWIO_IPA_NLO_VP_QBAP_OPEN_VP_QBAP_OPEN_BMSK 0xffffffff
+#define HWIO_IPA_NLO_VP_QBAP_OPEN_VP_QBAP_OPEN_SHFT 0x0
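+/*
+ * End of the IPA_CFG register block; the definitions below describe the
+ * separate IPA_DEBUG block (wrapper base + 0x42000), mostly per-arbiter
+ * DEBUG_STATUS/DEBUG_BLOCK_STATUS/DEBUG_CFG_MASK/DEBUG_CFG_BLOCK/
+ * DEBUG_CMD register sets for the HPS/DPS fetchers, the resource manager
+ * and the TX arbiter.
+ */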
+#define IPA_DEBUG_REG_BASE (IPA_0_IPA_WRAPPER_BASE + 0x00042000)
+#define IPA_DEBUG_REG_BASE_PHYS (IPA_0_IPA_WRAPPER_BASE_PHYS + 0x00042000)
+#define IPA_DEBUG_REG_BASE_OFFS 0x00042000
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x00000000)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x00000000)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x00000000)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_BLOCK_STATUS_ADDR (IPA_DEBUG_REG_BASE \
+						       + 0x00000004)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_BLOCK_STATUS_PHYS (	\
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000004)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_BLOCK_STATUS_OFFS (	\
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000004)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_CFG_MASK_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x00000008)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_CFG_MASK_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000008)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_CFG_MASK_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000008)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_CFG_BLOCK_ADDR (IPA_DEBUG_REG_BASE + \
+						    0x0000000c)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_CFG_BLOCK_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000000c)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_CFG_BLOCK_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000000c)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000010)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000010)
+#define HWIO_IPA_HPS_FTCH_ARB_DEBUG_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000010)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x00000014)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x00000014)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x00000014)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_BLOCK_STATUS_ADDR (IPA_DEBUG_REG_BASE \
+						       + 0x00000018)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_BLOCK_STATUS_PHYS (	\
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000018)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_BLOCK_STATUS_OFFS (	\
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000018)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_CFG_MASK_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x0000001c)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_CFG_MASK_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x0000001c)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_CFG_MASK_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x0000001c)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_CFG_BLOCK_ADDR (IPA_DEBUG_REG_BASE + \
+						    0x00000020)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_CFG_BLOCK_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000020)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_CFG_BLOCK_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000020)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000024)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000024)
+#define HWIO_IPA_DPS_FTCH_ARB_DEBUG_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000024)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_STATUS_ADDR (IPA_DEBUG_REG_BASE \
+						       + 0x00000028)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_STATUS_PHYS (	\
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000028)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_STATUS_OFFS (	\
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000028)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_BLOCK_STATUS_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x0000002c)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_BLOCK_STATUS_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000002c)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_BLOCK_STATUS_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000002c)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_CFG_MASK_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000030)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_CFG_MASK_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000030)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_CFG_MASK_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000030)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_CFG_BLOCK_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000034)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_CFG_BLOCK_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000034)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_CFG_BLOCK_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000034)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+						    0x00000038)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_CMD_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000038)
+#define HWIO_IPA_RSRC_MNGR_FUNC_ARB_DEBUG_CMD_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000038)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_STATUS_ADDR (IPA_DEBUG_REG_BASE \
+							+ 0x0000003c)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_STATUS_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000003c)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_STATUS_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000003c)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_BLOCK_STATUS_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000040)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_BLOCK_STATUS_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000040)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_BLOCK_STATUS_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000040)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_CFG_MASK_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000044)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_CFG_MASK_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000044)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_CFG_MASK_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000044)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_CFG_BLOCK_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000048)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_CFG_BLOCK_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000048)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_CFG_BLOCK_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000048)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+						     0x0000004c)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_CMD_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000004c)
+#define HWIO_IPA_RSRC_MNGR_ALLOC_ARB_DEBUG_CMD_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000004c)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_STATUS_ADDR (IPA_DEBUG_REG_BASE \
+						       + 0x00000050)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_STATUS_PHYS (	\
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000050)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_STATUS_OFFS (	\
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000050)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_BLOCK_STATUS_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000054)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_BLOCK_STATUS_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000054)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_BLOCK_STATUS_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000054)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_CFG_MASK_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000058)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_CFG_MASK_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000058)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_CFG_MASK_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000058)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_CFG_BLOCK_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x0000005c)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_CFG_BLOCK_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000005c)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_CFG_BLOCK_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000005c)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+						    0x00000060)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_CMD_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000060)
+#define HWIO_IPA_RSRC_MNGR_SRCH_ARB_DEBUG_CMD_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000060)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						      0x00000064)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_STATUS_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000064)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_STATUS_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000064)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_BLOCK_STATUS_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000068)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_BLOCK_STATUS_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000068)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_BLOCK_STATUS_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000068)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_CFG_MASK_ADDR (IPA_DEBUG_REG_BASE \
+							+ 0x0000006c)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_CFG_MASK_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000006c)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_CFG_MASK_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000006c)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_CFG_BLOCK_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000070)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_CFG_BLOCK_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000070)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_CFG_BLOCK_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000070)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_CMD_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x00000074)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000074)
+#define HWIO_IPA_RSRC_MNGR_REL_ARB_DEBUG_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000074)
+#define HWIO_IPA_TX_ARB_DEBUG_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x00000078)
+#define HWIO_IPA_TX_ARB_DEBUG_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x00000078)
+#define HWIO_IPA_TX_ARB_DEBUG_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x00000078)
+#define HWIO_IPA_TX_ARB_DEBUG_BLOCK_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x0000007c)
+#define HWIO_IPA_TX_ARB_DEBUG_BLOCK_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x0000007c)
+#define HWIO_IPA_TX_ARB_DEBUG_BLOCK_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x0000007c)
+#define HWIO_IPA_TX_ARB_DEBUG_CFG_MASK_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000080)
+#define HWIO_IPA_TX_ARB_DEBUG_CFG_MASK_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000080)
+#define HWIO_IPA_TX_ARB_DEBUG_CFG_MASK_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000080)
+#define HWIO_IPA_TX_ARB_DEBUG_CFG_BLOCK_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000084)
+#define HWIO_IPA_TX_ARB_DEBUG_CFG_BLOCK_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000084)
+#define HWIO_IPA_TX_ARB_DEBUG_CFG_BLOCK_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000084)
+#define HWIO_IPA_TX_ARB_DEBUG_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x00000088)
+#define HWIO_IPA_TX_ARB_DEBUG_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					0x00000088)
+#define HWIO_IPA_TX_ARB_DEBUG_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					0x00000088)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						0x0000008c)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x0000008c)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x0000008c)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_BLOCK_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						      0x00000090)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_BLOCK_STATUS_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000090)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_BLOCK_STATUS_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000090)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_CFG_MASK_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000094)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_CFG_MASK_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000094)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_CFG_MASK_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000094)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_CFG_BLOCK_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x00000098)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_CFG_BLOCK_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000098)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_CFG_BLOCK_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000098)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x0000009c)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x0000009c)
+#define HWIO_IPA_HPS_SEQ_ARB_DEBUG_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x0000009c)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000100)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000100)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000100)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_BLOCK_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						      0x00000104)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_BLOCK_STATUS_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000104)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_BLOCK_STATUS_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000104)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_CFG_MASK_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000108)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_CFG_MASK_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000108)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_CFG_MASK_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000108)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_CFG_BLOCK_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x0000010c)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_CFG_BLOCK_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x0000010c)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_CFG_BLOCK_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x0000010c)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000110)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000110)
+#define HWIO_IPA_DPS_SEQ_ARB_DEBUG_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000110)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_ALLOC_CFG_ADDR (IPA_DEBUG_REG_BASE + \
+						     0x00000114)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_ALLOC_CFG_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000114)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_ALLOC_CFG_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000114)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_SRCH_CFG_ADDR (IPA_DEBUG_REG_BASE + \
+						    0x00000118)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_SRCH_CFG_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000118)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_SRCH_CFG_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000118)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_REL_CFG_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x0000011c)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_REL_CFG_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x0000011c)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_REL_CFG_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x0000011c)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x00000120)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x00000120)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x00000120)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000124)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000124)
+#define HWIO_IPA_RSRC_MNGR_SW_ACCESS_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000124)
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_ADDR (IPA_DEBUG_REG_BASE + 0x00000128)
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					0x00000128)
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					0x00000128)
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_RMSK 0x3f77
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_ATTR 0x3
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_IN in_dword_masked( \
+		HWIO_IPA_RSRC_MNGR_DB_CFG_ADDR,	\
+		HWIO_IPA_RSRC_MNGR_DB_CFG_RMSK)
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_INM(m) in_dword_masked( \
+		HWIO_IPA_RSRC_MNGR_DB_CFG_ADDR,	\
+		m)
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_OUT(v) out_dword( \
+		HWIO_IPA_RSRC_MNGR_DB_CFG_ADDR,	\
+		v)
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_RSRC_MNGR_DB_CFG_ADDR,	\
+		m, \
+		v, \
+		HWIO_IPA_RSRC_MNGR_DB_CFG_IN)
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_RSRC_ID_SEL_BMSK 0x3f00
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_RSRC_ID_SEL_SHFT 0x8
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_RSRC_TYPE_SEL_BMSK 0x70
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_RSRC_TYPE_SEL_SHFT 0x4
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_RSRC_GRP_SEL_BMSK 0x7
+#define HWIO_IPA_RSRC_MNGR_DB_CFG_RSRC_GRP_SEL_SHFT 0x0
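+/*
+ * Editorial note, not part of the generated register database: every
+ * register in this file follows the same HWIO convention. _ADDR, _PHYS
+ * and _OFFS give the mapped address, physical address and
+ * block-relative offset; _RMSK masks the implemented bits; _ATTR
+ * appears to encode access (0x1 read-only, 0x2 write-only, 0x3
+ * read-write, judging by which accessors are generated next to it).
+ * Each field pairs a _BMSK bit mask with a _SHFT shift, so a
+ * hypothetical read of the RSRC_ID_SEL field would look like:
+ *
+ *   u32 cfg = HWIO_IPA_RSRC_MNGR_DB_CFG_IN;
+ *   u32 id = (cfg & HWIO_IPA_RSRC_MNGR_DB_CFG_RSRC_ID_SEL_BMSK) >>
+ *            HWIO_IPA_RSRC_MNGR_DB_CFG_RSRC_ID_SEL_SHFT;
+ *
+ * while _OUTM(mask, val) performs the matching masked read-modify-write
+ * through out_dword_masked_ns().
+ */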
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x0000012c)
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x0000012c)
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x0000012c)
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_RMSK 0x3f3
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_ATTR 0x1
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_IN in_dword_masked( \
+		HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_ADDR, \
+		HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_RMSK)
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_INM(m) in_dword_masked(	\
+		HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_ADDR, \
+		m)
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_RSRC_NEXT_INDEX_BMSK 0x3f0
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_RSRC_NEXT_INDEX_SHFT 0x4
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_RSRC_NEXT_VALID_BMSK 0x2
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_RSRC_NEXT_VALID_SHFT 0x1
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_RSRC_OCCUPIED_BMSK 0x1
+#define HWIO_IPA_RSRC_MNGR_DB_RSRC_READ_RSRC_OCCUPIED_SHFT 0x0
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000130)
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000130)
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000130)
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RMSK 0x7f7f3f3
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_ATTR 0x1
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_IN in_dword_masked( \
+		HWIO_IPA_RSRC_MNGR_DB_LIST_READ_ADDR, \
+		HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RMSK)
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_INM(m) in_dword_masked(	\
+		HWIO_IPA_RSRC_MNGR_DB_LIST_READ_ADDR, \
+		m)
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RSRC_LIST_ENTRY_CNT_BMSK 0x7f00000
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RSRC_LIST_ENTRY_CNT_SHFT 0x14
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RSRC_LIST_HEAD_CNT_BMSK 0x7f000
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RSRC_LIST_HEAD_CNT_SHFT 0xc
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RSRC_LIST_HEAD_RSRC_BMSK 0x3f0
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RSRC_LIST_HEAD_RSRC_SHFT 0x4
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RSRC_LIST_HOLD_BMSK 0x2
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RSRC_LIST_HOLD_SHFT 0x1
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RSRC_LIST_VALID_BMSK 0x1
+#define HWIO_IPA_RSRC_MNGR_DB_LIST_READ_RSRC_LIST_VALID_SHFT 0x0
+#define HWIO_IPA_RSRC_MNGR_CONTEXTS_ADDR (IPA_DEBUG_REG_BASE + 0x00000134)
+#define HWIO_IPA_RSRC_MNGR_CONTEXTS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x00000134)
+#define HWIO_IPA_RSRC_MNGR_CONTEXTS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x00000134)
+#define HWIO_IPA_BRESP_DB_CFG_ADDR (IPA_DEBUG_REG_BASE + 0x00000138)
+#define HWIO_IPA_BRESP_DB_CFG_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000138)
+#define HWIO_IPA_BRESP_DB_CFG_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000138)
+#define HWIO_IPA_BRESP_DB_DATA_ADDR (IPA_DEBUG_REG_BASE + 0x0000013c)
+#define HWIO_IPA_BRESP_DB_DATA_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x0000013c)
+#define HWIO_IPA_BRESP_DB_DATA_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x0000013c)
+#define HWIO_IPA_DEBUG_DATA_ADDR (IPA_DEBUG_REG_BASE + 0x00000204)
+#define HWIO_IPA_DEBUG_DATA_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000204)
+#define HWIO_IPA_DEBUG_DATA_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000204)
+#define HWIO_IPA_DEBUG_DATA_RMSK 0xffffffff
+#define HWIO_IPA_DEBUG_DATA_ATTR 0x1
+#define HWIO_IPA_DEBUG_DATA_IN in_dword_masked(HWIO_IPA_DEBUG_DATA_ADDR, \
+					       HWIO_IPA_DEBUG_DATA_RMSK)
+#define HWIO_IPA_DEBUG_DATA_INM(m) in_dword_masked( \
+		HWIO_IPA_DEBUG_DATA_ADDR, \
+		m)
+#define HWIO_IPA_DEBUG_DATA_DEBUG_DATA_BMSK 0xffffffff
+#define HWIO_IPA_DEBUG_DATA_DEBUG_DATA_SHFT 0x0
+#define HWIO_IPA_TESTBUS_SEL_ADDR (IPA_DEBUG_REG_BASE + 0x00000208)
+#define HWIO_IPA_TESTBUS_SEL_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000208)
+#define HWIO_IPA_TESTBUS_SEL_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000208)
+#define HWIO_IPA_TESTBUS_SEL_RMSK 0x1fffff1
+#define HWIO_IPA_TESTBUS_SEL_ATTR 0x3
+#define HWIO_IPA_TESTBUS_SEL_IN in_dword_masked(HWIO_IPA_TESTBUS_SEL_ADDR, \
+						HWIO_IPA_TESTBUS_SEL_RMSK)
+#define HWIO_IPA_TESTBUS_SEL_INM(m) in_dword_masked( \
+		HWIO_IPA_TESTBUS_SEL_ADDR, \
+		m)
+#define HWIO_IPA_TESTBUS_SEL_OUT(v) out_dword(HWIO_IPA_TESTBUS_SEL_ADDR, v)
+#define HWIO_IPA_TESTBUS_SEL_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_TESTBUS_SEL_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_TESTBUS_SEL_IN)
+#define HWIO_IPA_TESTBUS_SEL_PIPE_SELECT_BMSK 0x1f00000
+#define HWIO_IPA_TESTBUS_SEL_PIPE_SELECT_SHFT 0x14
+#define HWIO_IPA_TESTBUS_SEL_INTERNAL_BLOCK_SELECT_BMSK 0xff000
+#define HWIO_IPA_TESTBUS_SEL_INTERNAL_BLOCK_SELECT_SHFT 0xc
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_BMSK 0xff0
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_SHFT 0x4
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_RX_FVAL 0x0
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_TX0_FVAL 0x1
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_FRAG_FVAL 0x2
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_UCP_FVAL 0x3
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_ENQUEUER_FVAL 0x4
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_ROUTER_FVAL 0x5
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_PKT_PARSER_FVAL 0x6
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_FILTER_NAT_FVAL 0x7
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_HDRI_RSRCREL_FVAL \
+	0x8
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_AHB2AHB_FVAL 0x9
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_MAXI2AXI_FVAL 0xa
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_DPS_DCMP_FVAL 0xb
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_DPS_DISPATCHER_FVAL 0xc
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_DPS_D_DCPH_FVAL 0xd
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_GSI_TEST_BUS_FVAL 0xe
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_DEADBEAF_FVAL 0xf
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_MISC_FVAL 0x10
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_STTS_SNIFFER_FVAL \
+	0x11
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_QMB_0_FVAL 0x12
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_QMB_1_FVAL 0x13
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_UC_ACKQ_FVAL 0x14
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_RX_ACKQ_FVAL 0x15
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_TX1_FVAL 0x16
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_H_DCPH_FVAL 0x17
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_RX_HPS_CMDQ_FVAL 0x18
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_DPS_CMDQ_FVAL 0x19
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_DPS_TX_CMDQ_FVAL 0x1a
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_CMDQ_L_FVAL 0x1b
+#define	\
+	HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_RX_LEGACY_CMDQ_INT_FVAL \
+	0x1c
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_CTX_HANDLER_FVAL	\
+	0x1d
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_GSI_FVAL 0x1e
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_ACK_MNGR_CMDQ_FVAL 0x1f
+#define	\
+	HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_ENDP_INIT_CTRL_SUSPEND_FVAL \
+	0x20
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_ACL_WRAPPER_FVAL 0x22
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_TX_WRAPPER_FVAL \
+	0x23
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_AHB2AHB_BRIDGE_FVAL \
+	0x24
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_RSRC_TYPE_FVAL 0x31
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_RSRC_FVAL 0x32
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_ACKMNGR_FVAL 0x33
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_SEQ_FVAL 0x34
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_DPS_SEQ_FVAL 0x35
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_FTCH_FVAL 0x36
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_DPS_FTCH_FVAL 0x37
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_HPS_D_DCPH_2_FVAL 0x38
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_NTF_TX_CMDQ_FVAL 0x39
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_PROD_ACK_MNGR_CMDQ_FVAL \
+	0x3a
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_PROD_ACKMNGR_FVAL 0x3b
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_GSI_AHB2AHB_FVAL	\
+	0x3c
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_MAXI2AXI_PCIE_FVAL \
+	0x3d
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_QSB2AXI_FVAL 0x3e
+#define HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_UC_FVAL 0x3f
+#define HWIO_IPA_TESTBUS_SEL_TESTBUS_EN_BMSK 0x1
+#define HWIO_IPA_TESTBUS_SEL_TESTBUS_EN_SHFT 0x0
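+/*
+ * Usage sketch (an assumption drawn from the field layout above, not a
+ * documented sequence): selecting a block on the debug testbus would
+ * combine one of the *_FVAL select values with the enable bit and then
+ * sample IPA_DEBUG_DATA, e.g.:
+ *
+ *   HWIO_IPA_TESTBUS_SEL_OUT(
+ *       (HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_IPA_RX_FVAL <<
+ *        HWIO_IPA_TESTBUS_SEL_EXTERNAL_BLOCK_SELECT_SHFT) |
+ *       HWIO_IPA_TESTBUS_SEL_TESTBUS_EN_BMSK);
+ *   u32 dbg = HWIO_IPA_DEBUG_DATA_IN;
+ */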
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x0000020c)
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x0000020c)
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x0000020c)
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_RMSK 0xffffffff
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_ATTR 0x3
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_IN in_dword_masked( \
+		HWIO_IPA_STEP_MODE_BREAKPOINTS_ADDR, \
+		HWIO_IPA_STEP_MODE_BREAKPOINTS_RMSK)
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_INM(m) in_dword_masked( \
+		HWIO_IPA_STEP_MODE_BREAKPOINTS_ADDR, \
+		m)
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_OUT(v) out_dword( \
+		HWIO_IPA_STEP_MODE_BREAKPOINTS_ADDR, \
+		v)
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_STEP_MODE_BREAKPOINTS_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_STEP_MODE_BREAKPOINTS_IN)
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_HW_EN_BMSK 0xffffffff
+#define HWIO_IPA_STEP_MODE_BREAKPOINTS_HW_EN_SHFT 0x0
+#define HWIO_IPA_STEP_MODE_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x00000210)
+#define HWIO_IPA_STEP_MODE_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					0x00000210)
+#define HWIO_IPA_STEP_MODE_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					0x00000210)
+#define HWIO_IPA_STEP_MODE_STATUS_RMSK 0xffffffff
+#define HWIO_IPA_STEP_MODE_STATUS_ATTR 0x1
+#define HWIO_IPA_STEP_MODE_STATUS_IN in_dword_masked( \
+		HWIO_IPA_STEP_MODE_STATUS_ADDR,	\
+		HWIO_IPA_STEP_MODE_STATUS_RMSK)
+#define HWIO_IPA_STEP_MODE_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_STEP_MODE_STATUS_ADDR,	\
+		m)
+#define HWIO_IPA_STEP_MODE_STATUS_HW_EN_BMSK 0xffffffff
+#define HWIO_IPA_STEP_MODE_STATUS_HW_EN_SHFT 0x0
+#define HWIO_IPA_STEP_MODE_GO_ADDR (IPA_DEBUG_REG_BASE + 0x00000214)
+#define HWIO_IPA_STEP_MODE_GO_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000214)
+#define HWIO_IPA_STEP_MODE_GO_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000214)
+#define HWIO_IPA_HW_EVENTS_CFG_ADDR (IPA_DEBUG_REG_BASE + 0x00000218)
+#define HWIO_IPA_HW_EVENTS_CFG_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000218)
+#define HWIO_IPA_HW_EVENTS_CFG_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000218)
+#define HWIO_IPA_LOG_ADDR (IPA_DEBUG_REG_BASE + 0x0000021c)
+#define HWIO_IPA_LOG_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x0000021c)
+#define HWIO_IPA_LOG_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x0000021c)
+#define HWIO_IPA_LOG_RMSK 0x3ff1f2
+#define HWIO_IPA_LOG_ATTR 0x3
+#define HWIO_IPA_LOG_IN in_dword_masked(HWIO_IPA_LOG_ADDR, \
+					HWIO_IPA_LOG_RMSK)
+#define HWIO_IPA_LOG_INM(m) in_dword_masked(HWIO_IPA_LOG_ADDR, m)
+#define HWIO_IPA_LOG_OUT(v) out_dword(HWIO_IPA_LOG_ADDR, v)
+#define HWIO_IPA_LOG_OUTM(m, v) out_dword_masked_ns(HWIO_IPA_LOG_ADDR, \
+						    m, \
+						    v, \
+						    HWIO_IPA_LOG_IN)
+#define HWIO_IPA_LOG_LOG_DPL_L2_REMOVE_EN_BMSK 0x200000
+#define HWIO_IPA_LOG_LOG_DPL_L2_REMOVE_EN_SHFT 0x15
+#define HWIO_IPA_LOG_LOG_REDUCTION_EN_BMSK 0x100000
+#define HWIO_IPA_LOG_LOG_REDUCTION_EN_SHFT 0x14
+#define HWIO_IPA_LOG_LOG_LENGTH_BMSK 0xff000
+#define HWIO_IPA_LOG_LOG_LENGTH_SHFT 0xc
+#define HWIO_IPA_LOG_LOG_PIPE_BMSK 0x1f0
+#define HWIO_IPA_LOG_LOG_PIPE_SHFT 0x4
+#define HWIO_IPA_LOG_LOG_EN_BMSK 0x2
+#define HWIO_IPA_LOG_LOG_EN_SHFT 0x1
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_ADDR (IPA_DEBUG_REG_BASE + 0x00000224)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x00000224)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x00000224)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_ADDR, \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_IN)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_START_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_START_ADDR_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x00000228)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x00000228)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x00000228)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_ADDR, \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_IN)
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_START_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_CMD_ADDR_MSB_START_ADDR_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_ADDR (IPA_DEBUG_REG_BASE + \
+						0x0000022c)
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x0000022c)
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x0000022c)
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_ATTR 0x1
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_ADDR,	\
+		HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_ADDR,	\
+		m)
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_WRITR_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_WRITR_ADDR_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_ADDR (IPA_DEBUG_REG_BASE + \
+						    0x00000230)
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000230)
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000230)
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_ATTR 0x1
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_ADDR, \
+		HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_WRITR_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_CMD_WRITE_PTR_MSB_WRITR_ADDR_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_ADDR (IPA_DEBUG_REG_BASE + 0x00000234)
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x00000234)
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x00000234)
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_RMSK 0x3ffff
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_IN in_dword_masked(	\
+		HWIO_IPA_LOG_BUF_HW_CMD_CFG_ADDR, \
+		HWIO_IPA_LOG_BUF_HW_CMD_CFG_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_CMD_CFG_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_CMD_CFG_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_CMD_CFG_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_HW_CMD_CFG_IN)
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_SKIP_DDR_DMA_BMSK 0x20000
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_SKIP_DDR_DMA_SHFT 0x11
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_ENABLE_BMSK 0x10000
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_ENABLE_SHFT 0x10
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_SIZE_BMSK 0xffff
+#define HWIO_IPA_LOG_BUF_HW_CMD_CFG_SIZE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000238)
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000238)
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000238)
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_RMSK 0xbfff3fff
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_ATTR 0x1
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_ADDR, \
+		HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_INM(m) in_dword_masked(	\
+		HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_SKIP_DDR_WRAP_HAPPENED_BMSK \
+	0x80000000
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_SKIP_DDR_WRAP_HAPPENED_SHFT 0x1f
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_WRITE_PTR_BMSK 0x3fff0000
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_WRITE_PTR_SHFT 0x10
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_READ_PTR_BMSK 0x3fff
+#define HWIO_IPA_LOG_BUF_HW_CMD_RAM_PTR_READ_PTR_SHFT 0x0
+#define HWIO_IPA_STEP_MODE_HFETCHER_ADDR_LSB_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x0000023c)
+#define HWIO_IPA_STEP_MODE_HFETCHER_ADDR_LSB_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x0000023c)
+#define HWIO_IPA_STEP_MODE_HFETCHER_ADDR_LSB_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x0000023c)
+#define HWIO_IPA_STEP_MODE_HFETCHER_ADDR_MSB_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x00000240)
+#define HWIO_IPA_STEP_MODE_HFETCHER_ADDR_MSB_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000240)
+#define HWIO_IPA_STEP_MODE_HFETCHER_ADDR_MSB_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000240)
+#define HWIO_IPA_STEP_MODE_HFETCHER_ADDR_RESULT_ADDR (IPA_DEBUG_REG_BASE + \
+						      0x00000244)
+#define HWIO_IPA_STEP_MODE_HFETCHER_ADDR_RESULT_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000244)
+#define HWIO_IPA_STEP_MODE_HFETCHER_ADDR_RESULT_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000244)
+#define HWIO_IPA_STEP_MODE_HSEQ_BREAKPOINT_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x00000248)
+#define HWIO_IPA_STEP_MODE_HSEQ_BREAKPOINT_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x00000248)
+#define HWIO_IPA_STEP_MODE_HSEQ_BREAKPOINT_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x00000248)
+#define HWIO_IPA_STEP_MODE_HSEQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x0000024c)
+#define HWIO_IPA_STEP_MODE_HSEQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x0000024c)
+#define HWIO_IPA_STEP_MODE_HSEQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x0000024c)
+#define HWIO_IPA_STEP_MODE_DSEQ_BREAKPOINT_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x00000250)
+#define HWIO_IPA_STEP_MODE_DSEQ_BREAKPOINT_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x00000250)
+#define HWIO_IPA_STEP_MODE_DSEQ_BREAKPOINT_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x00000250)
+#define HWIO_IPA_STEP_MODE_DSEQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000254)
+#define HWIO_IPA_STEP_MODE_DSEQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000254)
+#define HWIO_IPA_STEP_MODE_DSEQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000254)
+#define HWIO_IPA_RX_ACKQ_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x00000258)
+#define HWIO_IPA_RX_ACKQ_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000258)
+#define HWIO_IPA_RX_ACKQ_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000258)
+#define HWIO_IPA_RX_ACKQ_CFG_ADDR (IPA_DEBUG_REG_BASE + 0x0000025c)
+#define HWIO_IPA_RX_ACKQ_CFG_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x0000025c)
+#define HWIO_IPA_RX_ACKQ_CFG_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x0000025c)
+#define HWIO_IPA_RX_ACKQ_DATA_WR_0_ADDR (IPA_DEBUG_REG_BASE + 0x00000260)
+#define HWIO_IPA_RX_ACKQ_DATA_WR_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x00000260)
+#define HWIO_IPA_RX_ACKQ_DATA_WR_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x00000260)
+#define HWIO_IPA_RX_ACKQ_DATA_RD_0_ADDR (IPA_DEBUG_REG_BASE + 0x00000264)
+#define HWIO_IPA_RX_ACKQ_DATA_RD_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x00000264)
+#define HWIO_IPA_RX_ACKQ_DATA_RD_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x00000264)
+#define HWIO_IPA_RX_ACKQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x00000268)
+#define HWIO_IPA_RX_ACKQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000268)
+#define HWIO_IPA_RX_ACKQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000268)
+#define HWIO_IPA_UC_ACKQ_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x0000026c)
+#define HWIO_IPA_UC_ACKQ_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x0000026c)
+#define HWIO_IPA_UC_ACKQ_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x0000026c)
+#define HWIO_IPA_UC_ACKQ_CFG_ADDR (IPA_DEBUG_REG_BASE + 0x00000270)
+#define HWIO_IPA_UC_ACKQ_CFG_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000270)
+#define HWIO_IPA_UC_ACKQ_CFG_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000270)
+#define HWIO_IPA_UC_ACKQ_DATA_WR_0_ADDR (IPA_DEBUG_REG_BASE + 0x00000274)
+#define HWIO_IPA_UC_ACKQ_DATA_WR_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x00000274)
+#define HWIO_IPA_UC_ACKQ_DATA_WR_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x00000274)
+#define HWIO_IPA_UC_ACKQ_DATA_RD_0_ADDR (IPA_DEBUG_REG_BASE + 0x00000278)
+#define HWIO_IPA_UC_ACKQ_DATA_RD_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x00000278)
+#define HWIO_IPA_UC_ACKQ_DATA_RD_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x00000278)
+#define HWIO_IPA_UC_ACKQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x0000027c)
+#define HWIO_IPA_UC_ACKQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x0000027c)
+#define HWIO_IPA_UC_ACKQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x0000027c)
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_ADDR(n) (IPA_DEBUG_REG_BASE + \
+					     0x00000280 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000280 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000280 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RMSK 0x7f
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_ATTR 0x2
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_RX_SPLT_CMDQ_CMD_n_ADDR(n), \
+		val)
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RELEASE_RD_PKT_ENHANCED_BMSK 0x40
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RELEASE_RD_PKT_ENHANCED_SHFT 0x6
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RELEASE_WR_PKT_BMSK 0x20
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RELEASE_WR_PKT_SHFT 0x5
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RELEASE_RD_PKT_BMSK 0x10
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RELEASE_RD_PKT_SHFT 0x4
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RELEASE_WR_CMD_BMSK 0x8
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RELEASE_WR_CMD_SHFT 0x3
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RELEASE_RD_CMD_BMSK 0x4
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_RELEASE_RD_CMD_SHFT 0x2
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_POP_CMD_BMSK 0x2
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_POP_CMD_SHFT 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_WRITE_CMD_BMSK 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_CMD_n_WRITE_CMD_SHFT 0x0
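+/*
+ * Editorial note: registers carrying an _n_ infix are instance arrays;
+ * n runs from 0 up to the corresponding _MAXn value (3 here), and the
+ * indexed accessors _INI(n), _INMI(n, mask) and _OUTI(n, val) mirror
+ * the scalar _IN/_INM/_OUT forms. A hypothetical pop from command
+ * queue instance 0:
+ *
+ *   HWIO_IPA_RX_SPLT_CMDQ_CMD_n_OUTI(0,
+ *           HWIO_IPA_RX_SPLT_CMDQ_CMD_n_POP_CMD_BMSK);
+ */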
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_ADDR(n) (IPA_DEBUG_REG_BASE + \
+					     0x00000284 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000284 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000284 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_RMSK 0x3
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_ATTR 0x3
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_INI(n) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_CFG_n_ADDR(n), \
+		HWIO_IPA_RX_SPLT_CMDQ_CFG_n_RMSK)
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_CFG_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_RX_SPLT_CMDQ_CFG_n_ADDR(n), \
+		val)
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_OUTMI(n, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_IPA_RX_SPLT_CMDQ_CFG_n_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_RX_SPLT_CMDQ_CFG_n_INI(n))
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_BLOCK_WR_BMSK 0x2
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_BLOCK_WR_SHFT 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_BLOCK_RD_BMSK 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_CFG_n_BLOCK_RD_SHFT 0x0
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_ADDR(n) (IPA_DEBUG_REG_BASE +	\
+						   0x00000288 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000288 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000288 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_RMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_ATTR 0x3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_INI(n) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_ADDR(n), \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_RMSK)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_ADDR(n), \
+		val)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_OUTMI(n, mask, \
+						val) out_dword_masked_ns( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_ADDR(	\
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_INI(n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_CMDQ_SRC_LEN_F_BMSK 0xffff0000
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_CMDQ_SRC_LEN_F_SHFT 0x10
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_CMDQ_PACKET_LEN_F_BMSK 0xffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_0_n_CMDQ_PACKET_LEN_F_SHFT 0x0
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_ADDR(n) (IPA_DEBUG_REG_BASE +	\
+						   0x0000028c + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x0000028c + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x0000028c + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_RMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_ATTR 0x3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_INI(n) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_ADDR(n), \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_RMSK)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_ADDR(n), \
+		val)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_OUTMI(n, mask, \
+						val) out_dword_masked_ns( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_ADDR(	\
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_INI(n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_CMDQ_METADATA_F_BMSK 0xff000000
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_CMDQ_METADATA_F_SHFT 0x18
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_CMDQ_OPCODE_F_BMSK 0xff0000
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_CMDQ_OPCODE_F_SHFT 0x10
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_CMDQ_FLAGS_F_BMSK 0xfc00
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_CMDQ_FLAGS_F_SHFT 0xa
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_CMDQ_ORDER_F_BMSK 0x300
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_CMDQ_ORDER_F_SHFT 0x8
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_CMDQ_SRC_PIPE_F_BMSK 0xff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_1_n_CMDQ_SRC_PIPE_F_SHFT 0x0
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_ADDR(n) (IPA_DEBUG_REG_BASE +	\
+						   0x00000290 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000290 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000290 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_RMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_ATTR 0x3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_INI(n) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_ADDR(n), \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_RMSK)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_ADDR(n), \
+		val)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_OUTMI(n, mask, \
+						val) out_dword_masked_ns( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_ADDR(	\
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_INI(n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_CMDQ_ADDR_LSB_F_BMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_2_n_CMDQ_ADDR_LSB_F_SHFT 0x0
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_ADDR(n) (IPA_DEBUG_REG_BASE +	\
+						   0x00000294 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000294 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000294 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_RMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_ATTR 0x3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_INI(n) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_ADDR(n), \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_RMSK)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_ADDR(n), \
+		val)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_OUTMI(n, mask, \
+						val) out_dword_masked_ns( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_ADDR(	\
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_INI(n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_CMDQ_ADDR_MSB_F_BMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_WR_3_n_CMDQ_ADDR_MSB_F_SHFT 0x0
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_ADDR(n) (IPA_DEBUG_REG_BASE +	\
+						   0x00000298 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000298 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000298 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_RMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_ATTR 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_INI(n) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_ADDR(n), \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_RMSK)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_CMDQ_SRC_LEN_F_BMSK 0xffff0000
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_CMDQ_SRC_LEN_F_SHFT 0x10
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_CMDQ_PACKET_LEN_F_BMSK 0xffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_0_n_CMDQ_PACKET_LEN_F_SHFT 0x0
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_ADDR(n) (IPA_DEBUG_REG_BASE +	\
+						   0x0000029c + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x0000029c + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x0000029c + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_RMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_ATTR 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_INI(n) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_ADDR(n), \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_RMSK)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_CMDQ_METADATA_F_BMSK 0xff000000
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_CMDQ_METADATA_F_SHFT 0x18
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_CMDQ_OPCODE_F_BMSK 0xff0000
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_CMDQ_OPCODE_F_SHFT 0x10
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_CMDQ_FLAGS_F_BMSK 0xfc00
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_CMDQ_FLAGS_F_SHFT 0xa
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_CMDQ_ORDER_F_BMSK 0x300
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_CMDQ_ORDER_F_SHFT 0x8
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_CMDQ_SRC_PIPE_F_BMSK 0xff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_1_n_CMDQ_SRC_PIPE_F_SHFT 0x0
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_ADDR(n) (IPA_DEBUG_REG_BASE +	\
+						   0x000002a0 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x000002a0 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x000002a0 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_RMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_ATTR 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_INI(n) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_ADDR(n), \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_RMSK)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_CMDQ_ADDR_LSB_F_BMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_2_n_CMDQ_ADDR_LSB_F_SHFT 0x0
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_ADDR(n) (IPA_DEBUG_REG_BASE +	\
+						   0x000002a4 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x000002a4 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x000002a4 + 0x2C * \
+						   (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_RMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_ATTR 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_INI(n) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_ADDR(n), \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_RMSK)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_CMDQ_ADDR_MSB_F_BMSK 0xffffffff
+#define HWIO_IPA_RX_SPLT_CMDQ_DATA_RD_3_n_CMDQ_ADDR_MSB_F_SHFT 0x0
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_ADDR(n) (IPA_DEBUG_REG_BASE + \
+						0x000002a8 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS + \
+						0x000002a8 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS + \
+						0x000002a8 + 0x2C * (n))
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_RMSK 0x7f
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_MAXn 3
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_ATTR 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_INI(n) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_ADDR(n),	\
+		HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_RMSK)
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_ADDR(n),	\
+		mask)
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_CMDQ_DEPTH_BMSK 0x60
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_CMDQ_DEPTH_SHFT 0x5
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_CMDQ_COUNT_BMSK 0x18
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_CMDQ_COUNT_SHFT 0x3
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_CMDQ_FULL_BMSK 0x4
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_CMDQ_FULL_SHFT 0x2
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_CMDQ_EMPTY_BMSK 0x2
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_CMDQ_EMPTY_SHFT 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_STATUS_BMSK 0x1
+#define HWIO_IPA_RX_SPLT_CMDQ_STATUS_n_STATUS_SHFT 0x0
+#define HWIO_IPA_TX_COMMANDER_CMDQ_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x0000035c)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x0000035c)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x0000035c)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_CFG_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000360)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_CFG_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000360)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_CFG_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000360)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_WR_0_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x00000364)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_WR_0_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000364)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_WR_0_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000364)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_WR_1_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x00000368)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_WR_1_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000368)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_WR_1_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000368)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_WR_2_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x0000036c)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_WR_2_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x0000036c)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_WR_2_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x0000036c)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_RD_0_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x00000370)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_RD_0_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000370)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_RD_0_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000370)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_RD_1_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x00000374)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_RD_1_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000374)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_RD_1_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000374)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_RD_2_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x00000378)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_RD_2_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000378)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_DATA_RD_2_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000378)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						0x0000037c)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x0000037c)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x0000037c)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_RMSK 0x7
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_ATTR 0x1
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_IN in_dword_masked( \
+		HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_ADDR,	\
+		HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_RMSK)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_ADDR,	\
+		m)
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_CMDQ_FULL_BMSK 0x4
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_CMDQ_FULL_SHFT 0x2
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_CMDQ_EMPTY_BMSK 0x2
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_CMDQ_EMPTY_SHFT 0x1
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_STATUS_BMSK 0x1
+#define HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_STATUS_SHFT 0x0
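+/*
+ * Hypothetical poll of the TX commander queue (editorial sketch; the
+ * actual driver flow is not part of this header). Spin while the
+ * CMDQ_FULL bit reads back set:
+ *
+ *   while (HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_INM(
+ *              HWIO_IPA_TX_COMMANDER_CMDQ_STATUS_CMDQ_FULL_BMSK))
+ *           ;
+ */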
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x00000380)
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+				       0x00000380)
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+				       0x00000380)
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_RMSK 0x3f
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_ATTR 0x3
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_IN in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_CMD_ADDR, \
+		HWIO_IPA_RX_HPS_CMDQ_CMD_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_CMD_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_OUT(v) out_dword( \
+		HWIO_IPA_RX_HPS_CMDQ_CMD_ADDR, \
+		v)
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_RX_HPS_CMDQ_CMD_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_RX_HPS_CMDQ_CMD_IN)
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_RD_REQ_BMSK 0x20
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_RD_REQ_SHFT 0x5
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_CMD_CLIENT_BMSK 0x1c
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_CMD_CLIENT_SHFT 0x2
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_POP_CMD_BMSK 0x2
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_POP_CMD_SHFT 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_WRITE_CMD_BMSK 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_CMD_WRITE_CMD_SHFT 0x0
+#define HWIO_IPA_RX_HPS_CMDQ_RELEASE_WR_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000384)
+#define HWIO_IPA_RX_HPS_CMDQ_RELEASE_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000384)
+#define HWIO_IPA_RX_HPS_CMDQ_RELEASE_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000384)
+#define HWIO_IPA_RX_HPS_CMDQ_RELEASE_RD_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000388)
+#define HWIO_IPA_RX_HPS_CMDQ_RELEASE_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000388)
+#define HWIO_IPA_RX_HPS_CMDQ_RELEASE_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000388)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_ADDR (IPA_DEBUG_REG_BASE + 0x0000038c)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x0000038c)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x0000038c)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_ADDR (IPA_DEBUG_REG_BASE + 0x00000390)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x00000390)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x00000390)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_0_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000394)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000394)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000394)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_1_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000398)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_1_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000398)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_1_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000398)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_2_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x0000039c)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_2_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x0000039c)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_2_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x0000039c)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_3_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000003a0)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_3_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000003a0)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_3_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000003a0)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000003a4)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000003a4)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000003a4)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_RMSK 0xffffffff
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_ATTR 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_IN in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_ADDR, \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_CMDQ_DEST_LEN_F_BMSK 0xffff0000
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_CMDQ_DEST_LEN_F_SHFT 0x10
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_CMDQ_PACKET_LEN_F_BMSK 0xffff
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_0_CMDQ_PACKET_LEN_F_SHFT 0x0
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000003a8)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000003a8)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000003a8)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_RMSK 0xffffffff
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_ATTR 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_IN in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_ADDR, \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_CMDQ_METADATA_F_BMSK 0xff000000
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_CMDQ_METADATA_F_SHFT 0x18
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_CMDQ_OPCODE_F_BMSK 0xff0000
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_CMDQ_OPCODE_F_SHFT 0x10
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_CMDQ_FLAGS_F_BMSK 0xfc00
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_CMDQ_FLAGS_F_SHFT 0xa
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_CMDQ_ORDER_F_BMSK 0x300
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_CMDQ_ORDER_F_SHFT 0x8
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_CMDQ_SRC_PIPE_F_BMSK 0xff
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_1_CMDQ_SRC_PIPE_F_SHFT 0x0
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000003ac)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000003ac)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000003ac)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_RMSK 0xffffffff
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_ATTR 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_IN in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_ADDR, \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_CMDQ_ADDR_LSB_F_BMSK 0xffffffff
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_2_CMDQ_ADDR_LSB_F_SHFT 0x0
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000003b0)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000003b0)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000003b0)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_RMSK 0xffffffff
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_ATTR 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_IN in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_ADDR, \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_CMDQ_ADDR_MSB_F_BMSK 0xffffffff
+#define HWIO_IPA_RX_HPS_CMDQ_DATA_RD_3_CMDQ_ADDR_MSB_F_SHFT 0x0
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x000003b4)
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x000003b4)
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x000003b4)
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_RMSK 0x1ff
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_ATTR 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_IN in_dword_masked(	\
+		HWIO_IPA_RX_HPS_CMDQ_STATUS_ADDR, \
+		HWIO_IPA_RX_HPS_CMDQ_STATUS_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_STATUS_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_CMDQ_DEPTH_BMSK 0x1fc
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_CMDQ_DEPTH_SHFT 0x2
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_CMDQ_FULL_BMSK 0x2
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_CMDQ_FULL_SHFT 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_STATUS_BMSK 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_STATUS_SHFT 0x0
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_ADDR (IPA_DEBUG_REG_BASE + \
+						0x000003b8)
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x000003b8)
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x000003b8)
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_RMSK 0x1f
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_ATTR 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_IN in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_ADDR,	\
+		HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_ADDR,	\
+		m)
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_BMSK 0x1f
+#define HWIO_IPA_RX_HPS_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_SHFT 0x0
+#define HWIO_IPA_RX_HPS_SNP_ADDR (IPA_DEBUG_REG_BASE + 0x000003bc)
+#define HWIO_IPA_RX_HPS_SNP_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x000003bc)
+#define HWIO_IPA_RX_HPS_SNP_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x000003bc)
+#define HWIO_IPA_RX_HPS_CMDQ_COUNT_ADDR (IPA_DEBUG_REG_BASE + 0x000003c0)
+#define HWIO_IPA_RX_HPS_CMDQ_COUNT_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x000003c0)
+#define HWIO_IPA_RX_HPS_CMDQ_COUNT_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x000003c0)
+#define HWIO_IPA_RX_HPS_CMDQ_COUNT_RMSK 0x7f
+#define HWIO_IPA_RX_HPS_CMDQ_COUNT_ATTR 0x1
+#define HWIO_IPA_RX_HPS_CMDQ_COUNT_IN in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_COUNT_ADDR, \
+		HWIO_IPA_RX_HPS_CMDQ_COUNT_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_COUNT_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CMDQ_COUNT_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CMDQ_COUNT_FIFO_COUNT_BMSK 0x7f
+#define HWIO_IPA_RX_HPS_CMDQ_COUNT_FIFO_COUNT_SHFT 0x0
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x000003c4)
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x000003c4)
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x000003c4)
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_RMSK 0xff0f0f0f
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_ATTR 0x3
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_IN in_dword_masked(	\
+		HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_ADDR, \
+		HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_RMSK)
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_OUT(v) out_dword( \
+		HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_ADDR, \
+		v)
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_IN)
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_4_MIN_DEPTH_BMSK \
+	0xf0000000
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_4_MIN_DEPTH_SHFT 0x1c
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_3_MIN_DEPTH_BMSK \
+	0xf000000
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_3_MIN_DEPTH_SHFT 0x18
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_2_MIN_DEPTH_BMSK \
+	0xf0000
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_2_MIN_DEPTH_SHFT 0x10
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_1_MIN_DEPTH_BMSK 0xf00
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_1_MIN_DEPTH_SHFT 0x8
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_0_MIN_DEPTH_BMSK 0xf
+#define HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_0_MIN_DEPTH_SHFT 0x0
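+/*
+ * _OUTM(m, v) goes through out_dword_masked_ns(); assuming the usual
+ * HWIO read-modify-write semantics, only the bits selected by m change.
+ * For example, to update just CLIENT_0's minimum depth (illustrative
+ * only):
+ *
+ *	HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_OUTM(
+ *		HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_0_MIN_DEPTH_BMSK,
+ *		depth <<
+ *		HWIO_IPA_RX_HPS_CLIENTS_MIN_DEPTH_0_CLIENT_0_MIN_DEPTH_SHFT);
+ */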
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x000003cc)
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x000003cc)
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x000003cc)
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_RMSK 0xff0f0f0f
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_ATTR 0x3
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_IN in_dword_masked(	\
+		HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_ADDR, \
+		HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_RMSK)
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_INM(m) in_dword_masked( \
+		HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_ADDR, \
+		m)
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_OUT(v) out_dword( \
+		HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_ADDR, \
+		v)
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_IN)
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_CLIENT_4_MAX_DEPTH_BMSK \
+	0xf0000000
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_CLIENT_4_MAX_DEPTH_SHFT 0x1c
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_CLIENT_3_MAX_DEPTH_BMSK \
+	0xf000000
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_CLIENT_3_MAX_DEPTH_SHFT 0x18
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_CLIENT_2_MAX_DEPTH_BMSK \
+	0xf0000
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_CLIENT_2_MAX_DEPTH_SHFT 0x10
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_CLIENT_1_MAX_DEPTH_BMSK 0xf00
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_CLIENT_1_MAX_DEPTH_SHFT 0x8
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_CLIENT_0_MAX_DEPTH_BMSK 0xf
+#define HWIO_IPA_RX_HPS_CLIENTS_MAX_DEPTH_0_CLIENT_0_MAX_DEPTH_SHFT 0x0
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x000003d4)
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					0x000003d4)
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					0x000003d4)
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_RMSK 0xff
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_ATTR 0x3
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_IN in_dword_masked( \
+		HWIO_IPA_HPS_DPS_CMDQ_CMD_ADDR,	\
+		HWIO_IPA_HPS_DPS_CMDQ_CMD_RMSK)
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_INM(m) in_dword_masked( \
+		HWIO_IPA_HPS_DPS_CMDQ_CMD_ADDR,	\
+		m)
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_OUT(v) out_dword( \
+		HWIO_IPA_HPS_DPS_CMDQ_CMD_ADDR,	\
+		v)
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_HPS_DPS_CMDQ_CMD_ADDR,	\
+		m, \
+		v, \
+		HWIO_IPA_HPS_DPS_CMDQ_CMD_IN)
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_RD_REQ_BMSK 0x80
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_RD_REQ_SHFT 0x7
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_CMD_CLIENT_BMSK 0x7c
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_CMD_CLIENT_SHFT 0x2
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_POP_CMD_BMSK 0x2
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_POP_CMD_SHFT 0x1
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_WRITE_CMD_BMSK 0x1
+#define HWIO_IPA_HPS_DPS_CMDQ_CMD_WRITE_CMD_SHFT 0x0
+#define HWIO_IPA_HPS_DPS_CMDQ_RELEASE_WR_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x000003d8)
+#define HWIO_IPA_HPS_DPS_CMDQ_RELEASE_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x000003d8)
+#define HWIO_IPA_HPS_DPS_CMDQ_RELEASE_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x000003d8)
+#define HWIO_IPA_HPS_DPS_CMDQ_RELEASE_RD_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x000003dc)
+#define HWIO_IPA_HPS_DPS_CMDQ_RELEASE_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x000003dc)
+#define HWIO_IPA_HPS_DPS_CMDQ_RELEASE_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x000003dc)
+#define HWIO_IPA_HPS_DPS_CMDQ_CFG_WR_ADDR (IPA_DEBUG_REG_BASE + 0x000003e0)
+#define HWIO_IPA_HPS_DPS_CMDQ_CFG_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x000003e0)
+#define HWIO_IPA_HPS_DPS_CMDQ_CFG_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x000003e0)
+#define HWIO_IPA_HPS_DPS_CMDQ_CFG_RD_ADDR (IPA_DEBUG_REG_BASE + 0x000003e4)
+#define HWIO_IPA_HPS_DPS_CMDQ_CFG_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x000003e4)
+#define HWIO_IPA_HPS_DPS_CMDQ_CFG_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x000003e4)
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_WR_0_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x000003e8)
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_WR_0_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x000003e8)
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_WR_0_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x000003e8)
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x000003ec)
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x000003ec)
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x000003ec)
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_RMSK 0xfffff
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_ATTR 0x1
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_IN in_dword_masked( \
+		HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_ADDR, \
+		HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_RMSK)
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_INM(m) in_dword_masked(	\
+		HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_ADDR, \
+		m)
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_CMDQ_REP_F_BMSK 0x80000
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_CMDQ_REP_F_SHFT 0x13
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_CMDQ_OPCODE_F_BMSK 0x60000
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_CMDQ_OPCODE_F_SHFT 0x11
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_CMDQ_SRC_PIPE_F_BMSK 0x1f000
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_CMDQ_SRC_PIPE_F_SHFT 0xc
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_CMDQ_SRC_ID_F_BMSK 0xff0
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_CMDQ_SRC_ID_F_SHFT 0x4
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_CMDQ_CTX_ID_F_BMSK 0xf
+#define HWIO_IPA_HPS_DPS_CMDQ_DATA_RD_0_CMDQ_CTX_ID_F_SHFT 0x0
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x000003f0)
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x000003f0)
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x000003f0)
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_RMSK 0xff
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_ATTR 0x1
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_IN in_dword_masked( \
+		HWIO_IPA_HPS_DPS_CMDQ_STATUS_ADDR, \
+		HWIO_IPA_HPS_DPS_CMDQ_STATUS_RMSK)
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_HPS_DPS_CMDQ_STATUS_ADDR, \
+		m)
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_CMDQ_DEPTH_BMSK 0xfc
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_CMDQ_DEPTH_SHFT 0x2
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_CMDQ_FULL_BMSK 0x2
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_CMDQ_FULL_SHFT 0x1
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_STATUS_BMSK 0x1
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_STATUS_SHFT 0x0
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x000003f4)
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x000003f4)
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x000003f4)
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_RMSK 0x7fffffff
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_ATTR 0x1
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_IN in_dword_masked( \
+		HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_ADDR, \
+		HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_RMSK)
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_INM(m) in_dword_masked( \
+		HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_ADDR, \
+		m)
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_BMSK 0x7fffffff
+#define HWIO_IPA_HPS_DPS_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_SHFT 0x0
+#define HWIO_IPA_HPS_DPS_SNP_ADDR (IPA_DEBUG_REG_BASE + 0x000003f8)
+#define HWIO_IPA_HPS_DPS_SNP_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x000003f8)
+#define HWIO_IPA_HPS_DPS_SNP_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x000003f8)
+#define HWIO_IPA_HPS_DPS_CMDQ_COUNT_ADDR (IPA_DEBUG_REG_BASE + 0x000003fc)
+#define HWIO_IPA_HPS_DPS_CMDQ_COUNT_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x000003fc)
+#define HWIO_IPA_HPS_DPS_CMDQ_COUNT_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x000003fc)
+#define HWIO_IPA_HPS_DPS_CMDQ_COUNT_RMSK 0x3f
+#define HWIO_IPA_HPS_DPS_CMDQ_COUNT_ATTR 0x1
+#define HWIO_IPA_HPS_DPS_CMDQ_COUNT_IN in_dword_masked(	\
+		HWIO_IPA_HPS_DPS_CMDQ_COUNT_ADDR, \
+		HWIO_IPA_HPS_DPS_CMDQ_COUNT_RMSK)
+#define HWIO_IPA_HPS_DPS_CMDQ_COUNT_INM(m) in_dword_masked( \
+		HWIO_IPA_HPS_DPS_CMDQ_COUNT_ADDR, \
+		m)
+#define HWIO_IPA_HPS_DPS_CMDQ_COUNT_FIFO_COUNT_BMSK 0x3f
+#define HWIO_IPA_HPS_DPS_CMDQ_COUNT_FIFO_COUNT_SHFT 0x0
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x00000400)
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+				       0x00000400)
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+				       0x00000400)
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_RMSK 0xbf
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_ATTR 0x3
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_IN in_dword_masked( \
+		HWIO_IPA_DPS_TX_CMDQ_CMD_ADDR, \
+		HWIO_IPA_DPS_TX_CMDQ_CMD_RMSK)
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_INM(m) in_dword_masked( \
+		HWIO_IPA_DPS_TX_CMDQ_CMD_ADDR, \
+		m)
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_OUT(v) out_dword( \
+		HWIO_IPA_DPS_TX_CMDQ_CMD_ADDR, \
+		v)
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_DPS_TX_CMDQ_CMD_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_DPS_TX_CMDQ_CMD_IN)
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_RD_REQ_BMSK 0x80
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_RD_REQ_SHFT 0x7
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_CMD_CLIENT_BMSK 0x3c
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_CMD_CLIENT_SHFT 0x2
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_POP_CMD_BMSK 0x2
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_POP_CMD_SHFT 0x1
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_WRITE_CMD_BMSK 0x1
+#define HWIO_IPA_DPS_TX_CMDQ_CMD_WRITE_CMD_SHFT 0x0
+#define HWIO_IPA_DPS_TX_CMDQ_RELEASE_WR_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000404)
+#define HWIO_IPA_DPS_TX_CMDQ_RELEASE_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000404)
+#define HWIO_IPA_DPS_TX_CMDQ_RELEASE_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000404)
+#define HWIO_IPA_DPS_TX_CMDQ_RELEASE_RD_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000408)
+#define HWIO_IPA_DPS_TX_CMDQ_RELEASE_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000408)
+#define HWIO_IPA_DPS_TX_CMDQ_RELEASE_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000408)
+#define HWIO_IPA_DPS_TX_CMDQ_CFG_WR_ADDR (IPA_DEBUG_REG_BASE + 0x0000040c)
+#define HWIO_IPA_DPS_TX_CMDQ_CFG_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x0000040c)
+#define HWIO_IPA_DPS_TX_CMDQ_CFG_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x0000040c)
+#define HWIO_IPA_DPS_TX_CMDQ_CFG_RD_ADDR (IPA_DEBUG_REG_BASE + 0x00000410)
+#define HWIO_IPA_DPS_TX_CMDQ_CFG_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x00000410)
+#define HWIO_IPA_DPS_TX_CMDQ_CFG_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x00000410)
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_WR_0_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000414)
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_WR_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000414)
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_WR_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000414)
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000418)
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000418)
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000418)
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_RMSK 0xfffff
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_ATTR 0x1
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_IN in_dword_masked( \
+		HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_ADDR, \
+		HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_RMSK)
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_INM(m) in_dword_masked( \
+		HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_ADDR, \
+		m)
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_CMDQ_REP_F_BMSK 0x80000
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_CMDQ_REP_F_SHFT 0x13
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_CMDQ_OPCODE_F_BMSK 0x60000
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_CMDQ_OPCODE_F_SHFT 0x11
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_CMDQ_SRC_PIPE_F_BMSK 0x1f000
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_CMDQ_SRC_PIPE_F_SHFT 0xc
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_CMDQ_SRC_ID_F_BMSK 0xff0
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_CMDQ_SRC_ID_F_SHFT 0x4
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_CMDQ_CTX_ID_F_BMSK 0xf
+#define HWIO_IPA_DPS_TX_CMDQ_DATA_RD_0_CMDQ_CTX_ID_F_SHFT 0x0
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x0000041c)
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x0000041c)
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x0000041c)
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_RMSK 0x1ff
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_ATTR 0x1
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_IN in_dword_masked(	\
+		HWIO_IPA_DPS_TX_CMDQ_STATUS_ADDR, \
+		HWIO_IPA_DPS_TX_CMDQ_STATUS_RMSK)
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_DPS_TX_CMDQ_STATUS_ADDR, \
+		m)
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_CMDQ_DEPTH_BMSK 0x1fc
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_CMDQ_DEPTH_SHFT 0x2
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_CMDQ_FULL_BMSK 0x2
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_CMDQ_FULL_SHFT 0x1
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_STATUS_BMSK 0x1
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_STATUS_SHFT 0x0
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000420)
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000420)
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000420)
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_RMSK 0x3ff
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_ATTR 0x1
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_IN in_dword_masked( \
+		HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_ADDR,	\
+		HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_RMSK)
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_INM(m) in_dword_masked( \
+		HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_ADDR,	\
+		m)
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_BMSK 0x3ff
+#define HWIO_IPA_DPS_TX_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_SHFT 0x0
+#define HWIO_IPA_DPS_TX_SNP_ADDR (IPA_DEBUG_REG_BASE + 0x00000424)
+#define HWIO_IPA_DPS_TX_SNP_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000424)
+#define HWIO_IPA_DPS_TX_SNP_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000424)
+#define HWIO_IPA_DPS_TX_CMDQ_COUNT_ADDR (IPA_DEBUG_REG_BASE + 0x00000428)
+#define HWIO_IPA_DPS_TX_CMDQ_COUNT_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x00000428)
+#define HWIO_IPA_DPS_TX_CMDQ_COUNT_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x00000428)
+#define HWIO_IPA_DPS_TX_CMDQ_COUNT_RMSK 0x7f
+#define HWIO_IPA_DPS_TX_CMDQ_COUNT_ATTR 0x1
+#define HWIO_IPA_DPS_TX_CMDQ_COUNT_IN in_dword_masked( \
+		HWIO_IPA_DPS_TX_CMDQ_COUNT_ADDR, \
+		HWIO_IPA_DPS_TX_CMDQ_COUNT_RMSK)
+#define HWIO_IPA_DPS_TX_CMDQ_COUNT_INM(m) in_dword_masked( \
+		HWIO_IPA_DPS_TX_CMDQ_COUNT_ADDR, \
+		m)
+#define HWIO_IPA_DPS_TX_CMDQ_COUNT_FIFO_COUNT_BMSK 0x7f
+#define HWIO_IPA_DPS_TX_CMDQ_COUNT_FIFO_COUNT_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x0000042c)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x0000042c)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x0000042c)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_RMSK 0x7
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_ADDR, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_IN)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_BITMAP_BMSK 0x7
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_EN_BITMAP_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_ADDR (IPA_DEBUG_REG_BASE + \
+						      0x00000430)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000430)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000430)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_RMSK 0x7
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_ADDR, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_INM(m) in_dword_masked(	\
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_OUTM(m,	\
+						     v)	\
+	out_dword_masked_ns(HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_ADDR, \
+			    m, \
+			    v, \
+			    HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_IN)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_BITMAP_BMSK 0x7
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL_BITMAP_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000434)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000434)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000434)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_RMSK 0xfff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_IN in_dword_masked(	\
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_ADDR, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_IN)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_ALL_CLI_MUX_CONCAT_BMSK 0xfff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX_ALL_CLI_MUX_CONCAT_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_ADDR(n) ( \
+		IPA_DEBUG_REG_BASE + 0x00000438 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_PHYS(n) ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000438 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_OFFS(n) ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000438 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_MAXn 2
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_INI(n) \
+	in_dword_masked(HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_ADDR( \
+				n), \
+			HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_INMI(n, \
+							  mask)	\
+	in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_OUTI(n, \
+							  val) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_OUTMI(n, mask, \
+							   val)	\
+	out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_INI(n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_VALUE_SHFT 0x0
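+/*
+ * Registers suffixed _CLI_n here and below are small arrays: _ADDR(n)
+ * strides by 0x10 per client and _MAXn gives the highest valid index
+ * (n = 0..2). The indexed accessors _INI/_INMI/_OUTI/_OUTMI mirror the
+ * scalar _IN/_INM/_OUT/_OUTM forms, e.g. reading client 1's comparator:
+ *
+ *	u32 cmp = HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_0_CLI_n_INI(1);
+ */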
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_ADDR(n) ( \
+		IPA_DEBUG_REG_BASE + 0x0000043c + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_PHYS(n) ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000043c + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_OFFS(n) ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000043c + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_MAXn 2
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_INI(n) \
+	in_dword_masked(HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_ADDR( \
+				n), \
+			HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_INMI(n, \
+							  mask)	\
+	in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_OUTI(n, \
+							  val) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_OUTMI(n, mask, \
+							   val)	\
+	out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_INI(n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_1_CLI_n_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_ADDR(n) ( \
+		IPA_DEBUG_REG_BASE + 0x00000440 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_PHYS(n) ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000440 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_OFFS(n) ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000440 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_MAXn 2
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_INI(n) \
+	in_dword_masked(HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_ADDR( \
+				n), \
+			HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_INMI(n, \
+							  mask)	\
+	in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_OUTI(n, \
+							  val) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_OUTMI(n, mask, \
+							   val)	\
+	out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_INI(n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_2_CLI_n_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_ADDR(n) ( \
+		IPA_DEBUG_REG_BASE + 0x00000444 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_PHYS(n) ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000444 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_OFFS(n) ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000444 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_MAXn 2
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_INI(n) \
+	in_dword_masked(HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_ADDR( \
+				n), \
+			HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_INMI(n, \
+							  mask)	\
+	in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_OUTI(n, \
+							  val) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_OUTMI(n, mask, \
+							   val)	\
+	out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_INI(n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_COMP_VAL_3_CLI_n_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_ADDR(n) ( \
+		IPA_DEBUG_REG_BASE + 0x00000468 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_PHYS(n) ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000468 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_OFFS(n) ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000468 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_MAXn 2
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_INI(n) \
+	in_dword_masked(HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_ADDR( \
+				n), \
+			HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_INMI(n, \
+							  mask)	\
+	in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_OUTI(n, \
+							  val) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_OUTMI(n, mask, \
+							   val)	\
+	out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_INI(n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_0_CLI_n_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_ADDR(n) ( \
+		IPA_DEBUG_REG_BASE + 0x0000046c + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_PHYS(n) ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000046c + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_OFFS(n) ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000046c + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_MAXn 2
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_INI(n) \
+	in_dword_masked(HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_ADDR( \
+				n), \
+			HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_INMI(n, \
+							  mask)	\
+	in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_OUTI(n, \
+							  val) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_OUTMI(n, mask, \
+							   val)	\
+	out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_INI(n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_1_CLI_n_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_ADDR(n) ( \
+		IPA_DEBUG_REG_BASE + 0x00000470 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_PHYS(n) ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000470 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_OFFS(n) ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000470 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_MAXn 2
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_INI(n) \
+	in_dword_masked(HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_ADDR( \
+				n), \
+			HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_INMI(n, \
+							  mask)	\
+	in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_OUTI(n, \
+							  val) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_OUTMI(n, mask, \
+							   val)	\
+	out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_INI(n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_2_CLI_n_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_ADDR(n) ( \
+		IPA_DEBUG_REG_BASE + 0x00000474 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_PHYS(n) ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000474 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_OFFS(n) ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000474 + 0x10 * (n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_MAXn 2
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_INI(n) \
+	in_dword_masked(HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_ADDR( \
+				n), \
+			HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_INMI(n, \
+							  mask)	\
+	in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_OUTI(n, \
+							  val) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_ADDR( \
+			n), \
+		val)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_OUTMI(n, mask, \
+							   val)	\
+	out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_INI(n))
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_HW_SNIF_EL_MASK_VAL_3_CLI_n_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x00000498)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x00000498)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x00000498)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_RMSK 0x7
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_ADDR, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_RMSK)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_IN)
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_SRC_GROUP_SEL_BMSK 0x7
+#define HWIO_IPA_LOG_BUF_HW_SNIF_LEGACY_RX_SRC_GROUP_SEL_SHFT 0x0
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x000004a0)
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					0x000004a0)
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					0x000004a0)
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_RMSK 0xff
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_ATTR 0x3
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_IN in_dword_masked( \
+		HWIO_IPA_ACKMNGR_CMDQ_CMD_ADDR,	\
+		HWIO_IPA_ACKMNGR_CMDQ_CMD_RMSK)
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_INM(m) in_dword_masked( \
+		HWIO_IPA_ACKMNGR_CMDQ_CMD_ADDR,	\
+		m)
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_OUT(v) out_dword( \
+		HWIO_IPA_ACKMNGR_CMDQ_CMD_ADDR,	\
+		v)
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_ACKMNGR_CMDQ_CMD_ADDR,	\
+		m, \
+		v, \
+		HWIO_IPA_ACKMNGR_CMDQ_CMD_IN)
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_RD_REQ_BMSK 0x80
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_RD_REQ_SHFT 0x7
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_CMD_CLIENT_BMSK 0x7c
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_CMD_CLIENT_SHFT 0x2
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_POP_CMD_BMSK 0x2
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_POP_CMD_SHFT 0x1
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_WRITE_CMD_BMSK 0x1
+#define HWIO_IPA_ACKMNGR_CMDQ_CMD_WRITE_CMD_SHFT 0x0
+#define HWIO_IPA_ACKMNGR_CMDQ_RELEASE_WR_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x000004a4)
+#define HWIO_IPA_ACKMNGR_CMDQ_RELEASE_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x000004a4)
+#define HWIO_IPA_ACKMNGR_CMDQ_RELEASE_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x000004a4)
+#define HWIO_IPA_ACKMNGR_CMDQ_RELEASE_RD_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x000004a8)
+#define HWIO_IPA_ACKMNGR_CMDQ_RELEASE_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x000004a8)
+#define HWIO_IPA_ACKMNGR_CMDQ_RELEASE_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x000004a8)
+#define HWIO_IPA_ACKMNGR_CMDQ_CFG_WR_ADDR (IPA_DEBUG_REG_BASE + 0x000004ac)
+#define HWIO_IPA_ACKMNGR_CMDQ_CFG_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x000004ac)
+#define HWIO_IPA_ACKMNGR_CMDQ_CFG_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x000004ac)
+#define HWIO_IPA_ACKMNGR_CMDQ_CFG_RD_ADDR (IPA_DEBUG_REG_BASE + 0x000004b0)
+#define HWIO_IPA_ACKMNGR_CMDQ_CFG_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x000004b0)
+#define HWIO_IPA_ACKMNGR_CMDQ_CFG_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x000004b0)
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_WR_ADDR (IPA_DEBUG_REG_BASE + \
+					    0x000004b4)
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					    0x000004b4)
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					    0x000004b4)
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_ADDR (IPA_DEBUG_REG_BASE + \
+					    0x000004b8)
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					    0x000004b8)
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					    0x000004b8)
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_RMSK 0x7ffffff
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_ATTR 0x1
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_IN in_dword_masked( \
+		HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_ADDR, \
+		HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_RMSK)
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_INM(m) in_dword_masked( \
+		HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_ADDR, \
+		m)
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_CMDQ_SRC_ID_VALID_BMSK 0x4000000
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_CMDQ_SRC_ID_VALID_SHFT 0x1a
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_CMDQ_SENT_BMSK 0x2000000
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_CMDQ_SENT_SHFT 0x19
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_CMDQ_ORIGIN_BMSK 0x1000000
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_CMDQ_ORIGIN_SHFT 0x18
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_CMDQ_LENGTH_BMSK 0xffff00
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_CMDQ_LENGTH_SHFT 0x8
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_CMDQ_SRC_ID_BMSK 0xff
+#define HWIO_IPA_ACKMNGR_CMDQ_DATA_RD_CMDQ_SRC_ID_SHFT 0x0
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x000004bc)
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x000004bc)
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x000004bc)
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_RMSK 0x1ff
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_ATTR 0x1
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_IN in_dword_masked( \
+		HWIO_IPA_ACKMNGR_CMDQ_STATUS_ADDR, \
+		HWIO_IPA_ACKMNGR_CMDQ_STATUS_RMSK)
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_ACKMNGR_CMDQ_STATUS_ADDR, \
+		m)
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_CMDQ_DEPTH_BMSK 0x1fc
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_CMDQ_DEPTH_SHFT 0x2
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_CMDQ_FULL_BMSK 0x2
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_CMDQ_FULL_SHFT 0x1
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_STATUS_BMSK 0x1
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_STATUS_SHFT 0x0
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x000004c0)
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x000004c0)
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x000004c0)
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_RMSK 0x1fff
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_ATTR 0x1
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_IN in_dword_masked( \
+		HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_ADDR, \
+		HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_RMSK)
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_INM(m) in_dword_masked( \
+		HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_ADDR, \
+		m)
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_BMSK 0x1fff
+#define HWIO_IPA_ACKMNGR_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_SHFT 0x0
+#define HWIO_IPA_ACKMNGR_CMDQ_COUNT_ADDR (IPA_DEBUG_REG_BASE + 0x000004c4)
+#define HWIO_IPA_ACKMNGR_CMDQ_COUNT_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x000004c4)
+#define HWIO_IPA_ACKMNGR_CMDQ_COUNT_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x000004c4)
+#define HWIO_IPA_ACKMNGR_CMDQ_COUNT_RMSK 0x7f
+#define HWIO_IPA_ACKMNGR_CMDQ_COUNT_ATTR 0x1
+#define HWIO_IPA_ACKMNGR_CMDQ_COUNT_IN in_dword_masked(	\
+		HWIO_IPA_ACKMNGR_CMDQ_COUNT_ADDR, \
+		HWIO_IPA_ACKMNGR_CMDQ_COUNT_RMSK)
+#define HWIO_IPA_ACKMNGR_CMDQ_COUNT_INM(m) in_dword_masked( \
+		HWIO_IPA_ACKMNGR_CMDQ_COUNT_ADDR, \
+		m)
+#define HWIO_IPA_ACKMNGR_CMDQ_COUNT_FIFO_COUNT_BMSK 0x7f
+#define HWIO_IPA_ACKMNGR_CMDQ_COUNT_FIFO_COUNT_SHFT 0x0
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_ADDR (IPA_DEBUG_REG_BASE + \
+					    0x000004c8)
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					    0x000004c8)
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					    0x000004c8)
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_RMSK 0x3f
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_ATTR 0x3
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_IN in_dword_masked( \
+		HWIO_IPA_GSI_FIFO_STATUS_CTRL_ADDR, \
+		HWIO_IPA_GSI_FIFO_STATUS_CTRL_RMSK)
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_FIFO_STATUS_CTRL_ADDR, \
+		m)
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_OUT(v) out_dword(	\
+		HWIO_IPA_GSI_FIFO_STATUS_CTRL_ADDR, \
+		v)
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_GSI_FIFO_STATUS_CTRL_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_GSI_FIFO_STATUS_CTRL_IN)
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_IPA_GSI_FIFO_STATUS_EN_BMSK 0x20
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_IPA_GSI_FIFO_STATUS_EN_SHFT 0x5
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_IPA_GSI_FIFO_STATUS_PORT_SEL_BMSK	\
+	0x1f
+#define HWIO_IPA_GSI_FIFO_STATUS_CTRL_IPA_GSI_FIFO_STATUS_PORT_SEL_SHFT	\
+	0x0
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x000004cc)
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x000004cc)
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x000004cc)
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_RMSK 0x7fffffff
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_ATTR 0x1
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_IN in_dword_masked( \
+		HWIO_IPA_GSI_TLV_FIFO_STATUS_ADDR, \
+		HWIO_IPA_GSI_TLV_FIFO_STATUS_RMSK)
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_TLV_FIFO_STATUS_ADDR, \
+		m)
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_HEAD_IS_BUBBLE_BMSK 0x40000000
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_HEAD_IS_BUBBLE_SHFT 0x1e
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_FULL_PUB_BMSK 0x20000000
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_FULL_PUB_SHFT 0x1d
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_ALMOST_FULL_PUB_BMSK 0x10000000
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_ALMOST_FULL_PUB_SHFT 0x1c
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_FULL_BMSK 0x8000000
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_FULL_SHFT 0x1b
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_ALMOST_FULL_BMSK 0x4000000
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_ALMOST_FULL_SHFT 0x1a
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_EMPTY_PUB_BMSK 0x2000000
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_EMPTY_PUB_SHFT 0x19
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_EMPTY_BMSK 0x1000000
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_EMPTY_SHFT 0x18
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_RD_PUB_PTR_BMSK 0xff0000
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_RD_PUB_PTR_SHFT 0x10
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_RD_PTR_BMSK 0xff00
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_RD_PTR_SHFT 0x8
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_WR_PTR_BMSK 0xff
+#define HWIO_IPA_GSI_TLV_FIFO_STATUS_FIFO_WR_PTR_SHFT 0x0
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x000004d0)
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x000004d0)
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x000004d0)
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_RMSK 0x7fffffff
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_ATTR 0x1
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_IN in_dword_masked( \
+		HWIO_IPA_GSI_AOS_FIFO_STATUS_ADDR, \
+		HWIO_IPA_GSI_AOS_FIFO_STATUS_RMSK)
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_GSI_AOS_FIFO_STATUS_ADDR, \
+		m)
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_HEAD_IS_BUBBLE_BMSK 0x40000000
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_HEAD_IS_BUBBLE_SHFT 0x1e
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_FULL_PUB_BMSK 0x20000000
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_FULL_PUB_SHFT 0x1d
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_ALMOST_FULL_PUB_BMSK 0x10000000
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_ALMOST_FULL_PUB_SHFT 0x1c
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_FULL_BMSK 0x8000000
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_FULL_SHFT 0x1b
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_ALMOST_FULL_BMSK 0x4000000
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_ALMOST_FULL_SHFT 0x1a
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_EMPTY_PUB_BMSK 0x2000000
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_EMPTY_PUB_SHFT 0x19
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_EMPTY_BMSK 0x1000000
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_EMPTY_SHFT 0x18
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_RD_PUB_PTR_BMSK 0xff0000
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_RD_PUB_PTR_SHFT 0x10
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_RD_PTR_BMSK 0xff00
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_RD_PTR_SHFT 0x8
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_WR_PTR_BMSK 0xff
+#define HWIO_IPA_GSI_AOS_FIFO_STATUS_FIFO_WR_PTR_SHFT 0x0
+#define HWIO_IPA_ENDP_GSI_CONS_BYTES_TLV_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x000004d4)
+#define HWIO_IPA_ENDP_GSI_CONS_BYTES_TLV_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x000004d4)
+#define HWIO_IPA_ENDP_GSI_CONS_BYTES_TLV_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x000004d4)
+#define HWIO_IPA_ENDP_GSI_CONS_BYTES_AOS_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x00000548)
+#define HWIO_IPA_ENDP_GSI_CONS_BYTES_AOS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x00000548)
+#define HWIO_IPA_ENDP_GSI_CONS_BYTES_AOS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x00000548)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000004d8)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000004d8)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000004d8)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_IN)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_0_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000004dc)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000004dc)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000004dc)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_IN)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_1_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000004e0)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000004e0)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000004e0)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_IN)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_2_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000004e4)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000004e4)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000004e4)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_IN)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_3_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000004e8)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000004e8)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000004e8)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_IN)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_4_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000004ec)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000004ec)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000004ec)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_IN)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_5_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000004f0)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000004f0)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000004f0)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_IN)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_6_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000004f4)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000004f4)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000004f4)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_IN)
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_COMP_VAL_7_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000004f8)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000004f8)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000004f8)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_IN)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_0_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x000004fc)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000004fc)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000004fc)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_IN)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_1_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000500)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000500)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000500)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_IN)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_2_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000504)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000504)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000504)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_IN)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_3_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000508)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000508)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000508)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_IN)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_4_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x0000050c)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x0000050c)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x0000050c)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_IN)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_5_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000510)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000510)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000510)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_IN)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_6_VALUE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000514)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000514)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000514)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_ADDR, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_RMSK)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_IN)
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_VALUE_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_SW_MASK_VAL_7_VALUE_SHFT 0x0
+#define HWIO_IPA_UC_RX_HND_CMDQ_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x00000518)
+#define HWIO_IPA_UC_RX_HND_CMDQ_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x00000518)
+#define HWIO_IPA_UC_RX_HND_CMDQ_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x00000518)
+#define HWIO_IPA_UC_RX_HND_CMDQ_CFG_ADDR (IPA_DEBUG_REG_BASE + 0x0000051c)
+#define HWIO_IPA_UC_RX_HND_CMDQ_CFG_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x0000051c)
+#define HWIO_IPA_UC_RX_HND_CMDQ_CFG_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x0000051c)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_0_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000520)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000520)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000520)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_1_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000524)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_1_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000524)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_1_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000524)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_2_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000528)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_2_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000528)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_2_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000528)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_3_ADDR (IPA_DEBUG_REG_BASE + \
+						0x0000052c)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_3_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x0000052c)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_WR_3_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x0000052c)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_0_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000530)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000530)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000530)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_1_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000534)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_1_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000534)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_1_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000534)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_2_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000538)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_2_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000538)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_2_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000538)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_3_ADDR (IPA_DEBUG_REG_BASE + \
+						0x0000053c)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_3_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x0000053c)
+#define HWIO_IPA_UC_RX_HND_CMDQ_DATA_RD_3_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x0000053c)
+#define HWIO_IPA_UC_RX_HND_CMDQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000540)
+#define HWIO_IPA_UC_RX_HND_CMDQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000540)
+#define HWIO_IPA_UC_RX_HND_CMDQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000540)
+#define HWIO_IPA_RAM_HW_FIRST_ADDR (IPA_DEBUG_REG_BASE + 0x0000054c)
+#define HWIO_IPA_RAM_HW_FIRST_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x0000054c)
+#define HWIO_IPA_RAM_HW_FIRST_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x0000054c)
+#define HWIO_IPA_RAM_HW_LAST_ADDR (IPA_DEBUG_REG_BASE + 0x00000550)
+#define HWIO_IPA_RAM_HW_LAST_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000550)
+#define HWIO_IPA_RAM_HW_LAST_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000550)
+#define HWIO_IPA_RAM_SNIFFER_BASE_OFFSET_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x00000554)
+#define HWIO_IPA_RAM_SNIFFER_BASE_OFFSET_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x00000554)
+#define HWIO_IPA_RAM_SNIFFER_BASE_OFFSET_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x00000554)
+#define HWIO_IPA_RAM_FRAG_FRST_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x00000558)
+#define HWIO_IPA_RAM_FRAG_FRST_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x00000558)
+#define HWIO_IPA_RAM_FRAG_FRST_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x00000558)
+#define HWIO_IPA_RAM_FRAG_SCND_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x0000055c)
+#define HWIO_IPA_RAM_FRAG_SCND_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x0000055c)
+#define HWIO_IPA_RAM_FRAG_SCND_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x0000055c)
+#define HWIO_IPA_RAM_GSI_TLV_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000560)
+#define HWIO_IPA_RAM_GSI_TLV_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000560)
+#define HWIO_IPA_RAM_GSI_TLV_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000560)
+#define HWIO_IPA_RAM_DCPH_KEYS_FIRST_ADDR (IPA_DEBUG_REG_BASE + 0x00000564)
+#define HWIO_IPA_RAM_DCPH_KEYS_FIRST_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x00000564)
+#define HWIO_IPA_RAM_DCPH_KEYS_FIRST_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x00000564)
+#define HWIO_IPA_RAM_DCPH_KEYS_LAST_ADDR (IPA_DEBUG_REG_BASE + 0x00000568)
+#define HWIO_IPA_RAM_DCPH_KEYS_LAST_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x00000568)
+#define HWIO_IPA_RAM_DCPH_KEYS_LAST_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x00000568)
+#define HWIO_IPA_DPS_SEQUENCER_FIRST_ADDR (IPA_DEBUG_REG_BASE + 0x00000570)
+#define HWIO_IPA_DPS_SEQUENCER_FIRST_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x00000570)
+#define HWIO_IPA_DPS_SEQUENCER_FIRST_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x00000570)
+#define HWIO_IPA_DPS_SEQUENCER_LAST_ADDR (IPA_DEBUG_REG_BASE + 0x00000574)
+#define HWIO_IPA_DPS_SEQUENCER_LAST_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x00000574)
+#define HWIO_IPA_DPS_SEQUENCER_LAST_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x00000574)
+#define HWIO_IPA_HPS_SEQUENCER_FIRST_ADDR (IPA_DEBUG_REG_BASE + 0x00000578)
+#define HWIO_IPA_HPS_SEQUENCER_FIRST_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					   0x00000578)
+#define HWIO_IPA_HPS_SEQUENCER_FIRST_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					   0x00000578)
+#define HWIO_IPA_HPS_SEQUENCER_LAST_ADDR (IPA_DEBUG_REG_BASE + 0x0000057c)
+#define HWIO_IPA_HPS_SEQUENCER_LAST_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x0000057c)
+#define HWIO_IPA_HPS_SEQUENCER_LAST_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x0000057c)
+#define HWIO_IPA_RAM_PKT_CTX_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000650)
+#define HWIO_IPA_RAM_PKT_CTX_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000650)
+#define HWIO_IPA_RAM_PKT_CTX_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000650)
+#define HWIO_IPA_RAM_SW_AREA_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000654)
+#define HWIO_IPA_RAM_SW_AREA_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000654)
+#define HWIO_IPA_RAM_SW_AREA_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000654)
+#define HWIO_IPA_RAM_HDRI_TYPE1_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000658)
+#define HWIO_IPA_RAM_HDRI_TYPE1_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000658)
+#define HWIO_IPA_RAM_HDRI_TYPE1_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000658)
+#define HWIO_IPA_RAM_AGGR_NLO_COUNTERS_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE \
+						       + 0x0000065c)
+#define HWIO_IPA_RAM_AGGR_NLO_COUNTERS_BASE_ADDR_PHYS (	\
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000065c)
+#define HWIO_IPA_RAM_AGGR_NLO_COUNTERS_BASE_ADDR_OFFS (	\
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000065c)
+#define HWIO_IPA_RAM_NLO_VP_CACHE_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000660)
+#define HWIO_IPA_RAM_NLO_VP_CACHE_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000660)
+#define HWIO_IPA_RAM_NLO_VP_CACHE_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000660)
+#define HWIO_IPA_RAM_COAL_VP_CACHE_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE +	\
+						   0x00000664)
+#define HWIO_IPA_RAM_COAL_VP_CACHE_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						   + 0x00000664)
+#define HWIO_IPA_RAM_COAL_VP_CACHE_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						   + 0x00000664)
+#define HWIO_IPA_RAM_COAL_VP_FIFO_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000668)
+#define HWIO_IPA_RAM_COAL_VP_FIFO_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000668)
+#define HWIO_IPA_RAM_COAL_VP_FIFO_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000668)
+#define HWIO_IPA_RAM_GSI_IF_CONS_ACCUMS_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE \
+							+ 0x0000066c)
+#define HWIO_IPA_RAM_GSI_IF_CONS_ACCUMS_BASE_ADDR_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000066c)
+#define HWIO_IPA_RAM_GSI_IF_CONS_ACCUMS_BASE_ADDR_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000066c)
+#define HWIO_IPA_RAM_AGGR_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + 0x00000670)
+#define HWIO_IPA_RAM_AGGR_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x00000670)
+#define HWIO_IPA_RAM_AGGR_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x00000670)
+#define HWIO_IPA_RAM_TX_COUNTERS_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x00000674)
+#define HWIO_IPA_RAM_TX_COUNTERS_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x00000674)
+#define HWIO_IPA_RAM_TX_COUNTERS_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x00000674)
+#define HWIO_IPA_RAM_DPL_FIFO_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000678)
+#define HWIO_IPA_RAM_DPL_FIFO_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000678)
+#define HWIO_IPA_RAM_DPL_FIFO_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000678)
+#define HWIO_IPA_RAM_COAL_MASTER_VP_CTX_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE \
+							+ 0x0000067c)
+#define HWIO_IPA_RAM_COAL_MASTER_VP_CTX_BASE_ADDR_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000067c)
+#define HWIO_IPA_RAM_COAL_MASTER_VP_CTX_BASE_ADDR_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000067c)
+#define HWIO_IPA_RAM_COAL_MASTER_VP_AGGR_BASE_ADDR_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000680)
+#define HWIO_IPA_RAM_COAL_MASTER_VP_AGGR_BASE_ADDR_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000680)
+#define HWIO_IPA_RAM_COAL_MASTER_VP_AGGR_BASE_ADDR_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000680)
+#define HWIO_IPA_RAM_COAL_SLAVE_VP_CTX_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE \
+						       + 0x00000684)
+#define HWIO_IPA_RAM_COAL_SLAVE_VP_CTX_BASE_ADDR_PHYS (	\
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000684)
+#define HWIO_IPA_RAM_COAL_SLAVE_VP_CTX_BASE_ADDR_OFFS (	\
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000684)
+#define HWIO_IPA_RAM_UL_NLO_AGGR_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x00000688)
+#define HWIO_IPA_RAM_UL_NLO_AGGR_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x00000688)
+#define HWIO_IPA_RAM_UL_NLO_AGGR_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x00000688)
+#define HWIO_IPA_RAM_UC_IRAM_ADDR_BASE_ADDR_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x0000069c)
+#define HWIO_IPA_RAM_UC_IRAM_ADDR_BASE_ADDR_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x0000069c)
+#define HWIO_IPA_RAM_UC_IRAM_ADDR_BASE_ADDR_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x0000069c)
+#define HWIO_IPA_HPS_UC2SEQ_PUSH_ADDR (IPA_DEBUG_REG_BASE + 0x00000580)
+#define HWIO_IPA_HPS_UC2SEQ_PUSH_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+				       0x00000580)
+#define HWIO_IPA_HPS_UC2SEQ_PUSH_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+				       0x00000580)
+#define HWIO_IPA_HPS_UC2SEQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x00000584)
+#define HWIO_IPA_HPS_UC2SEQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x00000584)
+#define HWIO_IPA_HPS_UC2SEQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x00000584)
+#define HWIO_IPA_HPS_SEQ2UC_RD_ADDR (IPA_DEBUG_REG_BASE + 0x00000588)
+#define HWIO_IPA_HPS_SEQ2UC_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000588)
+#define HWIO_IPA_HPS_SEQ2UC_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000588)
+#define HWIO_IPA_HPS_SEQ2UC_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x0000058c)
+#define HWIO_IPA_HPS_SEQ2UC_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x0000058c)
+#define HWIO_IPA_HPS_SEQ2UC_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x0000058c)
+#define HWIO_IPA_HPS_SEQ2UC_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x00000590)
+#define HWIO_IPA_HPS_SEQ2UC_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000590)
+#define HWIO_IPA_HPS_SEQ2UC_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000590)
+#define HWIO_IPA_DPS_UC2SEQ_PUSH_ADDR (IPA_DEBUG_REG_BASE + 0x00000594)
+#define HWIO_IPA_DPS_UC2SEQ_PUSH_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+				       0x00000594)
+#define HWIO_IPA_DPS_UC2SEQ_PUSH_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+				       0x00000594)
+#define HWIO_IPA_DPS_UC2SEQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x00000598)
+#define HWIO_IPA_DPS_UC2SEQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x00000598)
+#define HWIO_IPA_DPS_UC2SEQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x00000598)
+#define HWIO_IPA_DPS_SEQ2UC_RD_ADDR (IPA_DEBUG_REG_BASE + 0x0000059c)
+#define HWIO_IPA_DPS_SEQ2UC_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x0000059c)
+#define HWIO_IPA_DPS_SEQ2UC_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x0000059c)
+#define HWIO_IPA_DPS_SEQ2UC_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x000005a0)
+#define HWIO_IPA_DPS_SEQ2UC_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x000005a0)
+#define HWIO_IPA_DPS_SEQ2UC_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x000005a0)
+#define HWIO_IPA_DPS_SEQ2UC_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x000005a4)
+#define HWIO_IPA_DPS_SEQ2UC_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x000005a4)
+#define HWIO_IPA_DPS_SEQ2UC_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x000005a4)
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_ADDR (IPA_DEBUG_REG_BASE + 0x00000600)
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+				       0x00000600)
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+				       0x00000600)
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_RMSK 0xff
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_ATTR 0x3
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_IN in_dword_masked( \
+		HWIO_IPA_NTF_TX_CMDQ_CMD_ADDR, \
+		HWIO_IPA_NTF_TX_CMDQ_CMD_RMSK)
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_INM(m) in_dword_masked( \
+		HWIO_IPA_NTF_TX_CMDQ_CMD_ADDR, \
+		m)
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_OUT(v) out_dword( \
+		HWIO_IPA_NTF_TX_CMDQ_CMD_ADDR, \
+		v)
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_NTF_TX_CMDQ_CMD_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_NTF_TX_CMDQ_CMD_IN)
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_RD_REQ_BMSK 0x80
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_RD_REQ_SHFT 0x7
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_CMD_CLIENT_BMSK 0x7c
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_CMD_CLIENT_SHFT 0x2
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_POP_CMD_BMSK 0x2
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_POP_CMD_SHFT 0x1
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_WRITE_CMD_BMSK 0x1
+#define HWIO_IPA_NTF_TX_CMDQ_CMD_WRITE_CMD_SHFT 0x0
+#define HWIO_IPA_NTF_TX_CMDQ_RELEASE_WR_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000604)
+#define HWIO_IPA_NTF_TX_CMDQ_RELEASE_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000604)
+#define HWIO_IPA_NTF_TX_CMDQ_RELEASE_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000604)
+#define HWIO_IPA_NTF_TX_CMDQ_RELEASE_RD_ADDR (IPA_DEBUG_REG_BASE + \
+					      0x00000608)
+#define HWIO_IPA_NTF_TX_CMDQ_RELEASE_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS +	\
+					      0x00000608)
+#define HWIO_IPA_NTF_TX_CMDQ_RELEASE_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS +	\
+					      0x00000608)
+#define HWIO_IPA_NTF_TX_CMDQ_CFG_WR_ADDR (IPA_DEBUG_REG_BASE + 0x0000060c)
+#define HWIO_IPA_NTF_TX_CMDQ_CFG_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x0000060c)
+#define HWIO_IPA_NTF_TX_CMDQ_CFG_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x0000060c)
+#define HWIO_IPA_NTF_TX_CMDQ_CFG_RD_ADDR (IPA_DEBUG_REG_BASE + 0x00000610)
+#define HWIO_IPA_NTF_TX_CMDQ_CFG_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x00000610)
+#define HWIO_IPA_NTF_TX_CMDQ_CFG_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x00000610)
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_WR_0_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000614)
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_WR_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000614)
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_WR_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000614)
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000618)
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000618)
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000618)
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_RMSK 0xfffff
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_ATTR 0x1
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_IN in_dword_masked( \
+		HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_ADDR, \
+		HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_RMSK)
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_INM(m) in_dword_masked( \
+		HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_ADDR, \
+		m)
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_CMDQ_REP_F_BMSK 0x80000
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_CMDQ_REP_F_SHFT 0x13
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_CMDQ_OPCODE_F_BMSK 0x60000
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_CMDQ_OPCODE_F_SHFT 0x11
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_CMDQ_SRC_PIPE_F_BMSK 0x1f000
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_CMDQ_SRC_PIPE_F_SHFT 0xc
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_CMDQ_SRC_ID_F_BMSK 0xff0
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_CMDQ_SRC_ID_F_SHFT 0x4
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_CMDQ_CTX_ID_F_BMSK 0xf
+#define HWIO_IPA_NTF_TX_CMDQ_DATA_RD_0_CMDQ_CTX_ID_F_SHFT 0x0
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + 0x0000061c)
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x0000061c)
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x0000061c)
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_RMSK 0x1ff
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_ATTR 0x1
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_IN in_dword_masked(	\
+		HWIO_IPA_NTF_TX_CMDQ_STATUS_ADDR, \
+		HWIO_IPA_NTF_TX_CMDQ_STATUS_RMSK)
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_NTF_TX_CMDQ_STATUS_ADDR, \
+		m)
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_CMDQ_DEPTH_BMSK 0x1fc
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_CMDQ_DEPTH_SHFT 0x2
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_CMDQ_FULL_BMSK 0x2
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_CMDQ_FULL_SHFT 0x1
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_STATUS_BMSK 0x1
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_STATUS_SHFT 0x0
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000620)
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000620)
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000620)
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_RMSK 0x7fffffff
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_ATTR 0x1
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_IN in_dword_masked( \
+		HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_ADDR,	\
+		HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_RMSK)
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_INM(m) in_dword_masked( \
+		HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_ADDR,	\
+		m)
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_BMSK 0x7fffffff
+#define HWIO_IPA_NTF_TX_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_SHFT 0x0
+#define HWIO_IPA_NTF_TX_SNP_ADDR (IPA_DEBUG_REG_BASE + 0x00000624)
+#define HWIO_IPA_NTF_TX_SNP_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000624)
+#define HWIO_IPA_NTF_TX_SNP_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000624)
+#define HWIO_IPA_NTF_TX_CMDQ_COUNT_ADDR (IPA_DEBUG_REG_BASE + 0x00000628)
+#define HWIO_IPA_NTF_TX_CMDQ_COUNT_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					 0x00000628)
+#define HWIO_IPA_NTF_TX_CMDQ_COUNT_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					 0x00000628)
+#define HWIO_IPA_NTF_TX_CMDQ_COUNT_RMSK 0x7f
+#define HWIO_IPA_NTF_TX_CMDQ_COUNT_ATTR 0x1
+#define HWIO_IPA_NTF_TX_CMDQ_COUNT_IN in_dword_masked( \
+		HWIO_IPA_NTF_TX_CMDQ_COUNT_ADDR, \
+		HWIO_IPA_NTF_TX_CMDQ_COUNT_RMSK)
+#define HWIO_IPA_NTF_TX_CMDQ_COUNT_INM(m) in_dword_masked( \
+		HWIO_IPA_NTF_TX_CMDQ_COUNT_ADDR, \
+		m)
+#define HWIO_IPA_NTF_TX_CMDQ_COUNT_FIFO_COUNT_BMSK 0x7f
+#define HWIO_IPA_NTF_TX_CMDQ_COUNT_FIFO_COUNT_SHFT 0x0
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000700)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000700)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000700)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_RMSK 0xff
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_ATTR 0x3
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_IN in_dword_masked( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_ADDR, \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_RMSK)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_INM(m) in_dword_masked( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_ADDR, \
+		m)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_OUT(v) out_dword( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_ADDR, \
+		v)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_IN)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_RD_REQ_BMSK 0x80
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_RD_REQ_SHFT 0x7
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_CMD_CLIENT_BMSK 0x7c
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_CMD_CLIENT_SHFT 0x2
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_POP_CMD_BMSK 0x2
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_POP_CMD_SHFT 0x1
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_WRITE_CMD_BMSK 0x1
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CMD_WRITE_CMD_SHFT 0x0
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_RELEASE_WR_ADDR (IPA_DEBUG_REG_BASE + \
+						    0x00000704)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_RELEASE_WR_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000704)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_RELEASE_WR_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000704)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_RELEASE_RD_ADDR (IPA_DEBUG_REG_BASE + \
+						    0x00000708)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_RELEASE_RD_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000708)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_RELEASE_RD_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000708)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CFG_WR_ADDR (IPA_DEBUG_REG_BASE + \
+						0x0000070c)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CFG_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x0000070c)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CFG_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x0000070c)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CFG_RD_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000710)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CFG_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000710)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_CFG_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000710)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_WR_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x00000714)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_WR_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x00000714)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x00000714)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_ADDR (IPA_DEBUG_REG_BASE + \
+						 0x00000718)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						 0x00000718)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						 0x00000718)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_RMSK 0xffffffff
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_ATTR 0x3
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_IN in_dword_masked( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_ADDR, \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_RMSK)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_INM(m) in_dword_masked( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_ADDR, \
+		m)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_OUT(v) out_dword( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_ADDR, \
+		v)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_IN)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_USERDATA_BMSK 0xf8000000
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_USERDATA_SHFT 0x1b
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_SRC_ID_VALID_BMSK \
+	0x4000000
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_SRC_ID_VALID_SHFT 0x1a
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_SENT_BMSK 0x2000000
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_SENT_SHFT 0x19
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_ORIGIN_BMSK 0x1000000
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_ORIGIN_SHFT 0x18
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_LENGTH_BMSK 0xffff00
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_LENGTH_SHFT 0x8
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_SRC_ID_BMSK 0xff
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_DATA_RD_CMDQ_SRC_ID_SHFT 0x0
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						0x0000071c)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x0000071c)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x0000071c)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_RMSK 0x1ff
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_ATTR 0x1
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_IN in_dword_masked( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_ADDR,	\
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_RMSK)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_ADDR,	\
+		m)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_CMDQ_DEPTH_BMSK 0x1fc
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_CMDQ_DEPTH_SHFT 0x2
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_CMDQ_FULL_BMSK 0x2
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_CMDQ_FULL_SHFT 0x1
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_STATUS_BMSK 0x1
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_STATUS_SHFT 0x0
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_ADDR (IPA_DEBUG_REG_BASE + \
+						      0x00000720)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000720)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000720)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_RMSK 0x7fffffff
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_ATTR 0x1
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_IN in_dword_masked( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_ADDR, \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_RMSK)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_INM(m) in_dword_masked(	\
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_ADDR, \
+		m)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_BMSK 0x7fffffff
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_STATUS_EMPTY_CMDQ_EMPTY_SHFT 0x0
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_ADDR (IPA_DEBUG_REG_BASE + \
+					       0x00000724)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					       0x00000724)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					       0x00000724)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_RMSK 0x7f
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_ATTR 0x1
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_IN in_dword_masked( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_ADDR, \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_RMSK)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_INM(m) in_dword_masked( \
+		HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_ADDR, \
+		m)
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_FIFO_COUNT_BMSK 0x7f
+#define HWIO_IPA_PROD_ACKMNGR_CMDQ_COUNT_FIFO_COUNT_SHFT 0x0
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_0_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000728)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000728)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_0_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000728)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_1_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x0000072c)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_1_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x0000072c)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_1_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x0000072c)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_2_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000730)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_2_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000730)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_2_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000730)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_3_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000734)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_3_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000734)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_3_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000734)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_4_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000738)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_4_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000738)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_4_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000738)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_5_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x0000073c)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_5_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x0000073c)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_5_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x0000073c)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_6_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000740)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_6_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000740)
+#define HWIO_IPA_ACKMNGR_CONTROL_CFG_6_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000740)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_ACKINJ_CFG_ADDR (IPA_DEBUG_REG_BASE + \
+						    0x00000744)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_ACKINJ_CFG_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000744)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_ACKINJ_CFG_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000744)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_ACKUPD_CFG_ADDR (IPA_DEBUG_REG_BASE + \
+						    0x00000748)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_ACKUPD_CFG_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000748)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_ACKUPD_CFG_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000748)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x0000074c)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x0000074c)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x0000074c)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000750)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_STATUS_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000750)
+#define HWIO_IPA_ACKMNGR_SW_ACCESS_STATUS_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000750)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_0_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000754)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_0_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000754)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_0_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000754)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_1_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000758)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_1_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000758)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_1_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000758)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_2_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x0000075c)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_2_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x0000075c)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_2_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x0000075c)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_3_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000760)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_3_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000760)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_3_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000760)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_4_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000764)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_4_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000764)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_4_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000764)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_5_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000768)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_5_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000768)
+#define HWIO_IPA_PROD_ACKMNGR_CONTROL_CFG_5_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000768)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_ACKINJ_CFG_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x0000076c)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_ACKINJ_CFG_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000076c)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_ACKINJ_CFG_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000076c)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_ACKUPD_CFG_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000770)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_ACKUPD_CFG_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000770)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_ACKUPD_CFG_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000770)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_CMD_ADDR (IPA_DEBUG_REG_BASE + \
+						  0x00000774)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_CMD_PHYS (IPA_DEBUG_REG_BASE_PHYS \
+						  + 0x00000774)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_CMD_OFFS (IPA_DEBUG_REG_BASE_OFFS \
+						  + 0x00000774)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_STATUS_ADDR (IPA_DEBUG_REG_BASE + \
+						     0x00000778)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_STATUS_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000778)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_STATUS_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000778)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_ACKINJ_CFG1_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x0000077c)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_ACKINJ_CFG1_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000077c)
+#define HWIO_IPA_PROD_ACKMNGR_SW_ACCESS_ACKINJ_CFG1_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000077c)
+#define HWIO_IPA_SPARE_REG_1_ADDR (IPA_DEBUG_REG_BASE + 0x00000780)
+#define HWIO_IPA_SPARE_REG_1_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000780)
+#define HWIO_IPA_SPARE_REG_1_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000780)
+#define HWIO_IPA_SPARE_REG_1_RMSK 0xffffffff
+#define HWIO_IPA_SPARE_REG_1_ATTR 0x3
+#define HWIO_IPA_SPARE_REG_1_IN in_dword_masked(HWIO_IPA_SPARE_REG_1_ADDR, \
+						HWIO_IPA_SPARE_REG_1_RMSK)
+#define HWIO_IPA_SPARE_REG_1_INM(m) in_dword_masked( \
+		HWIO_IPA_SPARE_REG_1_ADDR, \
+		m)
+#define HWIO_IPA_SPARE_REG_1_OUT(v) out_dword(HWIO_IPA_SPARE_REG_1_ADDR, v)
+#define HWIO_IPA_SPARE_REG_1_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_SPARE_REG_1_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_SPARE_REG_1_IN)
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT31_BMSK 0x80000000
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT31_SHFT 0x1f
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT30_BMSK 0x40000000
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT30_SHFT 0x1e
+#define HWIO_IPA_SPARE_REG_1_SPARE_ACKINJ_PIPE8_MASK_ENABLE_BMSK \
+	0x20000000
+#define HWIO_IPA_SPARE_REG_1_SPARE_ACKINJ_PIPE8_MASK_ENABLE_SHFT 0x1d
+#define	\
+	HWIO_IPA_SPARE_REG_1_WARB_FORCE_ARB_ROUND_FINISH_SPECIAL_DISABLE_BMSK \
+	0x10000000
+#define	\
+	HWIO_IPA_SPARE_REG_1_WARB_FORCE_ARB_ROUND_FINISH_SPECIAL_DISABLE_SHFT \
+	0x1c
+#define HWIO_IPA_SPARE_REG_1_DCPH_RAM_RD_PREFETCH_DISABLE_BMSK 0x8000000
+#define HWIO_IPA_SPARE_REG_1_DCPH_RAM_RD_PREFETCH_DISABLE_SHFT 0x1b
+#define HWIO_IPA_SPARE_REG_1_RAM_SLAVEWAY_ACCESS_PROTECTION_DISABLE_BMSK \
+	0x4000000
+#define HWIO_IPA_SPARE_REG_1_RAM_SLAVEWAY_ACCESS_PROTECTION_DISABLE_SHFT \
+	0x1a
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT25_BMSK 0x2000000
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT25_SHFT 0x19
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT24_BMSK 0x1000000
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT24_SHFT 0x18
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT23_BMSK 0x800000
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT23_SHFT 0x17
+#define HWIO_IPA_SPARE_REG_1_BAM_IDLE_IN_IPA_MISC_CGC_EN_BMSK 0x400000
+#define HWIO_IPA_SPARE_REG_1_BAM_IDLE_IN_IPA_MISC_CGC_EN_SHFT 0x16
+#define HWIO_IPA_SPARE_REG_1_GSI_IF_OUT_OF_BUF_STOP_RESET_MASK_ENABLE_BMSK \
+	0x200000
+#define HWIO_IPA_SPARE_REG_1_GSI_IF_OUT_OF_BUF_STOP_RESET_MASK_ENABLE_SHFT \
+	0x15
+#define HWIO_IPA_SPARE_REG_1_REVERT_WARB_FIX_BMSK 0x100000
+#define HWIO_IPA_SPARE_REG_1_REVERT_WARB_FIX_SHFT 0x14
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT19_BMSK 0x80000
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT19_SHFT 0x13
+#define HWIO_IPA_SPARE_REG_1_RX_STALL_ON_GEN_DEAGGR_ERROR_BMSK 0x40000
+#define HWIO_IPA_SPARE_REG_1_RX_STALL_ON_GEN_DEAGGR_ERROR_SHFT 0x12
+#define HWIO_IPA_SPARE_REG_1_RX_STALL_ON_MBIM_DEAGGR_ERROR_BMSK 0x20000
+#define HWIO_IPA_SPARE_REG_1_RX_STALL_ON_MBIM_DEAGGR_ERROR_SHFT 0x11
+#define HWIO_IPA_SPARE_REG_1_QMB_RAM_RD_CACHE_DISABLE_BMSK 0x10000
+#define HWIO_IPA_SPARE_REG_1_QMB_RAM_RD_CACHE_DISABLE_SHFT 0x10
+#define	\
+	HWIO_IPA_SPARE_REG_1_RX_CMDQ_SPLITTER_CMDQ_PENDING_MUX_DISABLE_BMSK \
+	0x8000
+#define	\
+	HWIO_IPA_SPARE_REG_1_RX_CMDQ_SPLITTER_CMDQ_PENDING_MUX_DISABLE_SHFT \
+	0xf
+#define	\
+	HWIO_IPA_SPARE_REG_1_FRAG_MNGR_FAIRNESS_EVICTION_ON_CONSTRUCTING_BMSK \
+	0x4000
+#define	\
+	HWIO_IPA_SPARE_REG_1_FRAG_MNGR_FAIRNESS_EVICTION_ON_CONSTRUCTING_SHFT \
+	0xe
+#define HWIO_IPA_SPARE_REG_1_TX_BLOCK_AGGR_QUERY_ON_HOLB_PACKET_BMSK \
+	0x2000
+#define HWIO_IPA_SPARE_REG_1_TX_BLOCK_AGGR_QUERY_ON_HOLB_PACKET_SHFT 0xd
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT12_BMSK 0x1000
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT12_SHFT 0xc
+#define HWIO_IPA_SPARE_REG_1_TX_GIVES_SSPND_ACK_ON_OPEN_AGGR_FRAME_BMSK	\
+	0x800
+#define HWIO_IPA_SPARE_REG_1_TX_GIVES_SSPND_ACK_ON_OPEN_AGGR_FRAME_SHFT	\
+	0xb
+#define HWIO_IPA_SPARE_REG_1_ACL_DISPATCHER_PKT_CHECK_DISABLE_BMSK 0x400
+#define HWIO_IPA_SPARE_REG_1_ACL_DISPATCHER_PKT_CHECK_DISABLE_SHFT 0xa
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT8_BMSK 0x100
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT8_SHFT 0x8
+#define HWIO_IPA_SPARE_REG_1_ACL_DISPATCHER_FRAG_NOTIF_CHECK_DISABLE_BMSK \
+	0x40
+#define HWIO_IPA_SPARE_REG_1_ACL_DISPATCHER_FRAG_NOTIF_CHECK_DISABLE_SHFT \
+	0x6
+#define HWIO_IPA_SPARE_REG_1_ACL_INORDER_MULTI_DISABLE_BMSK 0x20
+#define HWIO_IPA_SPARE_REG_1_ACL_INORDER_MULTI_DISABLE_SHFT 0x5
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT4_BMSK 0x10
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT4_SHFT 0x4
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT3_BMSK 0x8
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT3_SHFT 0x3
+#define HWIO_IPA_SPARE_REG_1_GENQMB_AOOOWR_BMSK 0x4
+#define HWIO_IPA_SPARE_REG_1_GENQMB_AOOOWR_SHFT 0x2
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT1_BMSK 0x2
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT1_SHFT 0x1
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT0_BMSK 0x1
+#define HWIO_IPA_SPARE_REG_1_SPARE_BIT0_SHFT 0x0
+#define HWIO_IPA_SPARE_REG_2_ADDR (IPA_DEBUG_REG_BASE + 0x00000784)
+#define HWIO_IPA_SPARE_REG_2_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000784)
+#define HWIO_IPA_SPARE_REG_2_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000784)
+#define HWIO_IPA_SPARE_REG_2_RMSK 0xffffffff
+#define HWIO_IPA_SPARE_REG_2_ATTR 0x3
+#define HWIO_IPA_SPARE_REG_2_IN in_dword_masked(HWIO_IPA_SPARE_REG_2_ADDR, \
+						HWIO_IPA_SPARE_REG_2_RMSK)
+#define HWIO_IPA_SPARE_REG_2_INM(m) in_dword_masked( \
+		HWIO_IPA_SPARE_REG_2_ADDR, \
+		m)
+#define HWIO_IPA_SPARE_REG_2_OUT(v) out_dword(HWIO_IPA_SPARE_REG_2_ADDR, v)
+#define HWIO_IPA_SPARE_REG_2_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_SPARE_REG_2_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_SPARE_REG_2_IN)
+#define HWIO_IPA_SPARE_REG_2_SPARE_BITS_BMSK 0xfffffffc
+#define HWIO_IPA_SPARE_REG_2_SPARE_BITS_SHFT 0x2
+#define	\
+	HWIO_IPA_SPARE_REG_2_CMDQ_SPLIT_NOT_WAIT_DATA_DESC_PRIOR_HDR_PUSH_BMSK \
+	0x2
+#define	\
+	HWIO_IPA_SPARE_REG_2_CMDQ_SPLIT_NOT_WAIT_DATA_DESC_PRIOR_HDR_PUSH_SHFT \
+	0x1
+#define HWIO_IPA_SPARE_REG_2_TX_BRESP_INJ_WITH_FLOP_BMSK 0x1
+#define HWIO_IPA_SPARE_REG_2_TX_BRESP_INJ_WITH_FLOP_SHFT 0x0
+#define HWIO_IPA_ENDP_GSI_CFG1_n_ADDR(n) (IPA_DEBUG_REG_BASE + \
+					  0x00000794 + 0x4 * (n))
+#define HWIO_IPA_ENDP_GSI_CFG1_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS + \
+					  0x00000794 + 0x4 * (n))
+#define HWIO_IPA_ENDP_GSI_CFG1_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS + \
+					  0x00000794 + 0x4 * (n))
+#define HWIO_IPA_ENDP_GSI_CFG1_n_RMSK 0x80010000
+#define HWIO_IPA_ENDP_GSI_CFG1_n_MAXn 30
+#define HWIO_IPA_ENDP_GSI_CFG1_n_ATTR 0x3
+#define HWIO_IPA_ENDP_GSI_CFG1_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_GSI_CFG1_n_ADDR(n), \
+		HWIO_IPA_ENDP_GSI_CFG1_n_RMSK)
+#define HWIO_IPA_ENDP_GSI_CFG1_n_INMI(n, mask) in_dword_masked(	\
+		HWIO_IPA_ENDP_GSI_CFG1_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_GSI_CFG1_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_GSI_CFG1_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_GSI_CFG1_n_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_GSI_CFG1_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_GSI_CFG1_n_INI(n))
+#define HWIO_IPA_ENDP_GSI_CFG1_n_INIT_ENDP_BMSK 0x80000000
+#define HWIO_IPA_ENDP_GSI_CFG1_n_INIT_ENDP_SHFT 0x1f
+#define HWIO_IPA_ENDP_GSI_CFG1_n_ENDP_EN_BMSK 0x10000
+#define HWIO_IPA_ENDP_GSI_CFG1_n_ENDP_EN_SHFT 0x10
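+/*
+ * Illustrative sketch (not generated output): registers parameterized by
+ * an endpoint index use the _INI/_OUTI/_OUTMI forms, with n bounded by
+ * _MAXn. For example, setting ENDP_EN on every endpoint:
+ *
+ *	int n;
+ *
+ *	for (n = 0; n <= HWIO_IPA_ENDP_GSI_CFG1_n_MAXn; n++)
+ *		HWIO_IPA_ENDP_GSI_CFG1_n_OUTMI(n,
+ *			HWIO_IPA_ENDP_GSI_CFG1_n_ENDP_EN_BMSK,
+ *			0x1 << HWIO_IPA_ENDP_GSI_CFG1_n_ENDP_EN_SHFT);
+ */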
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_1_ADDR (IPA_DEBUG_REG_BASE + \
+						      0x00000908)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_1_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000908)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_1_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000908)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_2_ADDR (IPA_DEBUG_REG_BASE + \
+						      0x0000090c)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_2_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x0000090c)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_2_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x0000090c)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_3_ADDR (IPA_DEBUG_REG_BASE + \
+						      0x00000910)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_3_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000910)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_3_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000910)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_CTRL_ADDR ( \
+		IPA_DEBUG_REG_BASE + 0x00000914)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_CTRL_PHYS ( \
+		IPA_DEBUG_REG_BASE_PHYS + 0x00000914)
+#define HWIO_IPA_GSI_IPA_IF_TLV_OUT_GENERATOR_CTRL_OFFS ( \
+		IPA_DEBUG_REG_BASE_OFFS + 0x00000914)
+#define HWIO_IPA_GSI_IPA_IF_TLV_IN_RDY_ADDR (IPA_DEBUG_REG_BASE + \
+					     0x00000918)
+#define HWIO_IPA_GSI_IPA_IF_TLV_IN_RDY_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000918)
+#define HWIO_IPA_GSI_IPA_IF_TLV_IN_RDY_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000918)
+#define HWIO_IPA_GSI_IPA_IF_TLV_IN_DATA_1_ADDR (IPA_DEBUG_REG_BASE + \
+						0x0000091c)
+#define HWIO_IPA_GSI_IPA_IF_TLV_IN_DATA_1_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x0000091c)
+#define HWIO_IPA_GSI_IPA_IF_TLV_IN_DATA_1_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x0000091c)
+#define HWIO_IPA_GSI_IPA_IF_TLV_IN_DATA_2_ADDR (IPA_DEBUG_REG_BASE + \
+						0x00000920)
+#define HWIO_IPA_GSI_IPA_IF_TLV_IN_DATA_2_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000920)
+#define HWIO_IPA_GSI_IPA_IF_TLV_IN_DATA_2_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000920)
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_ADDR(n) (IPA_DEBUG_REG_BASE + \
+					     0x00000924 + 0x4 * (n))
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000924 + 0x4 * (n))
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000924 + 0x4 * (n))
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_RMSK 0xffffff
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_MAXn 30
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_ATTR 0x3
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_GSI_CFG_TLV_n_ADDR(n), \
+		HWIO_IPA_ENDP_GSI_CFG_TLV_n_RMSK)
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_GSI_CFG_TLV_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_GSI_CFG_TLV_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_OUTMI(n, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_GSI_CFG_TLV_n_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_GSI_CFG_TLV_n_INI(n))
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_FIFO_SIZE_BMSK 0xff0000
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_FIFO_SIZE_SHFT 0x10
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_FIFO_BASE_ADDR_BMSK 0xffff
+#define HWIO_IPA_ENDP_GSI_CFG_TLV_n_FIFO_BASE_ADDR_SHFT 0x0
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_ADDR(n) (IPA_DEBUG_REG_BASE + \
+					     0x000009a8 + 0x4 * (n))
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x000009a8 + 0x4 * (n))
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x000009a8 + 0x4 * (n))
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_RMSK 0xffffff
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_MAXn 30
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_ATTR 0x3
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_INI(n) in_dword_masked( \
+		HWIO_IPA_ENDP_GSI_CFG_AOS_n_ADDR(n), \
+		HWIO_IPA_ENDP_GSI_CFG_AOS_n_RMSK)
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_ENDP_GSI_CFG_AOS_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_ENDP_GSI_CFG_AOS_n_ADDR(n), \
+		val)
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_OUTMI(n, mask, \
+					  val) out_dword_masked_ns( \
+		HWIO_IPA_ENDP_GSI_CFG_AOS_n_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_ENDP_GSI_CFG_AOS_n_INI(n))
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_FIFO_SIZE_BMSK 0xff0000
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_FIFO_SIZE_SHFT 0x10
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_FIFO_BASE_ADDR_BMSK 0xffff
+#define HWIO_IPA_ENDP_GSI_CFG_AOS_n_FIFO_BASE_ADDR_SHFT 0x0
+#define HWIO_IPA_COAL_VP_AOS_FIFO_n_ADDR(n) (IPA_DEBUG_REG_BASE + \
+					     0x00000a60 + 0x4 * (n))
+#define HWIO_IPA_COAL_VP_AOS_FIFO_n_PHYS(n) (IPA_DEBUG_REG_BASE_PHYS + \
+					     0x00000a60 + 0x4 * (n))
+#define HWIO_IPA_COAL_VP_AOS_FIFO_n_OFFS(n) (IPA_DEBUG_REG_BASE_OFFS + \
+					     0x00000a60 + 0x4 * (n))
+#define HWIO_IPA_CTXH_CTRL_ADDR (IPA_DEBUG_REG_BASE + 0x00000afc)
+#define HWIO_IPA_CTXH_CTRL_PHYS (IPA_DEBUG_REG_BASE_PHYS + 0x00000afc)
+#define HWIO_IPA_CTXH_CTRL_OFFS (IPA_DEBUG_REG_BASE_OFFS + 0x00000afc)
+#define HWIO_IPA_CTXH_CTRL_RMSK 0x8000000f
+#define HWIO_IPA_CTXH_CTRL_ATTR 0x3
+#define HWIO_IPA_CTXH_CTRL_IN in_dword_masked(HWIO_IPA_CTXH_CTRL_ADDR, \
+					      HWIO_IPA_CTXH_CTRL_RMSK)
+#define HWIO_IPA_CTXH_CTRL_INM(m) in_dword_masked(HWIO_IPA_CTXH_CTRL_ADDR, \
+						  m)
+#define HWIO_IPA_CTXH_CTRL_OUT(v) out_dword(HWIO_IPA_CTXH_CTRL_ADDR, v)
+#define HWIO_IPA_CTXH_CTRL_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_CTXH_CTRL_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_CTXH_CTRL_IN)
+#define HWIO_IPA_CTXH_CTRL_CTXH_LOCK_BMSK 0x80000000
+#define HWIO_IPA_CTXH_CTRL_CTXH_LOCK_SHFT 0x1f
+#define HWIO_IPA_CTXH_CTRL_CTXH_LOCK_ID_BMSK 0xf
+#define HWIO_IPA_CTXH_CTRL_CTXH_LOCK_ID_SHFT 0x0
+#define HWIO_IPA_CTX_ID_m_CTX_NUM_n_ADDR(m, n) (IPA_DEBUG_REG_BASE + \
+						0x00000b00 + 0x80 * (m) + \
+						0x4 * (n))
+#define HWIO_IPA_CTX_ID_m_CTX_NUM_n_PHYS(m, n) (IPA_DEBUG_REG_BASE_PHYS + \
+						0x00000b00 + 0x80 * (m) + \
+						0x4 * (n))
+#define HWIO_IPA_CTX_ID_m_CTX_NUM_n_OFFS(m, n) (IPA_DEBUG_REG_BASE_OFFS + \
+						0x00000b00 + 0x80 * (m) + \
+						0x4 * (n))
+#define IPA_EE_REG_BASE (IPA_0_IPA_WRAPPER_BASE + 0x00043000)
+#define IPA_EE_REG_BASE_PHYS (IPA_0_IPA_WRAPPER_BASE_PHYS + 0x00043000)
+#define IPA_EE_REG_BASE_OFFS 0x00043000
+#define HWIO_IPA_IRQ_STTS_EE_n_ADDR(n) (IPA_EE_REG_BASE + 0x00000008 + \
+					0x1000 * (n))
+#define HWIO_IPA_IRQ_STTS_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+					0x00000008 + 0x1000 * (n))
+#define HWIO_IPA_IRQ_STTS_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+					0x00000008 + 0x1000 * (n))
+#define HWIO_IPA_IRQ_EN_EE_n_ADDR(n) (IPA_EE_REG_BASE + 0x0000000c + \
+				      0x1000 * (n))
+#define HWIO_IPA_IRQ_EN_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + 0x0000000c + \
+				      0x1000 * (n))
+#define HWIO_IPA_IRQ_EN_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + 0x0000000c + \
+				      0x1000 * (n))
+#define HWIO_IPA_IRQ_EN_EE_n_RMSK 0x7bffffd
+#define HWIO_IPA_IRQ_EN_EE_n_MAXn 3
+#define HWIO_IPA_IRQ_EN_EE_n_ATTR 0x3
+#define HWIO_IPA_IRQ_EN_EE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_IRQ_EN_EE_n_ADDR(n), \
+		HWIO_IPA_IRQ_EN_EE_n_RMSK)
+#define HWIO_IPA_IRQ_EN_EE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_IRQ_EN_EE_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_IRQ_EN_EE_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_IRQ_EN_EE_n_ADDR(n), \
+		val)
+#define HWIO_IPA_IRQ_EN_EE_n_OUTMI(n, mask, val) out_dword_masked_ns( \
+		HWIO_IPA_IRQ_EN_EE_n_ADDR(n), \
+		mask, \
+		val, \
+		HWIO_IPA_IRQ_EN_EE_n_INI(n))
+#define HWIO_IPA_IRQ_EN_EE_n_TLV_LEN_MIN_DSM_IRQ_EN_BMSK 0x4000000
+#define HWIO_IPA_IRQ_EN_EE_n_TLV_LEN_MIN_DSM_IRQ_EN_SHFT 0x1a
+#define HWIO_IPA_IRQ_EN_EE_n_GSI_UC_IRQ_EN_BMSK 0x2000000
+#define HWIO_IPA_IRQ_EN_EE_n_GSI_UC_IRQ_EN_SHFT 0x19
+#define HWIO_IPA_IRQ_EN_EE_n_GSI_IPA_IF_TLV_RCVD_IRQ_EN_BMSK 0x1000000
+#define HWIO_IPA_IRQ_EN_EE_n_GSI_IPA_IF_TLV_RCVD_IRQ_EN_SHFT 0x18
+#define HWIO_IPA_IRQ_EN_EE_n_GSI_EE_IRQ_EN_BMSK 0x800000
+#define HWIO_IPA_IRQ_EN_EE_n_GSI_EE_IRQ_EN_SHFT 0x17
+#define HWIO_IPA_IRQ_EN_EE_n_UCP_IRQ_EN_BMSK 0x200000
+#define HWIO_IPA_IRQ_EN_EE_n_UCP_IRQ_EN_SHFT 0x15
+#define HWIO_IPA_IRQ_EN_EE_n_PIPE_RED_MARKER_ABOVE_IRQ_EN_BMSK 0x100000
+#define HWIO_IPA_IRQ_EN_EE_n_PIPE_RED_MARKER_ABOVE_IRQ_EN_SHFT 0x14
+#define HWIO_IPA_IRQ_EN_EE_n_PIPE_YELLOW_MARKER_ABOVE_IRQ_EN_BMSK 0x80000
+#define HWIO_IPA_IRQ_EN_EE_n_PIPE_YELLOW_MARKER_ABOVE_IRQ_EN_SHFT 0x13
+#define HWIO_IPA_IRQ_EN_EE_n_PIPE_RED_MARKER_BELOW_IRQ_EN_BMSK 0x40000
+#define HWIO_IPA_IRQ_EN_EE_n_PIPE_RED_MARKER_BELOW_IRQ_EN_SHFT 0x12
+#define HWIO_IPA_IRQ_EN_EE_n_PIPE_YELLOW_MARKER_BELOW_IRQ_EN_BMSK 0x20000
+#define HWIO_IPA_IRQ_EN_EE_n_PIPE_YELLOW_MARKER_BELOW_IRQ_EN_SHFT 0x11
+#define HWIO_IPA_IRQ_EN_EE_n_BAM_GSI_IDLE_IRQ_EN_BMSK 0x10000
+#define HWIO_IPA_IRQ_EN_EE_n_BAM_GSI_IDLE_IRQ_EN_SHFT 0x10
+#define HWIO_IPA_IRQ_EN_EE_n_TX_HOLB_DROP_IRQ_EN_BMSK 0x8000
+#define HWIO_IPA_IRQ_EN_EE_n_TX_HOLB_DROP_IRQ_EN_SHFT 0xf
+#define HWIO_IPA_IRQ_EN_EE_n_TX_SUSPEND_IRQ_EN_BMSK 0x4000
+#define HWIO_IPA_IRQ_EN_EE_n_TX_SUSPEND_IRQ_EN_SHFT 0xe
+#define HWIO_IPA_IRQ_EN_EE_n_PROC_ERR_IRQ_EN_BMSK 0x2000
+#define HWIO_IPA_IRQ_EN_EE_n_PROC_ERR_IRQ_EN_SHFT 0xd
+#define HWIO_IPA_IRQ_EN_EE_n_STEP_MODE_IRQ_EN_BMSK 0x1000
+#define HWIO_IPA_IRQ_EN_EE_n_STEP_MODE_IRQ_EN_SHFT 0xc
+#define HWIO_IPA_IRQ_EN_EE_n_TX_ERR_IRQ_EN_BMSK 0x800
+#define HWIO_IPA_IRQ_EN_EE_n_TX_ERR_IRQ_EN_SHFT 0xb
+#define HWIO_IPA_IRQ_EN_EE_n_DEAGGR_ERR_IRQ_EN_BMSK 0x400
+#define HWIO_IPA_IRQ_EN_EE_n_DEAGGR_ERR_IRQ_EN_SHFT 0xa
+#define HWIO_IPA_IRQ_EN_EE_n_RX_ERR_IRQ_EN_BMSK 0x200
+#define HWIO_IPA_IRQ_EN_EE_n_RX_ERR_IRQ_EN_SHFT 0x9
+#define HWIO_IPA_IRQ_EN_EE_n_PROC_TO_UC_ACK_Q_NOT_EMPTY_IRQ_EN_BMSK 0x100
+#define HWIO_IPA_IRQ_EN_EE_n_PROC_TO_UC_ACK_Q_NOT_EMPTY_IRQ_EN_SHFT 0x8
+#define HWIO_IPA_IRQ_EN_EE_n_UC_RX_CMD_Q_NOT_FULL_IRQ_EN_BMSK 0x80
+#define HWIO_IPA_IRQ_EN_EE_n_UC_RX_CMD_Q_NOT_FULL_IRQ_EN_SHFT 0x7
+#define HWIO_IPA_IRQ_EN_EE_n_UC_IN_Q_NOT_EMPTY_IRQ_EN_BMSK 0x40
+#define HWIO_IPA_IRQ_EN_EE_n_UC_IN_Q_NOT_EMPTY_IRQ_EN_SHFT 0x6
+#define HWIO_IPA_IRQ_EN_EE_n_UC_IRQ_3_IRQ_EN_BMSK 0x20
+#define HWIO_IPA_IRQ_EN_EE_n_UC_IRQ_3_IRQ_EN_SHFT 0x5
+#define HWIO_IPA_IRQ_EN_EE_n_UC_IRQ_2_IRQ_EN_BMSK 0x10
+#define HWIO_IPA_IRQ_EN_EE_n_UC_IRQ_2_IRQ_EN_SHFT 0x4
+#define HWIO_IPA_IRQ_EN_EE_n_UC_IRQ_1_IRQ_EN_BMSK 0x8
+#define HWIO_IPA_IRQ_EN_EE_n_UC_IRQ_1_IRQ_EN_SHFT 0x3
+#define HWIO_IPA_IRQ_EN_EE_n_UC_IRQ_0_IRQ_EN_BMSK 0x4
+#define HWIO_IPA_IRQ_EN_EE_n_UC_IRQ_0_IRQ_EN_SHFT 0x2
+#define HWIO_IPA_IRQ_EN_EE_n_BAD_SNOC_ACCESS_IRQ_EN_BMSK 0x1
+#define HWIO_IPA_IRQ_EN_EE_n_BAD_SNOC_ACCESS_IRQ_EN_SHFT 0x0
+#define HWIO_IPA_IRQ_CLR_EE_n_ADDR(n) (IPA_EE_REG_BASE + 0x00000010 + \
+				       0x1000 * (n))
+#define HWIO_IPA_IRQ_CLR_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + 0x00000010 + \
+				       0x1000 * (n))
+#define HWIO_IPA_IRQ_CLR_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + 0x00000010 + \
+				       0x1000 * (n))
+#define HWIO_IPA_SNOC_FEC_EE_n_ADDR(n) (IPA_EE_REG_BASE + 0x00000018 + \
+					0x1000 * (n))
+#define HWIO_IPA_SNOC_FEC_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+					0x00000018 + 0x1000 * (n))
+#define HWIO_IPA_SNOC_FEC_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+					0x00000018 + 0x1000 * (n))
+#define HWIO_IPA_SNOC_FEC_EE_n_RMSK 0x8000f1ff
+#define HWIO_IPA_SNOC_FEC_EE_n_MAXn 3
+#define HWIO_IPA_SNOC_FEC_EE_n_ATTR 0x1
+#define HWIO_IPA_SNOC_FEC_EE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_SNOC_FEC_EE_n_ADDR(n),	\
+		HWIO_IPA_SNOC_FEC_EE_n_RMSK)
+#define HWIO_IPA_SNOC_FEC_EE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_SNOC_FEC_EE_n_ADDR(n),	\
+		mask)
+#define HWIO_IPA_SNOC_FEC_EE_n_READ_NOT_WRITE_BMSK 0x80000000
+#define HWIO_IPA_SNOC_FEC_EE_n_READ_NOT_WRITE_SHFT 0x1f
+#define HWIO_IPA_SNOC_FEC_EE_n_TID_BMSK 0xf000
+#define HWIO_IPA_SNOC_FEC_EE_n_TID_SHFT 0xc
+#define HWIO_IPA_SNOC_FEC_EE_n_QMB_INDEX_BMSK 0x100
+#define HWIO_IPA_SNOC_FEC_EE_n_QMB_INDEX_SHFT 0x8
+#define HWIO_IPA_SNOC_FEC_EE_n_CLIENT_BMSK 0xff
+#define HWIO_IPA_SNOC_FEC_EE_n_CLIENT_SHFT 0x0
+#define HWIO_IPA_IRQ_EE_UC_n_ADDR(n) (IPA_EE_REG_BASE + 0x0000001c + \
+				      0x1000 * (n))
+#define HWIO_IPA_IRQ_EE_UC_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + 0x0000001c + \
+				      0x1000 * (n))
+#define HWIO_IPA_IRQ_EE_UC_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + 0x0000001c + \
+				      0x1000 * (n))
+#define HWIO_IPA_FEC_ADDR_EE_n_ADDR(n) (IPA_EE_REG_BASE + 0x00000020 + \
+					0x1000 * (n))
+#define HWIO_IPA_FEC_ADDR_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+					0x00000020 + 0x1000 * (n))
+#define HWIO_IPA_FEC_ADDR_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+					0x00000020 + 0x1000 * (n))
+#define HWIO_IPA_FEC_ADDR_EE_n_RMSK 0xffffffff
+#define HWIO_IPA_FEC_ADDR_EE_n_MAXn 3
+#define HWIO_IPA_FEC_ADDR_EE_n_ATTR 0x1
+#define HWIO_IPA_FEC_ADDR_EE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_FEC_ADDR_EE_n_ADDR(n),	\
+		HWIO_IPA_FEC_ADDR_EE_n_RMSK)
+#define HWIO_IPA_FEC_ADDR_EE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_FEC_ADDR_EE_n_ADDR(n),	\
+		mask)
+#define HWIO_IPA_FEC_ADDR_EE_n_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_FEC_ADDR_EE_n_ADDR_SHFT 0x0
+#define HWIO_IPA_FEC_ADDR_MSB_EE_n_ADDR(n) (IPA_EE_REG_BASE + 0x00000024 + \
+					    0x1000 * (n))
+#define HWIO_IPA_FEC_ADDR_MSB_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+					    0x00000024 + 0x1000 * (n))
+#define HWIO_IPA_FEC_ADDR_MSB_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+					    0x00000024 + 0x1000 * (n))
+#define HWIO_IPA_FEC_ATTR_EE_n_ADDR(n) (IPA_EE_REG_BASE + 0x00000028 + \
+					0x1000 * (n))
+#define HWIO_IPA_FEC_ATTR_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+					0x00000028 + 0x1000 * (n))
+#define HWIO_IPA_FEC_ATTR_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+					0x00000028 + 0x1000 * (n))
+#define HWIO_IPA_FEC_ATTR_EE_n_RMSK 0xffffffff
+#define HWIO_IPA_FEC_ATTR_EE_n_MAXn 3
+#define HWIO_IPA_FEC_ATTR_EE_n_ATTR 0x1
+#define HWIO_IPA_FEC_ATTR_EE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_FEC_ATTR_EE_n_ADDR(n),	\
+		HWIO_IPA_FEC_ATTR_EE_n_RMSK)
+#define HWIO_IPA_FEC_ATTR_EE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_FEC_ATTR_EE_n_ADDR(n),	\
+		mask)
+#define HWIO_IPA_FEC_ATTR_EE_n_ERROR_INFO_BMSK 0xffffffc0
+#define HWIO_IPA_FEC_ATTR_EE_n_ERROR_INFO_SHFT 0x6
+#define HWIO_IPA_FEC_ATTR_EE_n_OPCODE_BMSK 0x3f
+#define HWIO_IPA_FEC_ATTR_EE_n_OPCODE_SHFT 0x0
+#define HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_ADDR(n) (IPA_EE_REG_BASE + \
+						0x00000030 + 0x1000 * (n))
+#define HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+						0x00000030 + 0x1000 * (n))
+#define HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+						0x00000030 + 0x1000 * (n))
+#define HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_RMSK 0x7fffffff
+#define HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_MAXn 3
+#define HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_ATTR 0x1
+#define HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_ADDR(n),	\
+		HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_RMSK)
+#define HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_ADDR(n),	\
+		mask)
+#define HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_ENDPOINTS_BMSK 0x7fffffff
+#define HWIO_IPA_SUSPEND_IRQ_INFO_EE_n_ENDPOINTS_SHFT 0x0
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_ADDR(n) (IPA_EE_REG_BASE +	\
+					      0x00000034 + 0x1000 * (n))
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+					      0x00000034 + 0x1000 * (n))
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+					      0x00000034 + 0x1000 * (n))
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_RMSK 0x7fffffff
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_MAXn 3
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_ATTR 0x3
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_SUSPEND_IRQ_EN_EE_n_ADDR(n), \
+		HWIO_IPA_SUSPEND_IRQ_EN_EE_n_RMSK)
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_INMI(n, mask) in_dword_masked( \
+		HWIO_IPA_SUSPEND_IRQ_EN_EE_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_OUTI(n, val) out_dword( \
+		HWIO_IPA_SUSPEND_IRQ_EN_EE_n_ADDR(n), \
+		val)
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_OUTMI(n, mask, \
+					   val) out_dword_masked_ns( \
+		HWIO_IPA_SUSPEND_IRQ_EN_EE_n_ADDR( \
+			n), \
+		mask, \
+		val, \
+		HWIO_IPA_SUSPEND_IRQ_EN_EE_n_INI(n))
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_ENDPOINTS_BMSK 0x7fffffff
+#define HWIO_IPA_SUSPEND_IRQ_EN_EE_n_ENDPOINTS_SHFT 0x0
+#define HWIO_IPA_SUSPEND_IRQ_CLR_EE_n_ADDR(n) (IPA_EE_REG_BASE + \
+					       0x00000038 + 0x1000 * (n))
+#define HWIO_IPA_SUSPEND_IRQ_CLR_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+					       0x00000038 + 0x1000 * (n))
+#define HWIO_IPA_SUSPEND_IRQ_CLR_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+					       0x00000038 + 0x1000 * (n))
+#define HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_ADDR(n) (IPA_EE_REG_BASE + \
+						  0x0000003c + 0x1000 * (n))
+#define HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+						  0x0000003c + 0x1000 * (n))
+#define HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+						  0x0000003c + 0x1000 * (n))
+#define HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_RMSK 0x7fffe000
+#define HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_MAXn 3
+#define HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_ATTR 0x1
+#define HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_INI(n) in_dword_masked( \
+		HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_ADDR(n), \
+		HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_RMSK)
+#define HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_INMI(n, mask) in_dword_masked(	\
+		HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_ADDR(n), \
+		mask)
+#define HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_ENDPOINTS_BMSK 0x7fffe000
+#define HWIO_IPA_HOLB_DROP_IRQ_INFO_EE_n_ENDPOINTS_SHFT 0xd
+#define HWIO_IPA_HOLB_DROP_IRQ_EN_EE_n_ADDR(n) (IPA_EE_REG_BASE + \
+						0x00000040 + 0x1000 * (n))
+#define HWIO_IPA_HOLB_DROP_IRQ_EN_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+						0x00000040 + 0x1000 * (n))
+#define HWIO_IPA_HOLB_DROP_IRQ_EN_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+						0x00000040 + 0x1000 * (n))
+#define HWIO_IPA_HOLB_DROP_IRQ_CLR_EE_n_ADDR(n) (IPA_EE_REG_BASE + \
+						 0x00000044 + 0x1000 * (n))
+#define HWIO_IPA_HOLB_DROP_IRQ_CLR_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS +	\
+						 0x00000044 + 0x1000 * (n))
+#define HWIO_IPA_HOLB_DROP_IRQ_CLR_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS +	\
+						 0x00000044 + 0x1000 * (n))
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_ADDR (IPA_EE_REG_BASE + 0x000010a0)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_PHYS (IPA_EE_REG_BASE_PHYS + \
+					   0x000010a0)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_OFFS (IPA_EE_REG_BASE_OFFS + \
+					   0x000010a0)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_ADDR, \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_RMSK)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_IN)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_START_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_START_ADDR_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_ADDR (IPA_EE_REG_BASE + \
+					       0x000010a4)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_PHYS (IPA_EE_REG_BASE_PHYS + \
+					       0x000010a4)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_OFFS (IPA_EE_REG_BASE_OFFS + \
+					       0x000010a4)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_ADDR, \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_RMSK)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_IN)
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_START_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_STATUS_ADDR_MSB_START_ADDR_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_ADDR (IPA_EE_REG_BASE + \
+						0x000010a8)
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_PHYS (IPA_EE_REG_BASE_PHYS + \
+						0x000010a8)
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_OFFS (IPA_EE_REG_BASE_OFFS + \
+						0x000010a8)
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_ATTR 0x1
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_ADDR,	\
+		HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_RMSK)
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_ADDR,	\
+		m)
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_WRITE_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_WRITE_ADDR_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_ADDR (IPA_EE_REG_BASE + \
+						    0x000010ac)
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_PHYS (IPA_EE_REG_BASE_PHYS + \
+						    0x000010ac)
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_OFFS (IPA_EE_REG_BASE_OFFS + \
+						    0x000010ac)
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_ATTR 0x1
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_ADDR, \
+		HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_RMSK)
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_WRITE_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_STATUS_WRITE_PTR_MSB_WRITE_ADDR_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_ADDR (IPA_EE_REG_BASE + 0x000010b0)
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_PHYS (IPA_EE_REG_BASE_PHYS + \
+					  0x000010b0)
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_OFFS (IPA_EE_REG_BASE_OFFS + \
+					  0x000010b0)
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_RMSK 0x1ffff
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_ATTR 0x3
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_IN in_dword_masked(	\
+		HWIO_IPA_LOG_BUF_STATUS_CFG_ADDR, \
+		HWIO_IPA_LOG_BUF_STATUS_CFG_RMSK)
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_INM(m) in_dword_masked( \
+		HWIO_IPA_LOG_BUF_STATUS_CFG_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_OUT(v) out_dword( \
+		HWIO_IPA_LOG_BUF_STATUS_CFG_ADDR, \
+		v)
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_LOG_BUF_STATUS_CFG_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_LOG_BUF_STATUS_CFG_IN)
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_ENABLE_BMSK 0x10000
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_ENABLE_SHFT 0x10
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_SIZE_BMSK 0xffff
+#define HWIO_IPA_LOG_BUF_STATUS_CFG_SIZE_SHFT 0x0
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_ADDR (IPA_EE_REG_BASE + 0x000010b4)
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_PHYS (IPA_EE_REG_BASE_PHYS + \
+					      0x000010b4)
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_OFFS (IPA_EE_REG_BASE_OFFS + \
+					      0x000010b4)
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_RMSK 0xffffffff
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_ATTR 0x1
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_IN in_dword_masked( \
+		HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_ADDR, \
+		HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_RMSK)
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_INM(m) in_dword_masked(	\
+		HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_ADDR, \
+		m)
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_WRITE_PTR_BMSK 0xffff0000
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_WRITE_PTR_SHFT 0x10
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_READ_PTR_BMSK 0xffff
+#define HWIO_IPA_LOG_BUF_STATUS_RAM_PTR_READ_PTR_SHFT 0x0
+#define HWIO_IPA_LTE_DECIPH_INIT_VALUES_0_ADDR (IPA_EE_REG_BASE + \
+						0x000010c0)
+#define HWIO_IPA_LTE_DECIPH_INIT_VALUES_0_PHYS (IPA_EE_REG_BASE_PHYS + \
+						0x000010c0)
+#define HWIO_IPA_LTE_DECIPH_INIT_VALUES_0_OFFS (IPA_EE_REG_BASE_OFFS + \
+						0x000010c0)
+#define HWIO_IPA_LTE_DECIPH_INIT_VALUES_1_ADDR (IPA_EE_REG_BASE + \
+						0x000010c4)
+#define HWIO_IPA_LTE_DECIPH_INIT_VALUES_1_PHYS (IPA_EE_REG_BASE_PHYS + \
+						0x000010c4)
+#define HWIO_IPA_LTE_DECIPH_INIT_VALUES_1_OFFS (IPA_EE_REG_BASE_OFFS + \
+						0x000010c4)
+#define HWIO_IPA_LTE_DECIPH_CONFIG_VALUES_0_ADDR (IPA_EE_REG_BASE + \
+						  0x000010c8)
+#define HWIO_IPA_LTE_DECIPH_CONFIG_VALUES_0_PHYS (IPA_EE_REG_BASE_PHYS + \
+						  0x000010c8)
+#define HWIO_IPA_LTE_DECIPH_CONFIG_VALUES_0_OFFS (IPA_EE_REG_BASE_OFFS + \
+						  0x000010c8)
+#define HWIO_IPA_LTE_DECIPH_CONFIG_VALUES_1_ADDR (IPA_EE_REG_BASE + \
+						  0x000010cc)
+#define HWIO_IPA_LTE_DECIPH_CONFIG_VALUES_1_PHYS (IPA_EE_REG_BASE_PHYS + \
+						  0x000010cc)
+#define HWIO_IPA_LTE_DECIPH_CONFIG_VALUES_1_OFFS (IPA_EE_REG_BASE_OFFS + \
+						  0x000010cc)
+#define HWIO_IPA_SECURED_PIPES_ADDR (IPA_EE_REG_BASE + 0x000010d0)
+#define HWIO_IPA_SECURED_PIPES_PHYS (IPA_EE_REG_BASE_PHYS + 0x000010d0)
+#define HWIO_IPA_SECURED_PIPES_OFFS (IPA_EE_REG_BASE_OFFS + 0x000010d0)
+#define HWIO_IPA_LTE_DECIPH_INIT_VALUES_CFG_ADDR (IPA_EE_REG_BASE + \
+						  0x000010d4)
+#define HWIO_IPA_LTE_DECIPH_INIT_VALUES_CFG_PHYS (IPA_EE_REG_BASE_PHYS + \
+						  0x000010d4)
+#define HWIO_IPA_LTE_DECIPH_INIT_VALUES_CFG_OFFS (IPA_EE_REG_BASE_OFFS + \
+						  0x000010d4)
+#define HWIO_IPA_UC_REGS_INSIDE_IPA__CONTROL_ADDR (IPA_EE_REG_BASE + \
+						   0x00001200)
+#define HWIO_IPA_UC_REGS_INSIDE_IPA__CONTROL_PHYS (IPA_EE_REG_BASE_PHYS + \
+						   0x00001200)
+#define HWIO_IPA_UC_REGS_INSIDE_IPA__CONTROL_OFFS (IPA_EE_REG_BASE_OFFS + \
+						   0x00001200)
+#define HWIO_IPA_UC_REGS_INSIDE_IPA__NMI_ADDR (IPA_EE_REG_BASE + \
+					       0x00001204)
+#define HWIO_IPA_UC_REGS_INSIDE_IPA__NMI_PHYS (IPA_EE_REG_BASE_PHYS + \
+					       0x00001204)
+#define HWIO_IPA_UC_REGS_INSIDE_IPA__NMI_OFFS (IPA_EE_REG_BASE_OFFS + \
+					       0x00001204)
+#define HWIO_IPA_SET_UC_IRQ_EE_n_ADDR(n) (IPA_EE_REG_BASE + 0x00002048 + \
+					  0x4 * (n))
+#define HWIO_IPA_SET_UC_IRQ_EE_n_PHYS(n) (IPA_EE_REG_BASE_PHYS + \
+					  0x00002048 + 0x4 * (n))
+#define HWIO_IPA_SET_UC_IRQ_EE_n_OFFS(n) (IPA_EE_REG_BASE_OFFS + \
+					  0x00002048 + 0x4 * (n))
+#define HWIO_IPA_SET_UC_IRQ_ALL_EES_ADDR (IPA_EE_REG_BASE + 0x00002058)
+#define HWIO_IPA_SET_UC_IRQ_ALL_EES_PHYS (IPA_EE_REG_BASE_PHYS + \
+					  0x00002058)
+#define HWIO_IPA_SET_UC_IRQ_ALL_EES_OFFS (IPA_EE_REG_BASE_OFFS + \
+					  0x00002058)
+#define HWIO_IPA_UCP_RESUME_ADDR (IPA_EE_REG_BASE + 0x000030a0)
+#define HWIO_IPA_UCP_RESUME_PHYS (IPA_EE_REG_BASE_PHYS + 0x000030a0)
+#define HWIO_IPA_UCP_RESUME_OFFS (IPA_EE_REG_BASE_OFFS + 0x000030a0)
+#define HWIO_IPA_PROC_UCP_CFG_ADDR (IPA_EE_REG_BASE + 0x000030a4)
+#define HWIO_IPA_PROC_UCP_CFG_PHYS (IPA_EE_REG_BASE_PHYS + 0x000030a4)
+#define HWIO_IPA_PROC_UCP_CFG_OFFS (IPA_EE_REG_BASE_OFFS + 0x000030a4)
+#define HWIO_IPA_UC_PKT_PROCESS_BASE_ADDR_0_ADDR (IPA_EE_REG_BASE + \
+						  0x000030a8)
+#define HWIO_IPA_UC_PKT_PROCESS_BASE_ADDR_0_PHYS (IPA_EE_REG_BASE_PHYS + \
+						  0x000030a8)
+#define HWIO_IPA_UC_PKT_PROCESS_BASE_ADDR_0_OFFS (IPA_EE_REG_BASE_OFFS + \
+						  0x000030a8)
+#define HWIO_IPA_UC_PKT_PROCESS_BASE_ADDR_1_ADDR (IPA_EE_REG_BASE + \
+						  0x000030ac)
+#define HWIO_IPA_UC_PKT_PROCESS_BASE_ADDR_1_PHYS (IPA_EE_REG_BASE_PHYS + \
+						  0x000030ac)
+#define HWIO_IPA_UC_PKT_PROCESS_BASE_ADDR_1_OFFS (IPA_EE_REG_BASE_OFFS + \
+						  0x000030ac)
+#define HWIO_IPA_UC_PKT_PROCESS_BASE_ADDR_2_ADDR (IPA_EE_REG_BASE + \
+						  0x000030b0)
+#define HWIO_IPA_UC_PKT_PROCESS_BASE_ADDR_2_PHYS (IPA_EE_REG_BASE_PHYS + \
+						  0x000030b0)
+#define HWIO_IPA_UC_PKT_PROCESS_BASE_ADDR_2_OFFS (IPA_EE_REG_BASE_OFFS + \
+						  0x000030b0)
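+/*
+ * IPA uC (embedded microcontroller) peripheral register block. The
+ * _n-indexed mailbox and interrupt registers below are arrayed at a
+ * 0x10 stride, i.e. the 0x10 * (n) term.
+ */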
+#define IPA_UC_IPA_UC_PER_REG_BASE (IPA_0_IPA_WRAPPER_BASE + 0x000c0000)
+#define IPA_UC_IPA_UC_PER_REG_BASE_PHYS (IPA_0_IPA_WRAPPER_BASE_PHYS + \
+					 0x000c0000)
+#define IPA_UC_IPA_UC_PER_REG_BASE_OFFS 0x000c0000
+#define HWIO_IPA_UC_STATUS_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + 0x00000000)
+#define HWIO_IPA_UC_STATUS_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				 0x00000000)
+#define HWIO_IPA_UC_STATUS_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				 0x00000000)
+#define HWIO_IPA_UC_CONTROL_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + 0x00000004)
+#define HWIO_IPA_UC_CONTROL_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				  0x00000004)
+#define HWIO_IPA_UC_CONTROL_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				  0x00000004)
+#define HWIO_IPA_UC_BASE_ADDR_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				    0x00000008)
+#define HWIO_IPA_UC_BASE_ADDR_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				    0x00000008)
+#define HWIO_IPA_UC_BASE_ADDR_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				    0x00000008)
+#define HWIO_IPA_UC_BASE_ADDR_MSB_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+					0x0000000c)
+#define HWIO_IPA_UC_BASE_ADDR_MSB_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+					0x0000000c)
+#define HWIO_IPA_UC_BASE_ADDR_MSB_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+					0x0000000c)
+#define HWIO_IPA_UC_SYS_BUS_ATTRIB_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+					 0x00000010)
+#define HWIO_IPA_UC_SYS_BUS_ATTRIB_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+					 0x00000010)
+#define HWIO_IPA_UC_SYS_BUS_ATTRIB_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+					 0x00000010)
+#define HWIO_IPA_UC_PEND_IRQ_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + 0x00000014)
+#define HWIO_IPA_UC_PEND_IRQ_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				   0x00000014)
+#define HWIO_IPA_UC_PEND_IRQ_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				   0x00000014)
+#define HWIO_IPA_UC_TRACE_BUFFER_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				       0x00000018)
+#define HWIO_IPA_UC_TRACE_BUFFER_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				       0x00000018)
+#define HWIO_IPA_UC_TRACE_BUFFER_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				       0x00000018)
+#define HWIO_IPA_UC_PC_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + 0x0000001c)
+#define HWIO_IPA_UC_PC_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x0000001c)
+#define HWIO_IPA_UC_PC_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x0000001c)
+#define HWIO_IPA_UC_VUIC_INT_ADDRESS_LSB_ADDR (IPA_UC_IPA_UC_PER_REG_BASE \
+					       + 0x00000024)
+#define HWIO_IPA_UC_VUIC_INT_ADDRESS_LSB_PHYS (	\
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x00000024)
+#define HWIO_IPA_UC_VUIC_INT_ADDRESS_LSB_OFFS (	\
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x00000024)
+#define HWIO_IPA_UC_VUIC_INT_ADDRESS_MSB_ADDR (IPA_UC_IPA_UC_PER_REG_BASE \
+					       + 0x00000028)
+#define HWIO_IPA_UC_VUIC_INT_ADDRESS_MSB_PHYS (	\
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x00000028)
+#define HWIO_IPA_UC_VUIC_INT_ADDRESS_MSB_OFFS (	\
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x00000028)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				       0x00000100)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				       0x00000100)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				       0x00000100)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_RMSK 0xffffffff
+#define HWIO_IPA_UC_QMB_SYS_ADDR_ATTR 0x3
+#define HWIO_IPA_UC_QMB_SYS_ADDR_IN in_dword_masked( \
+		HWIO_IPA_UC_QMB_SYS_ADDR_ADDR, \
+		HWIO_IPA_UC_QMB_SYS_ADDR_RMSK)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_INM(m) in_dword_masked( \
+		HWIO_IPA_UC_QMB_SYS_ADDR_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_OUT(v) out_dword( \
+		HWIO_IPA_UC_QMB_SYS_ADDR_ADDR, \
+		v)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_UC_QMB_SYS_ADDR_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_UC_QMB_SYS_ADDR_IN)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_ADDR_BMSK 0xffffffff
+#define HWIO_IPA_UC_QMB_SYS_ADDR_ADDR_SHFT 0x0
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_ADDR (IPA_UC_IPA_UC_PER_REG_BASE +	\
+					   0x00000104)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS \
+					   + 0x00000104)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS \
+					   + 0x00000104)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_RMSK 0xffffffff
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_ATTR 0x3
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_IN in_dword_masked( \
+		HWIO_IPA_UC_QMB_SYS_ADDR_MSB_ADDR, \
+		HWIO_IPA_UC_QMB_SYS_ADDR_MSB_RMSK)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_INM(m) in_dword_masked( \
+		HWIO_IPA_UC_QMB_SYS_ADDR_MSB_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_OUT(v) out_dword( \
+		HWIO_IPA_UC_QMB_SYS_ADDR_MSB_ADDR, \
+		v)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_UC_QMB_SYS_ADDR_MSB_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_UC_QMB_SYS_ADDR_MSB_IN)
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_ADDR_MSB_BMSK 0xffffffff
+#define HWIO_IPA_UC_QMB_SYS_ADDR_MSB_ADDR_MSB_SHFT 0x0
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+					 0x00000108)
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+					 0x00000108)
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+					 0x00000108)
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_RMSK 0x3ffff
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_ATTR 0x3
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_IN in_dword_masked( \
+		HWIO_IPA_UC_QMB_LOCAL_ADDR_ADDR, \
+		HWIO_IPA_UC_QMB_LOCAL_ADDR_RMSK)
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_INM(m) in_dword_masked( \
+		HWIO_IPA_UC_QMB_LOCAL_ADDR_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_OUT(v) out_dword( \
+		HWIO_IPA_UC_QMB_LOCAL_ADDR_ADDR, \
+		v)
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_UC_QMB_LOCAL_ADDR_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_UC_QMB_LOCAL_ADDR_IN)
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_ADDR_BMSK 0x3ffff
+#define HWIO_IPA_UC_QMB_LOCAL_ADDR_ADDR_SHFT 0x0
+#define HWIO_IPA_UC_QMB_LENGTH_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				     0x0000010c)
+#define HWIO_IPA_UC_QMB_LENGTH_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				     0x0000010c)
+#define HWIO_IPA_UC_QMB_LENGTH_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				     0x0000010c)
+#define HWIO_IPA_UC_QMB_LENGTH_RMSK 0x7f
+#define HWIO_IPA_UC_QMB_LENGTH_ATTR 0x3
+#define HWIO_IPA_UC_QMB_LENGTH_IN in_dword_masked( \
+		HWIO_IPA_UC_QMB_LENGTH_ADDR, \
+		HWIO_IPA_UC_QMB_LENGTH_RMSK)
+#define HWIO_IPA_UC_QMB_LENGTH_INM(m) in_dword_masked( \
+		HWIO_IPA_UC_QMB_LENGTH_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_LENGTH_OUT(v) out_dword( \
+		HWIO_IPA_UC_QMB_LENGTH_ADDR, \
+		v)
+#define HWIO_IPA_UC_QMB_LENGTH_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_UC_QMB_LENGTH_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_UC_QMB_LENGTH_IN)
+#define HWIO_IPA_UC_QMB_LENGTH_LENGTH_BMSK 0x7f
+#define HWIO_IPA_UC_QMB_LENGTH_LENGTH_SHFT 0x0
+#define HWIO_IPA_UC_QMB_TRIGGER_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				      0x00000110)
+#define HWIO_IPA_UC_QMB_TRIGGER_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS +	\
+				      0x00000110)
+#define HWIO_IPA_UC_QMB_TRIGGER_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS +	\
+				      0x00000110)
+#define HWIO_IPA_UC_QMB_TRIGGER_RMSK 0x31
+#define HWIO_IPA_UC_QMB_TRIGGER_ATTR 0x3
+#define HWIO_IPA_UC_QMB_TRIGGER_IN in_dword_masked( \
+		HWIO_IPA_UC_QMB_TRIGGER_ADDR, \
+		HWIO_IPA_UC_QMB_TRIGGER_RMSK)
+#define HWIO_IPA_UC_QMB_TRIGGER_INM(m) in_dword_masked(	\
+		HWIO_IPA_UC_QMB_TRIGGER_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_TRIGGER_OUT(v) out_dword( \
+		HWIO_IPA_UC_QMB_TRIGGER_ADDR, \
+		v)
+#define HWIO_IPA_UC_QMB_TRIGGER_OUTM(m, v) out_dword_masked_ns(	\
+		HWIO_IPA_UC_QMB_TRIGGER_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_UC_QMB_TRIGGER_IN)
+#define HWIO_IPA_UC_QMB_TRIGGER_POSTING_BMSK 0x30
+#define HWIO_IPA_UC_QMB_TRIGGER_POSTING_SHFT 0x4
+#define HWIO_IPA_UC_QMB_TRIGGER_POSTING_DATA_POSTED_FVAL 0x0
+#define HWIO_IPA_UC_QMB_TRIGGER_POSTING_RESP_POSTED_FVAL 0x1
+#define HWIO_IPA_UC_QMB_TRIGGER_POSTING_DATA_COMPLETE_FVAL 0x2
+#define HWIO_IPA_UC_QMB_TRIGGER_POSTING_RESP_COMPLETE_FVAL 0x3
+#define HWIO_IPA_UC_QMB_TRIGGER_DIRECTION_BMSK 0x1
+#define HWIO_IPA_UC_QMB_TRIGGER_DIRECTION_SHFT 0x0
+#define HWIO_IPA_UC_QMB_TRIGGER_DIRECTION_READ_FVAL 0x0
+#define HWIO_IPA_UC_QMB_TRIGGER_DIRECTION_WRITE_FVAL 0x1
+#define HWIO_IPA_UC_QMB_PENDING_TID_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+					  0x00000114)
+#define HWIO_IPA_UC_QMB_PENDING_TID_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS \
+					  + 0x00000114)
+#define HWIO_IPA_UC_QMB_PENDING_TID_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS \
+					  + 0x00000114)
+#define HWIO_IPA_UC_QMB_PENDING_TID_RMSK 0x11113f
+#define HWIO_IPA_UC_QMB_PENDING_TID_ATTR 0x1
+#define HWIO_IPA_UC_QMB_PENDING_TID_IN in_dword_masked(	\
+		HWIO_IPA_UC_QMB_PENDING_TID_ADDR, \
+		HWIO_IPA_UC_QMB_PENDING_TID_RMSK)
+#define HWIO_IPA_UC_QMB_PENDING_TID_INM(m) in_dword_masked( \
+		HWIO_IPA_UC_QMB_PENDING_TID_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_PENDING_TID_ERROR_SECURITY_BMSK 0x100000
+#define HWIO_IPA_UC_QMB_PENDING_TID_ERROR_SECURITY_SHFT 0x14
+#define HWIO_IPA_UC_QMB_PENDING_TID_ERROR_MAX_COMP_BMSK 0x10000
+#define HWIO_IPA_UC_QMB_PENDING_TID_ERROR_MAX_COMP_SHFT 0x10
+#define HWIO_IPA_UC_QMB_PENDING_TID_ERROR_MAX_OS_BMSK 0x1000
+#define HWIO_IPA_UC_QMB_PENDING_TID_ERROR_MAX_OS_SHFT 0xc
+#define HWIO_IPA_UC_QMB_PENDING_TID_ERROR_BUS_BMSK 0x100
+#define HWIO_IPA_UC_QMB_PENDING_TID_ERROR_BUS_SHFT 0x8
+#define HWIO_IPA_UC_QMB_PENDING_TID_TID_BMSK 0x3f
+#define HWIO_IPA_UC_QMB_PENDING_TID_TID_SHFT 0x0
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_ADDR (IPA_UC_IPA_UC_PER_REG_BASE \
+						+ 0x00000118)
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PHYS ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x00000118)
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_OFFS ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x00000118)
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_ADDR ( \
+		IPA_UC_IPA_UC_PER_REG_BASE + 0x0000011c)
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_PHYS ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x0000011c)
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_OFFS ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x0000011c)
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_RMSK 0x113f
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_ATTR 0x1
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_IN in_dword_masked( \
+		HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_ADDR, \
+		HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_RMSK)
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_INM(m) in_dword_masked( \
+		HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_VALID_BMSK 0x1000
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_VALID_SHFT 0xc
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_ERROR_BMSK 0x100
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_ERROR_SHFT 0x8
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_TID_BMSK 0x3f
+#define HWIO_IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK_TID_SHFT 0x0
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_ADDR (IPA_UC_IPA_UC_PER_REG_BASE \
+						+ 0x00000120)
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PHYS ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x00000120)
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_OFFS ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x00000120)
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_ADDR ( \
+		IPA_UC_IPA_UC_PER_REG_BASE + 0x00000124)
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_PHYS ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x00000124)
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_OFFS ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x00000124)
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_RMSK 0x113f
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_ATTR 0x1
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_IN in_dword_masked( \
+		HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_ADDR, \
+		HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_RMSK)
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_INM(m) in_dword_masked( \
+		HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_VALID_BMSK 0x1000
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_VALID_SHFT 0xc
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_ERROR_BMSK 0x100
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_ERROR_SHFT 0x8
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_TID_BMSK 0x3f
+#define HWIO_IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK_TID_SHFT 0x0
+#define HWIO_IPA_UC_QMB_MISC_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + 0x00000128)
+#define HWIO_IPA_UC_QMB_MISC_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				   0x00000128)
+#define HWIO_IPA_UC_QMB_MISC_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				   0x00000128)
+#define HWIO_IPA_UC_QMB_MISC_RMSK 0xf11333ff
+#define HWIO_IPA_UC_QMB_MISC_ATTR 0x3
+#define HWIO_IPA_UC_QMB_MISC_IN in_dword_masked(HWIO_IPA_UC_QMB_MISC_ADDR, \
+						HWIO_IPA_UC_QMB_MISC_RMSK)
+#define HWIO_IPA_UC_QMB_MISC_INM(m) in_dword_masked( \
+		HWIO_IPA_UC_QMB_MISC_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_MISC_OUT(v) out_dword(HWIO_IPA_UC_QMB_MISC_ADDR, v)
+#define HWIO_IPA_UC_QMB_MISC_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_UC_QMB_MISC_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_UC_QMB_MISC_IN)
+#define HWIO_IPA_UC_QMB_MISC_QMB_HREADY_BCR_BMSK 0x80000000
+#define HWIO_IPA_UC_QMB_MISC_QMB_HREADY_BCR_SHFT 0x1f
+#define HWIO_IPA_UC_QMB_MISC_POSTED_STALL_BMSK 0x40000000
+#define HWIO_IPA_UC_QMB_MISC_POSTED_STALL_SHFT 0x1e
+#define HWIO_IPA_UC_QMB_MISC_IRQ_COAL_BMSK 0x20000000
+#define HWIO_IPA_UC_QMB_MISC_IRQ_COAL_SHFT 0x1d
+#define HWIO_IPA_UC_QMB_MISC_SWAP_BMSK 0x10000000
+#define HWIO_IPA_UC_QMB_MISC_SWAP_SHFT 0x1c
+#define HWIO_IPA_UC_QMB_MISC_OOOWR_BMSK 0x1000000
+#define HWIO_IPA_UC_QMB_MISC_OOOWR_SHFT 0x18
+#define HWIO_IPA_UC_QMB_MISC_OOORD_BMSK 0x100000
+#define HWIO_IPA_UC_QMB_MISC_OOORD_SHFT 0x14
+#define HWIO_IPA_UC_QMB_MISC_WR_PRIORITY_BMSK 0x30000
+#define HWIO_IPA_UC_QMB_MISC_WR_PRIORITY_SHFT 0x10
+#define HWIO_IPA_UC_QMB_MISC_RD_PRIORITY_BMSK 0x3000
+#define HWIO_IPA_UC_QMB_MISC_RD_PRIORITY_SHFT 0xc
+#define HWIO_IPA_UC_QMB_MISC_USER_BMSK 0x3ff
+#define HWIO_IPA_UC_QMB_MISC_USER_SHFT 0x0
+#define HWIO_IPA_UC_QMB_STATUS_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				     0x0000012c)
+#define HWIO_IPA_UC_QMB_STATUS_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				     0x0000012c)
+#define HWIO_IPA_UC_QMB_STATUS_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				     0x0000012c)
+#define HWIO_IPA_UC_QMB_STATUS_RMSK 0x1fff1fff
+#define HWIO_IPA_UC_QMB_STATUS_ATTR 0x1
+#define HWIO_IPA_UC_QMB_STATUS_IN in_dword_masked( \
+		HWIO_IPA_UC_QMB_STATUS_ADDR, \
+		HWIO_IPA_UC_QMB_STATUS_RMSK)
+#define HWIO_IPA_UC_QMB_STATUS_INM(m) in_dword_masked( \
+		HWIO_IPA_UC_QMB_STATUS_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_STATUS_COMPLETED_WR_FIFO_FULL_BMSK 0x10000000
+#define HWIO_IPA_UC_QMB_STATUS_COMPLETED_WR_FIFO_FULL_SHFT 0x1c
+#define HWIO_IPA_UC_QMB_STATUS_COMPLETED_WR_CNT_BMSK 0xf000000
+#define HWIO_IPA_UC_QMB_STATUS_COMPLETED_WR_CNT_SHFT 0x18
+#define HWIO_IPA_UC_QMB_STATUS_OUTSTANDING_WR_CNT_BMSK 0xf00000
+#define HWIO_IPA_UC_QMB_STATUS_OUTSTANDING_WR_CNT_SHFT 0x14
+#define HWIO_IPA_UC_QMB_STATUS_MAX_OUTSTANDING_WR_BMSK 0xf0000
+#define HWIO_IPA_UC_QMB_STATUS_MAX_OUTSTANDING_WR_SHFT 0x10
+#define HWIO_IPA_UC_QMB_STATUS_COMPLETED_RD_FIFO_FULL_BMSK 0x1000
+#define HWIO_IPA_UC_QMB_STATUS_COMPLETED_RD_FIFO_FULL_SHFT 0xc
+#define HWIO_IPA_UC_QMB_STATUS_COMPLETED_RD_CNT_BMSK 0xf00
+#define HWIO_IPA_UC_QMB_STATUS_COMPLETED_RD_CNT_SHFT 0x8
+#define HWIO_IPA_UC_QMB_STATUS_OUTSTANDING_RD_CNT_BMSK 0xf0
+#define HWIO_IPA_UC_QMB_STATUS_OUTSTANDING_RD_CNT_SHFT 0x4
+#define HWIO_IPA_UC_QMB_STATUS_MAX_OUTSTANDING_RD_BMSK 0xf
+#define HWIO_IPA_UC_QMB_STATUS_MAX_OUTSTANDING_RD_SHFT 0x0
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+					 0x00000130)
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+					 0x00000130)
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+					 0x00000130)
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_RMSK 0x1117
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_ATTR 0x3
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_IN in_dword_masked( \
+		HWIO_IPA_UC_QMB_BUS_ATTRIB_ADDR, \
+		HWIO_IPA_UC_QMB_BUS_ATTRIB_RMSK)
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_INM(m) in_dword_masked( \
+		HWIO_IPA_UC_QMB_BUS_ATTRIB_ADDR, \
+		m)
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_OUT(v) out_dword( \
+		HWIO_IPA_UC_QMB_BUS_ATTRIB_ADDR, \
+		v)
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_OUTM(m, v) out_dword_masked_ns( \
+		HWIO_IPA_UC_QMB_BUS_ATTRIB_ADDR, \
+		m, \
+		v, \
+		HWIO_IPA_UC_QMB_BUS_ATTRIB_IN)
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_SHARED_BMSK 0x1000
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_SHARED_SHFT 0xc
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_INNERSHARED_BMSK 0x100
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_INNERSHARED_SHFT 0x8
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_NOALLOCATE_BMSK 0x10
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_NOALLOCATE_SHFT 0x4
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_MEMTYPE_BMSK 0x7
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_MEMTYPE_SHFT 0x0
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_MEMTYPE_STRONGLY_ORDERED_FVAL 0x0
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_MEMTYPE_DEVICE_FVAL 0x1
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_MEMTYPE_NON_CACHEABLE_FVAL 0x2
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_MEMTYPE_COPYBACK_WRITEALLOCATE_FVAL 0x3
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_MEMTYPE_WRITETHROUGH_NOALLOCATE_FVAL	\
+	0x6
+#define HWIO_IPA_UC_QMB_BUS_ATTRIB_MEMTYPE_COPYBACK_NOALLOCATE_FVAL 0x7
+#define HWIO_IPA_UC_MBOX_INT_STTS_n_ADDR(n) (IPA_UC_IPA_UC_PER_REG_BASE + \
+					     0x00000200 + 0x10 * (n))
+#define HWIO_IPA_UC_MBOX_INT_STTS_n_PHYS(n) ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x00000200 + 0x10 * (n))
+#define HWIO_IPA_UC_MBOX_INT_STTS_n_OFFS(n) ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x00000200 + 0x10 * (n))
+#define HWIO_IPA_UC_MBOX_INT_EN_n_ADDR(n) (IPA_UC_IPA_UC_PER_REG_BASE +	\
+					   0x00000204 + 0x10 * (n))
+#define HWIO_IPA_UC_MBOX_INT_EN_n_PHYS(n) (IPA_UC_IPA_UC_PER_REG_BASE_PHYS \
+					   + 0x00000204 + 0x10 * (n))
+#define HWIO_IPA_UC_MBOX_INT_EN_n_OFFS(n) (IPA_UC_IPA_UC_PER_REG_BASE_OFFS \
+					   + 0x00000204 + 0x10 * (n))
+#define HWIO_IPA_UC_MBOX_INT_CLR_n_ADDR(n) (IPA_UC_IPA_UC_PER_REG_BASE + \
+					    0x00000208 + 0x10 * (n))
+#define HWIO_IPA_UC_MBOX_INT_CLR_n_PHYS(n) ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x00000208 + 0x10 * (n))
+#define HWIO_IPA_UC_MBOX_INT_CLR_n_OFFS(n) ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x00000208 + 0x10 * (n))
+#define HWIO_IPA_UC_IPA_INT_STTS_n_ADDR(n) (IPA_UC_IPA_UC_PER_REG_BASE + \
+					    0x00000300 + 0x10 * (n))
+#define HWIO_IPA_UC_IPA_INT_STTS_n_PHYS(n) ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x00000300 + 0x10 * (n))
+#define HWIO_IPA_UC_IPA_INT_STTS_n_OFFS(n) ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x00000300 + 0x10 * (n))
+#define HWIO_IPA_UC_IPA_INT_EN_n_ADDR(n) (IPA_UC_IPA_UC_PER_REG_BASE + \
+					  0x00000304 + 0x10 * (n))
+#define HWIO_IPA_UC_IPA_INT_EN_n_PHYS(n) (IPA_UC_IPA_UC_PER_REG_BASE_PHYS \
+					  + 0x00000304 + 0x10 * (n))
+#define HWIO_IPA_UC_IPA_INT_EN_n_OFFS(n) (IPA_UC_IPA_UC_PER_REG_BASE_OFFS \
+					  + 0x00000304 + 0x10 * (n))
+#define HWIO_IPA_UC_IPA_INT_CLR_n_ADDR(n) (IPA_UC_IPA_UC_PER_REG_BASE +	\
+					   0x00000308 + 0x10 * (n))
+#define HWIO_IPA_UC_IPA_INT_CLR_n_PHYS(n) (IPA_UC_IPA_UC_PER_REG_BASE_PHYS \
+					   + 0x00000308 + 0x10 * (n))
+#define HWIO_IPA_UC_IPA_INT_CLR_n_OFFS(n) (IPA_UC_IPA_UC_PER_REG_BASE_OFFS \
+					   + 0x00000308 + 0x10 * (n))
+#define HWIO_IPA_UC_HWEV_INT_STTS_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+					0x00000400)
+#define HWIO_IPA_UC_HWEV_INT_STTS_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+					0x00000400)
+#define HWIO_IPA_UC_HWEV_INT_STTS_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+					0x00000400)
+#define HWIO_IPA_UC_HWEV_INT_EN_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				      0x00000404)
+#define HWIO_IPA_UC_HWEV_INT_EN_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS +	\
+				      0x00000404)
+#define HWIO_IPA_UC_HWEV_INT_EN_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS +	\
+				      0x00000404)
+#define HWIO_IPA_UC_HWEV_INT_CLR_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				       0x00000408)
+#define HWIO_IPA_UC_HWEV_INT_CLR_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				       0x00000408)
+#define HWIO_IPA_UC_HWEV_INT_CLR_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				       0x00000408)
+#define HWIO_IPA_UC_SWEV_INT_STTS_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+					0x00000410)
+#define HWIO_IPA_UC_SWEV_INT_STTS_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+					0x00000410)
+#define HWIO_IPA_UC_SWEV_INT_STTS_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+					0x00000410)
+#define HWIO_IPA_UC_SWEV_INT_EN_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				      0x00000414)
+#define HWIO_IPA_UC_SWEV_INT_EN_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS +	\
+				      0x00000414)
+#define HWIO_IPA_UC_SWEV_INT_EN_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS +	\
+				      0x00000414)
+#define HWIO_IPA_UC_SWEV_INT_CLR_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				       0x00000418)
+#define HWIO_IPA_UC_SWEV_INT_CLR_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				       0x00000418)
+#define HWIO_IPA_UC_SWEV_INT_CLR_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				       0x00000418)
+#define HWIO_IPA_UC_VUIC_INT_STTS_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+					0x0000041c)
+#define HWIO_IPA_UC_VUIC_INT_STTS_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+					0x0000041c)
+#define HWIO_IPA_UC_VUIC_INT_STTS_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+					0x0000041c)
+#define HWIO_IPA_UC_VUIC_INT_CLR_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				       0x00000420)
+#define HWIO_IPA_UC_VUIC_INT_CLR_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				       0x00000420)
+#define HWIO_IPA_UC_VUIC_INT_CLR_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				       0x00000420)
+#define HWIO_IPA_UC_TIMER_CTRL_n_ADDR(n) (IPA_UC_IPA_UC_PER_REG_BASE + \
+					  0x00000500 + 0x10 * (n))
+#define HWIO_IPA_UC_TIMER_CTRL_n_PHYS(n) (IPA_UC_IPA_UC_PER_REG_BASE_PHYS \
+					  + 0x00000500 + 0x10 * (n))
+#define HWIO_IPA_UC_TIMER_CTRL_n_OFFS(n) (IPA_UC_IPA_UC_PER_REG_BASE_OFFS \
+					  + 0x00000500 + 0x10 * (n))
+#define HWIO_IPA_UC_TIMER_STATUS_n_ADDR(n) (IPA_UC_IPA_UC_PER_REG_BASE + \
+					    0x00000508 + 0x10 * (n))
+#define HWIO_IPA_UC_TIMER_STATUS_n_PHYS(n) ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x00000508 + 0x10 * (n))
+#define HWIO_IPA_UC_TIMER_STATUS_n_OFFS(n) ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x00000508 + 0x10 * (n))
+#define HWIO_IPA_UC_EVENTS_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + 0x00000600)
+#define HWIO_IPA_UC_EVENTS_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				 0x00000600)
+#define HWIO_IPA_UC_EVENTS_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				 0x00000600)
+#define HWIO_IPA_UC_VUIC_BUS_ADDR_TRANSLATE_EN_ADDR ( \
+		IPA_UC_IPA_UC_PER_REG_BASE + 0x00000710)
+#define HWIO_IPA_UC_VUIC_BUS_ADDR_TRANSLATE_EN_PHYS ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_PHYS + 0x00000710)
+#define HWIO_IPA_UC_VUIC_BUS_ADDR_TRANSLATE_EN_OFFS ( \
+		IPA_UC_IPA_UC_PER_REG_BASE_OFFS + 0x00000710)
+#define HWIO_IPA_UC_SYS_ADDR_MSB_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+				       0x00000714)
+#define HWIO_IPA_UC_SYS_ADDR_MSB_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				       0x00000714)
+#define HWIO_IPA_UC_SYS_ADDR_MSB_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				       0x00000714)
+#define HWIO_IPA_UC_PC_RESTORE_WR_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+					0x00000718)
+#define HWIO_IPA_UC_PC_RESTORE_WR_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+					0x00000718)
+#define HWIO_IPA_UC_PC_RESTORE_WR_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+					0x00000718)
+#define HWIO_IPA_UC_PC_RESTORE_RD_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + \
+					0x0000071c)
+#define HWIO_IPA_UC_PC_RESTORE_RD_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+					0x0000071c)
+#define HWIO_IPA_UC_PC_RESTORE_RD_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+					0x0000071c)
+#define HWIO_IPA_UC_SPARE_ADDR (IPA_UC_IPA_UC_PER_REG_BASE + 0x00001ffc)
+#define HWIO_IPA_UC_SPARE_PHYS (IPA_UC_IPA_UC_PER_REG_BASE_PHYS + \
+				0x00001ffc)
+#define HWIO_IPA_UC_SPARE_OFFS (IPA_UC_IPA_UC_PER_REG_BASE_OFFS + \
+				0x00001ffc)
+#endif
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hwio_def.h b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hwio_def.h
new file mode 100644
index 0000000..1600f0a
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hwio_def.h
@@ -0,0 +1,2909 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#if !defined(_IPA_HWIO_DEF_H_)
+#define _IPA_HWIO_DEF_H_
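+/*
+ * Each register in this file is described by a _s struct giving its
+ * bitfield layout and a _u union overlaying that struct on the raw
+ * 32-bit register value, so a value read from hardware can be decoded
+ * field by field. A minimal usage sketch (the read accessor name is
+ * illustrative, not part of this header):
+ *
+ *	union ipa_hwio_def_ipa_gsi_top_gsi_cfg_u cfg;
+ *
+ *	cfg.value = my_reg_read(HWIO_IPA_GSI_TOP_GSI_CFG_ADDR);
+ *	if (cfg.def.gsi_enable)
+ *		handle_gsi_enabled();
+ */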
+struct ipa_hwio_def_ipa_gsi_top_gsi_cfg_s {
+	u32	gsi_enable : 1;
+	u32	mcs_enable : 1;
+	u32	double_mcs_clk_freq : 1;
+	u32	uc_is_mcs : 1;
+	u32	gsi_pwr_clps : 1;
+	u32	bp_mtrix_disable : 1;
+	u32	reserved0 : 2;
+	u32	sleep_clk_div : 4;
+	u32	reserved1 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_cfg_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_cfg_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_ree_cfg_s {
+	u32	move_to_esc_clr_mode_trsh : 1;
+	u32	channel_empty_int_enable : 1;
+	u32	reserved0 : 6;
+	u32	max_burst_size : 8;
+	u32	reserved1 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_ree_cfg_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_ree_cfg_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_manager_ee_qos_n_s {
+	u32	ee_prio : 2;
+	u32	reserved0 : 6;
+	u32	max_ch_alloc : 5;
+	u32	reserved1 : 3;
+	u32	max_ev_alloc : 5;
+	u32	reserved2 : 11;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_manager_ee_qos_n_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_manager_ee_qos_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ch_cntxt_base_addr_s {
+	u32	shram_ptr : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ch_cntxt_base_addr_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ch_cntxt_base_addr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ev_cntxt_base_addr_s {
+	u32	shram_ptr : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ev_cntxt_base_addr_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ev_cntxt_base_addr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_re_storage_base_addr_s {
+	u32	shram_ptr : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_re_storage_base_addr_u {
+	struct
+	ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_re_storage_base_addr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_re_esc_buf_base_addr_s {
+	u32	shram_ptr : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_re_esc_buf_base_addr_u {
+	struct
+	ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_re_esc_buf_base_addr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ee_scrach_base_addr_s {
+	u32	shram_ptr : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ee_scrach_base_addr_u {
+	struct
+	ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ee_scrach_base_addr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_func_stack_base_addr_s {
+	u32	shram_ptr : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_func_stack_base_addr_u {
+	struct
+	ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_func_stack_base_addr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_cmd_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_cmd_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_cmd_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ee_generic_cmd_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ee_generic_cmd_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ee_generic_cmd_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_db_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_db_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_db_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ev_db_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ev_db_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ev_db_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_new_re_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_new_re_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_new_re_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_dis_comp_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_dis_comp_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_dis_comp_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_empty_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_empty_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_empty_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_event_gen_comp_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_event_gen_comp_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_event_gen_comp_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_timer_expired_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_timer_expired_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_timer_expired_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_write_eng_comp_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_write_eng_comp_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_write_eng_comp_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_read_eng_comp_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_read_eng_comp_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_read_eng_comp_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_uc_gp_int_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_uc_gp_int_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_uc_gp_int_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_int_mod_stopped_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_int_mod_stopped_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_int_mod_stopped_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_inst_ram_n_s {
+	u32 inst_byte_0 : 8;
+	u32 inst_byte_1 : 8;
+	u32 inst_byte_2 : 8;
+	u32 inst_byte_3 : 8;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_inst_ram_n_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_inst_ram_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_shram_n_s {
+	u32 shram : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_shram_n_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_shram_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_map_ee_n_ch_k_vp_table_s {
+	u32	phy_ch : 5;
+	u32	valid : 1;
+	u32	reserved0 : 26;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_map_ee_n_ch_k_vp_table_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_map_ee_n_ch_k_vp_table_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_test_bus_sel_s {
+	u32	gsi_testbus_sel : 8;
+	u32	reserved0 : 8;
+	u32	gsi_hw_events_sel : 4;
+	u32	reserved1 : 12;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_test_bus_sel_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_test_bus_sel_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_test_bus_reg_s {
+	u32 gsi_testbus_reg : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_test_bus_reg_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_test_bus_reg_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_busy_reg_s {
+	u32	csr_busy : 1;
+	u32	ree_busy : 1;
+	u32	mcs_busy : 1;
+	u32	timer_busy : 1;
+	u32	rd_wr_busy : 1;
+	u32	ev_eng_busy : 1;
+	u32	int_eng_busy : 1;
+	u32	ree_pwr_clps_busy : 1;
+	u32	db_eng_busy : 1;
+	u32	dbg_cnt_busy : 1;
+	u32	uc_busy : 1;
+	u32	ic_busy : 1;
+	u32	sdma_busy : 1;
+	u32	reserved0 : 19;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_busy_reg_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_busy_reg_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_event_pending_s {
+	u32 chid_bit_map : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_event_pending_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_event_pending_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_timer_pending_s {
+	u32 chid_bit_map : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_timer_pending_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_timer_pending_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_rd_wr_pending_s {
+	u32 chid_bit_map : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_rd_wr_pending_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_rd_wr_pending_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_countern_s {
+	u32	counter_value : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_countern_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_countern_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_pc_from_sw_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_pc_from_sw_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_pc_from_sw_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_sw_stall_s {
+	u32	mcs_stall : 1;
+	u32	reserved0 : 31;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_sw_stall_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_sw_stall_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_pc_for_debug_s {
+	u32	iram_ptr : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_pc_for_debug_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_pc_for_debug_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_qsb_log_err_trns_id_s {
+	u32	err_write : 1;
+	u32	reserved0 : 7;
+	u32	err_tid : 8;
+	u32	err_mid : 8;
+	u32	err_saved : 1;
+	u32	reserved1 : 7;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_qsb_log_err_trns_id_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_qsb_log_err_trns_id_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_sw_rf_n_read_s {
+	u32 rf_reg : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_sw_rf_n_read_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_sw_rf_n_read_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_gsi_debug_ee_n_ev_k_vp_table_s {
+	u32	phy_ev_ch : 5;
+	u32	valid : 1;
+	u32	reserved0 : 26;
+};
+union ipa_hwio_def_ipa_gsi_top_gsi_debug_ee_n_ev_k_vp_table_u {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_ee_n_ev_k_vp_table_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_0_s {
+	u32	chtype_protocol : 3;
+	u32	chtype_dir : 1;
+	u32	ee : 4;
+	u32	chid : 5;
+	u32	chtype_protocol_msb : 1;
+	u32	erindex : 5;
+	u32	reserved0 : 1;
+	u32	chstate : 4;
+	u32	element_size : 8;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_0_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_0_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_1_s {
+	u32	r_length : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_1_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_1_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_2_s {
+	u32 r_base_addr_lsbs : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_2_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_2_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_3_s {
+	u32 r_base_addr_msbs : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_3_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_3_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_4_s {
+	u32 read_ptr_lsb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_4_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_4_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_5_s {
+	u32 read_ptr_msb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_5_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_5_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_6_s {
+	u32 write_ptr_lsb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_6_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_6_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_7_s {
+	u32 write_ptr_msb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_7_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_cntxt_7_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_re_fetch_read_ptr_s {
+	u32	read_ptr : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_re_fetch_read_ptr_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_re_fetch_read_ptr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_re_fetch_write_ptr_s {
+	u32	re_intr_db : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_re_fetch_write_ptr_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_re_fetch_write_ptr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_qos_s {
+	u32	wrr_weight : 4;
+	u32	reserved0 : 4;
+	u32	max_prefetch : 1;
+	u32	use_db_eng : 1;
+	u32	prefetch_mode : 4;
+	u32	reserved1 : 2;
+	u32	empty_lvl_thrshold : 8;
+	u32	reserved2 : 8;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_qos_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_qos_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_0_s {
+	u32 scratch : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_0_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_0_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_1_s {
+	u32 scratch : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_1_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_1_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_2_s {
+	u32 scratch : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_2_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_2_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_3_s {
+	u32 scratch : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_3_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_ch_k_scratch_3_s
+		def;
+	u32 value;
+};
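+/*
+ * Event ring context registers (EE_n_EV_CH_k_CNTXT_0..13): ring type
+ * and state, ring length and 64-bit base address, read/write pointers,
+ * interrupt moderation (int_modt/int_modc), the interrupt vector, and
+ * the MSI and RP-update addresses, each split across LSB/MSB words.
+ */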
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_0_s {
+	u32	chtype : 4;
+	u32	ee : 4;
+	u32	evchid : 8;
+	u32	intype : 1;
+	u32	reserved0 : 3;
+	u32	chstate : 4;
+	u32	element_size : 8;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_0_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_0_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_1_s {
+	u32	r_length : 16;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_1_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_1_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_2_s {
+	u32 r_base_addr_lsbs : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_2_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_2_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_3_s {
+	u32 r_base_addr_msbs : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_3_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_3_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_4_s {
+	u32 read_ptr_lsb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_4_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_4_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_5_s {
+	u32 read_ptr_msb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_5_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_5_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_6_s {
+	u32 write_ptr_lsb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_6_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_6_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_7_s {
+	u32 write_ptr_msb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_7_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_7_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_8_s {
+	u32	int_modt : 16;
+	u32	int_modc : 8;
+	u32	int_mod_cnt : 8;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_8_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_8_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_9_s {
+	u32 intvec : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_9_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_9_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_10_s {
+	u32 msi_addr_lsb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_10_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_10_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_11_s {
+	u32 msi_addr_msb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_11_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_11_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_12_s {
+	u32 rp_update_addr_lsb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_12_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_12_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_13_s {
+	u32 rp_update_addr_msb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_13_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_cntxt_13_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_scratch_0_s {
+	u32 scratch : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_scratch_0_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_scratch_0_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_scratch_1_s {
+	u32 scratch : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_scratch_1_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_ev_ch_k_scratch_1_s
+		def;
+	u32 value;
+};
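+/*
+ * Per-EE interrupt and status registers: GSI_STATUS, the summary
+ * CNTXT_TYPE_IRQ and its mask, per-source bitmap/mask/clear sets for
+ * channel-control, event-control and IEOB interrupts, the global and
+ * general-purpose IRQ status words, the interrupt type select
+ * (INTSET), the MSI base address, and the error log with its clear
+ * register.
+ */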
+struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_status_s {
+	u32	enabled : 1;
+	u32	reserved0 : 31;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_gsi_status_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_gsi_status_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_type_irq_s {
+	u32	ch_ctrl : 1;
+	u32	ev_ctrl : 1;
+	u32	glob_ee : 1;
+	u32	ieob : 1;
+	u32	inter_ee_ch_ctrl : 1;
+	u32	inter_ee_ev_ctrl : 1;
+	u32	general : 1;
+	u32	reserved0 : 25;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_type_irq_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_type_irq_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_type_irq_msk_s {
+	u32	ch_ctrl : 1;
+	u32	ev_ctrl : 1;
+	u32	glob_ee : 1;
+	u32	ieob : 1;
+	u32	inter_ee_ch_ctrl : 1;
+	u32	inter_ee_ev_ctrl : 1;
+	u32	general : 1;
+	u32	reserved0 : 25;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_type_irq_msk_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_type_irq_msk_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_gsi_ch_irq_s {
+	u32 gsi_ch_bit_map : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_gsi_ch_irq_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_gsi_ch_irq_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ev_ch_irq_s {
+	u32 ev_ch_bit_map : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ev_ch_irq_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ev_ch_irq_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_gsi_ch_irq_msk_s {
+	u32	gsi_ch_bit_map_msk : 23;
+	u32	reserved0 : 9;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_gsi_ch_irq_msk_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_gsi_ch_irq_msk_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ev_ch_irq_msk_s {
+	u32	ev_ch_bit_map_msk : 20;
+	u32	reserved0 : 12;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ev_ch_irq_msk_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ev_ch_irq_msk_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_gsi_ch_irq_clr_s {
+	u32 gsi_ch_bit_map : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_gsi_ch_irq_clr_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_gsi_ch_irq_clr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ev_ch_irq_clr_s {
+	u32 ev_ch_bit_map : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ev_ch_irq_clr_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ev_ch_irq_clr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ieob_irq_s {
+	u32 ev_ch_bit_map : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ieob_irq_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ieob_irq_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ieob_irq_msk_s {
+	u32	ev_ch_bit_map_msk : 20;
+	u32	reserved0 : 12;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ieob_irq_msk_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ieob_irq_msk_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ieob_irq_clr_s {
+	u32 ev_ch_bit_map : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ieob_irq_clr_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_src_ieob_irq_clr_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_glob_irq_stts_s {
+	u32	error_int : 1;
+	u32	gp_int1 : 1;
+	u32	gp_int2 : 1;
+	u32	gp_int3 : 1;
+	u32	reserved0 : 28;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_glob_irq_stts_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_glob_irq_stts_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_gsi_irq_stts_s {
+	u32	gsi_break_point : 1;
+	u32	gsi_bus_error : 1;
+	u32	gsi_cmd_fifo_ovrflow : 1;
+	u32	gsi_mcs_stack_ovrflow : 1;
+	u32	reserved0 : 28;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_gsi_irq_stts_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_gsi_irq_stts_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_intset_s {
+	u32	intype : 1;
+	u32	reserved0 : 31;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_intset_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_intset_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_msi_base_lsb_s {
+	u32 msi_addr_lsb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_msi_base_lsb_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_msi_base_lsb_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_msi_base_msb_s {
+	u32 msi_addr_msb : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_msi_base_msb_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_msi_base_msb_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_error_log_s {
+	u32 error_log : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_error_log_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_error_log_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_error_log_clr_s {
+	u32 error_log_clr : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_error_log_clr_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_error_log_clr_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_scratch_0_s {
+	u32 scratch : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_scratch_0_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_scratch_0_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_scratch_1_s {
+	u32 scratch : 32;
+};
+union ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_scratch_1_u {
+	struct ipa_hwio_def_ipa_gsi_top_ee_n_cntxt_scratch_1_s	def;
+	u32							value;
+};
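+/*
+ * That closes out the GSI block.  The definitions from here on cover
+ * the IPA core itself, beginning with the component hardware version
+ * and the IPA_COMP_CFG bus-arbitration/QMB control bits.
+ */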
+struct ipa_hwio_def_ipa_comp_hw_version_s {
+	u32	step : 16;
+	u32	minor : 12;
+	u32	major : 4;
+};
+union ipa_hwio_def_ipa_comp_hw_version_u {
+	struct ipa_hwio_def_ipa_comp_hw_version_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_comp_cfg_s {
+	u32	reserved0 : 1;
+	u32	gsi_snoc_bypass_dis : 1;
+	u32	gen_qmb_0_snoc_bypass_dis : 1;
+	u32	gen_qmb_1_snoc_bypass_dis : 1;
+	u32	reserved1 : 1;
+	u32	ipa_qmb_select_by_address_cons_en : 1;
+	u32	ipa_qmb_select_by_address_prod_en : 1;
+	u32	gsi_multi_inorder_rd_dis : 1;
+	u32	gsi_multi_inorder_wr_dis : 1;
+	u32	gen_qmb_0_multi_inorder_rd_dis : 1;
+	u32	gen_qmb_1_multi_inorder_rd_dis : 1;
+	u32	gen_qmb_0_multi_inorder_wr_dis : 1;
+	u32	gen_qmb_1_multi_inorder_wr_dis : 1;
+	u32	gen_qmb_0_snoc_cnoc_loop_protection_disable : 1;
+	u32	gsi_snoc_cnoc_loop_protection_disable : 1;
+	u32	gsi_multi_axi_masters_dis : 1;
+	u32	ipa_qmb_select_by_address_global_en : 1;
+	u32	ipa_atomic_fetcher_arb_lock_dis : 4;
+	u32	ipa_full_flush_wait_rsc_closure_en : 1;
+	u32	reserved2 : 10;
+};
+union ipa_hwio_def_ipa_comp_cfg_u {
+	struct ipa_hwio_def_ipa_comp_cfg_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_route_s {
+	u32	route_dis : 1;
+	u32	route_def_pipe : 5;
+	u32	route_def_hdr_table : 1;
+	u32	route_def_hdr_ofst : 10;
+	u32	route_frag_def_pipe : 5;
+	u32	reserved0 : 2;
+	u32	route_def_retain_hdr : 1;
+	u32	reserved1 : 7;
+};
+union ipa_hwio_def_ipa_route_u {
+	struct ipa_hwio_def_ipa_route_s def;
+	u32				value;
+};
+struct ipa_hwio_def_ipa_proc_iph_cfg_s {
+	u32	iph_threshold : 2;
+	u32	iph_pipelining_disable : 1;
+	u32	reserved0 : 1;
+	u32	status_from_iph_frst_always : 1;
+	u32	iph_nat_blind_invalidate_tport_offset_disable : 1;
+	u32	pipestage_overlap_disable : 1;
+	u32	ftch_dcph_overlap_enable : 1;
+	u32	iph_pkt_parser_protocol_stop_enable : 1;
+	u32	iph_pkt_parser_protocol_stop_hop : 1;
+	u32	iph_pkt_parser_protocol_stop_dest : 1;
+	u32	iph_pkt_parser_ihl_to_2nd_frag_en : 1;
+	u32	reserved1 : 4;
+	u32	iph_pkt_parser_protocol_stop_value : 8;
+	u32	d_dcph_multi_engine_disable : 1;
+	u32	reserved2 : 7;
+};
+union ipa_hwio_def_ipa_proc_iph_cfg_u {
+	struct ipa_hwio_def_ipa_proc_iph_cfg_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_dpl_timer_lsb_s {
+	u32 tod_lsb : 32;
+};
+union ipa_hwio_def_ipa_dpl_timer_lsb_u {
+	struct ipa_hwio_def_ipa_dpl_timer_lsb_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_dpl_timer_msb_s {
+	u32	tod_msb : 16;
+	u32	reserved0 : 15;
+	u32	timer_en : 1;
+};
+union ipa_hwio_def_ipa_dpl_timer_msb_u {
+	struct ipa_hwio_def_ipa_dpl_timer_msb_s def;
+	u32					value;
+};
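+/*
+ * IPA_STATE_* registers: read-only idle/empty indications for the TX
+ * wrappers, header/data fetchers, DPL FIFO, coalescing master, ACL
+ * pipeline stages and the GSI interface, mainly useful for debug and
+ * for checking that the core has quiesced.
+ */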
+struct ipa_hwio_def_ipa_state_tx_wrapper_s {
+	u32	tx0_idle : 1;
+	u32	tx1_idle : 1;
+	u32	ipa_prod_ackmngr_db_empty : 1;
+	u32	ipa_prod_ackmngr_state_idle : 1;
+	u32	ipa_prod_bresp_empty : 1;
+	u32	ipa_prod_bresp_toggle_idle : 1;
+	u32	ipa_mbim_pkt_fms_idle : 1;
+	u32	mbim_direct_dma : 2;
+	u32	trnseq_force_valid : 1;
+	u32	pkt_drop_cnt_idle : 1;
+	u32	nlo_direct_dma : 2;
+	u32	coal_direct_dma : 2;
+	u32	coal_slave_idle : 1;
+	u32	coal_slave_ctx_idle : 1;
+	u32	reserved0 : 8;
+	u32	coal_slave_open_frame : 4;
+	u32	reserved1 : 3;
+};
+union ipa_hwio_def_ipa_state_tx_wrapper_u {
+	struct ipa_hwio_def_ipa_state_tx_wrapper_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_tx1_s {
+	u32	flopped_arbit_type : 3;
+	u32	arbit_type : 3;
+	u32	pa_idle : 1;
+	u32	pa_ctx_idle : 1;
+	u32	pa_rst_idle : 1;
+	u32	pa_pub_cnt_empty : 1;
+	u32	tx_cmd_main_idle : 1;
+	u32	tx_cmd_trnseq_idle : 1;
+	u32	tx_cmd_snif_idle : 1;
+	u32	tx_cmd_bresp_aloc_idle : 1;
+	u32	tx_cmd_bresp_inj_idle : 1;
+	u32	ar_idle : 1;
+	u32	dmaw_idle : 1;
+	u32	dmaw_last_outsd_idle : 1;
+	u32	pf_idle : 1;
+	u32	pf_empty : 1;
+	u32	aligner_empty : 1;
+	u32	holb_idle : 1;
+	u32	holb_mask_idle : 1;
+	u32	rsrcrel_idle : 1;
+	u32	suspend_empty : 1;
+	u32	cs_snif_idle : 1;
+	u32	last_cmd_pipe : 5;
+	u32	suspend_req_empty : 1;
+};
+union ipa_hwio_def_ipa_state_tx1_u {
+	struct ipa_hwio_def_ipa_state_tx1_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_state_fetcher_s {
+	u32	ipa_hps_ftch_state_idle : 1;
+	u32	ipa_hps_ftch_alloc_state_idle : 1;
+	u32	ipa_hps_ftch_pkt_state_idle : 1;
+	u32	ipa_hps_ftch_imm_state_idle : 1;
+	u32	ipa_hps_ftch_cmplt_state_idle : 1;
+	u32	ipa_hps_dmar_state_idle : 7;
+	u32	ipa_hps_dmar_slot_state_idle : 7;
+	u32	ipa_hps_imm_cmd_exec_state_idle : 1;
+	u32	reserved0 : 12;
+};
+union ipa_hwio_def_ipa_state_fetcher_u {
+	struct ipa_hwio_def_ipa_state_fetcher_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_state_fetcher_mask_0_s {
+	u32	mask_queue_dmar_uses_queue : 8;
+	u32	mask_queue_imm_exec : 8;
+	u32	mask_queue_no_resources_context : 8;
+	u32	mask_queue_no_resources_hps_dmar : 8;
+};
+union ipa_hwio_def_ipa_state_fetcher_mask_0_u {
+	struct ipa_hwio_def_ipa_state_fetcher_mask_0_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_fetcher_mask_1_s {
+	u32	mask_queue_no_resources_ack_entry : 8;
+	u32	mask_queue_arb_lock : 8;
+	u32	mask_queue_step_mode : 8;
+	u32	mask_queue_no_space_dpl_fifo : 8;
+};
+union ipa_hwio_def_ipa_state_fetcher_mask_1_u {
+	struct ipa_hwio_def_ipa_state_fetcher_mask_1_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_dpl_fifo_s {
+	u32	pop_fsm_state : 3;
+	u32	reserved0 : 29;
+};
+union ipa_hwio_def_ipa_state_dpl_fifo_u {
+	struct ipa_hwio_def_ipa_state_dpl_fifo_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_coal_master_s {
+	u32	vp_vld : 4;
+	u32	main_fsm_state : 4;
+	u32	find_open_fsm_state : 4;
+	u32	hash_calc_fsm_state : 4;
+	u32	check_fit_fsm_state : 4;
+	u32	init_vp_fsm_state : 4;
+	u32	lru_vp : 4;
+	u32	vp_timer_expired : 4;
+};
+union ipa_hwio_def_ipa_state_coal_master_u {
+	struct ipa_hwio_def_ipa_state_coal_master_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_dfetcher_s {
+	u32	ipa_dps_ftch_pkt_state_idle : 1;
+	u32	ipa_dps_ftch_cmplt_state_idle : 1;
+	u32	reserved0 : 2;
+	u32	ipa_dps_dmar_state_idle : 6;
+	u32	reserved1 : 2;
+	u32	ipa_dps_dmar_slot_state_idle : 6;
+	u32	reserved2 : 14;
+};
+union ipa_hwio_def_ipa_state_dfetcher_u {
+	struct ipa_hwio_def_ipa_state_dfetcher_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_acl_s {
+	u32	ipa_hps_h_dcph_empty : 1;
+	u32	ipa_hps_h_dcph_active : 1;
+	u32	ipa_hps_pkt_parser_empty : 1;
+	u32	ipa_hps_pkt_parser_active : 1;
+	u32	ipa_hps_filter_nat_empty : 1;
+	u32	ipa_hps_filter_nat_active : 1;
+	u32	ipa_hps_router_empty : 1;
+	u32	ipa_hps_router_active : 1;
+	u32	ipa_hps_hdri_empty : 1;
+	u32	ipa_hps_hdri_active : 1;
+	u32	ipa_hps_ucp_empty : 1;
+	u32	ipa_hps_ucp_active : 1;
+	u32	ipa_hps_enqueuer_empty : 1;
+	u32	ipa_hps_enqueuer_active : 1;
+	u32	ipa_dps_d_dcph_empty : 1;
+	u32	ipa_dps_d_dcph_active : 1;
+	u32	reserved0 : 2;
+	u32	ipa_dps_dispatcher_empty : 1;
+	u32	ipa_dps_dispatcher_active : 1;
+	u32	ipa_dps_d_dcph_2_empty : 1;
+	u32	ipa_dps_d_dcph_2_active : 1;
+	u32	ipa_hps_sequencer_idle : 1;
+	u32	ipa_dps_sequencer_idle : 1;
+	u32	ipa_dps_d_dcph_2nd_empty : 1;
+	u32	ipa_dps_d_dcph_2nd_active : 1;
+	u32	ipa_hps_coal_master_empty : 1;
+	u32	ipa_hps_coal_master_active : 1;
+	u32	reserved1 : 4;
+};
+union ipa_hwio_def_ipa_state_acl_u {
+	struct ipa_hwio_def_ipa_state_acl_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_state_gsi_tlv_s {
+	u32	ipa_gsi_toggle_fsm_idle : 1;
+	u32	reserved0 : 31;
+};
+union ipa_hwio_def_ipa_state_gsi_tlv_u {
+	struct ipa_hwio_def_ipa_state_gsi_tlv_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_state_gsi_aos_s {
+	u32	ipa_gsi_aos_fsm_idle : 1;
+	u32	reserved0 : 31;
+};
+union ipa_hwio_def_ipa_state_gsi_aos_u {
+	struct ipa_hwio_def_ipa_state_gsi_aos_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_state_gsi_if_s {
+	u32	ipa_gsi_prod_fsm_tx_0 : 4;
+	u32	ipa_gsi_prod_fsm_tx_1 : 4;
+	u32	reserved0 : 24;
+};
+union ipa_hwio_def_ipa_state_gsi_if_u {
+	struct ipa_hwio_def_ipa_state_gsi_if_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_state_gsi_skip_s {
+	u32	ipa_gsi_skip_fsm : 2;
+	u32	reserved0 : 30;
+};
+union ipa_hwio_def_ipa_state_gsi_skip_u {
+	struct ipa_hwio_def_ipa_state_gsi_skip_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_gsi_if_cons_s {
+	u32	state : 1;
+	u32	cache_vld : 6;
+	u32	rx_req : 10;
+	u32	rx_req_no_zero : 10;
+	u32	reserved0 : 5;
+};
+union ipa_hwio_def_ipa_state_gsi_if_cons_u {
+	struct ipa_hwio_def_ipa_state_gsi_if_cons_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_s {
+	u32	rx_wait : 1;
+	u32	rx_idle : 1;
+	u32	tx_idle : 1;
+	u32	dpl_fifo_idle : 1;
+	u32	bam_gsi_idle : 1;
+	u32	ipa_status_sniffer_idle : 1;
+	u32	ipa_noc_idle : 1;
+	u32	aggr_idle : 1;
+	u32	mbim_aggr_idle : 1;
+	u32	ipa_rsrc_mngr_db_empty : 1;
+	u32	ipa_rsrc_state_idle : 1;
+	u32	ipa_ackmngr_db_empty : 1;
+	u32	ipa_ackmngr_state_idle : 1;
+	u32	ipa_tx_ackq_full : 1;
+	u32	ipa_prod_ackmngr_db_empty : 1;
+	u32	ipa_prod_ackmngr_state_idle : 1;
+	u32	ipa_prod_bresp_idle : 1;
+	u32	ipa_full_idle : 1;
+	u32	ipa_ntf_tx_empty : 1;
+	u32	ipa_tx_ackq_empty : 1;
+	u32	ipa_uc_ackq_empty : 1;
+	u32	ipa_rx_ackq_empty : 1;
+	u32	ipa_tx_commander_cmdq_empty : 1;
+	u32	ipa_rx_splt_cmdq_empty : 4;
+	u32	reserved0 : 1;
+	u32	ipa_rx_hps_empty : 1;
+	u32	ipa_hps_dps_empty : 1;
+	u32	ipa_dps_tx_empty : 1;
+	u32	ipa_uc_rx_hnd_cmdq_empty : 1;
+};
+union ipa_hwio_def_ipa_state_u {
+	struct ipa_hwio_def_ipa_state_s def;
+	u32				value;
+};
+struct ipa_hwio_def_ipa_state_rx_active_s {
+	u32	endpoints : 13;
+	u32	reserved0 : 19;
+};
+union ipa_hwio_def_ipa_state_rx_active_u {
+	struct ipa_hwio_def_ipa_state_rx_active_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_tx0_s {
+	u32	last_arbit_type : 2;
+	u32	next_arbit_type : 2;
+	u32	pa_idle : 1;
+	u32	pa_ctx_idle : 1;
+	u32	pa_pub_cnt_empty : 1;
+	u32	tx_cmd_main_idle : 1;
+	u32	tx_cmd_trnseq_idle : 1;
+	u32	tx_cmd_snif_idle : 1;
+	u32	tx_cmd_bresp_aloc_idle : 1;
+	u32	tx_cmd_bresp_inj_idle : 1;
+	u32	ar_idle : 1;
+	u32	dmaw_idle : 1;
+	u32	dmaw_last_outsd_idle : 1;
+	u32	pf_idle : 1;
+	u32	pf_empty : 1;
+	u32	aligner_empty : 1;
+	u32	holb_idle : 1;
+	u32	holb_mask_idle : 1;
+	u32	rsrcrel_idle : 1;
+	u32	suspend_empty : 1;
+	u32	cs_snif_idle : 1;
+	u32	last_cmd_pipe : 5;
+	u32	reserved0 : 4;
+};
+union ipa_hwio_def_ipa_state_tx0_u {
+	struct ipa_hwio_def_ipa_state_tx0_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_state_aggr_active_s {
+	u32	endpoints : 31;
+	u32	reserved0 : 1;
+};
+union ipa_hwio_def_ipa_state_aggr_active_u {
+	struct ipa_hwio_def_ipa_state_aggr_active_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_nlo_aggr_s {
+	u32 nlo_aggr_state : 32;
+};
+union ipa_hwio_def_ipa_state_nlo_aggr_u {
+	struct ipa_hwio_def_ipa_state_nlo_aggr_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_state_coal_master_1_s {
+	u32	init_vp_wr_ctx_line : 6;
+	u32	init_vp_rd_pkt_line : 6;
+	u32	init_vp_fsm_state : 4;
+	u32	check_fit_rd_ctx_line : 6;
+	u32	check_fit_fsm_state : 4;
+	u32	arbiter_state : 4;
+	u32	reserved0 : 2;
+};
+union ipa_hwio_def_ipa_state_coal_master_1_u {
+	struct ipa_hwio_def_ipa_state_coal_master_1_s	def;
+	u32						value;
+};
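+/*
+ * Filter/router hash support: per-IP-version hash enable and flush
+ * bits, followed by the base offsets of the hashed and non-hashed
+ * filter and route tables for both IPv4 and IPv6.
+ */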
+struct ipa_hwio_def_ipa_filt_rout_hash_en_s {
+	u32	ipv6_router_hash_en : 1;
+	u32	reserved0 : 3;
+	u32	ipv6_filter_hash_en : 1;
+	u32	reserved1 : 3;
+	u32	ipv4_router_hash_en : 1;
+	u32	reserved2 : 3;
+	u32	ipv4_filter_hash_en : 1;
+	u32	reserved3 : 19;
+};
+union ipa_hwio_def_ipa_filt_rout_hash_en_u {
+	struct ipa_hwio_def_ipa_filt_rout_hash_en_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_filt_rout_hash_flush_s {
+	u32	ipv6_router_hash_flush : 1;
+	u32	reserved0 : 3;
+	u32	ipv6_filter_hash_flush : 1;
+	u32	reserved1 : 3;
+	u32	ipv4_router_hash_flush : 1;
+	u32	reserved2 : 3;
+	u32	ipv4_filter_hash_flush : 1;
+	u32	reserved3 : 19;
+};
+union ipa_hwio_def_ipa_filt_rout_hash_flush_u {
+	struct ipa_hwio_def_ipa_filt_rout_hash_flush_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_ipv4_filter_init_values_s {
+	u32	ip_v4_filter_init_hashed_addr : 16;
+	u32	ip_v4_filter_init_non_hashed_addr : 16;
+};
+union ipa_hwio_def_ipa_ipv4_filter_init_values_u {
+	struct ipa_hwio_def_ipa_ipv4_filter_init_values_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_ipv6_filter_init_values_s {
+	u32	ip_v6_filter_init_hashed_addr : 16;
+	u32	ip_v6_filter_init_non_hashed_addr : 16;
+};
+union ipa_hwio_def_ipa_ipv6_filter_init_values_u {
+	struct ipa_hwio_def_ipa_ipv6_filter_init_values_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_ipv4_route_init_values_s {
+	u32	ip_v4_route_init_hashed_addr : 16;
+	u32	ip_v4_route_init_non_hashed_addr : 16;
+};
+union ipa_hwio_def_ipa_ipv4_route_init_values_u {
+	struct ipa_hwio_def_ipa_ipv4_route_init_values_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_ipv6_route_init_values_s {
+	u32	ip_v6_route_init_hashed_addr : 16;
+	u32	ip_v6_route_init_non_hashed_addr : 16;
+};
+union ipa_hwio_def_ipa_ipv6_route_init_values_u {
+	struct ipa_hwio_def_ipa_ipv6_route_init_values_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_bam_activated_ports_s {
+	u32	endpoints : 31;
+	u32	reserved0 : 1;
+};
+union ipa_hwio_def_ipa_bam_activated_ports_u {
+	struct ipa_hwio_def_ipa_bam_activated_ports_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_sys_pkt_proc_cntxt_base_s {
+	u32	zero : 3;
+	u32	addr : 29;
+};
+union ipa_hwio_def_ipa_sys_pkt_proc_cntxt_base_u {
+	struct ipa_hwio_def_ipa_sys_pkt_proc_cntxt_base_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_sys_pkt_proc_cntxt_base_msb_s {
+	u32 addr : 32;
+};
+union ipa_hwio_def_ipa_sys_pkt_proc_cntxt_base_msb_u {
+	struct ipa_hwio_def_ipa_sys_pkt_proc_cntxt_base_msb_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_local_pkt_proc_cntxt_base_s {
+	u32	zero : 3;
+	u32	addr : 15;
+	u32	reserved0 : 14;
+};
+union ipa_hwio_def_ipa_local_pkt_proc_cntxt_base_u {
+	struct ipa_hwio_def_ipa_local_pkt_proc_cntxt_base_s	def;
+	u32							value;
+};
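+/*
+ * Resource-group accounting: minimum/maximum credit limits for source
+ * groups 0-3 and destination groups 0-3, plus the matching read-only
+ * per-group usage counters.
+ */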
+struct ipa_hwio_def_ipa_src_rsrc_grp_01_rsrc_type_n_s {
+	u32	src_rsrc_grp_0_min_limit : 6;
+	u32	reserved0 : 2;
+	u32	src_rsrc_grp_0_max_limit : 6;
+	u32	reserved1 : 2;
+	u32	src_rsrc_grp_1_min_limit : 6;
+	u32	reserved2 : 2;
+	u32	src_rsrc_grp_1_max_limit : 6;
+	u32	reserved3 : 2;
+};
+union ipa_hwio_def_ipa_src_rsrc_grp_01_rsrc_type_n_u {
+	struct ipa_hwio_def_ipa_src_rsrc_grp_01_rsrc_type_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_src_rsrc_grp_23_rsrc_type_n_s {
+	u32	src_rsrc_grp_2_min_limit : 6;
+	u32	reserved0 : 2;
+	u32	src_rsrc_grp_2_max_limit : 6;
+	u32	reserved1 : 2;
+	u32	src_rsrc_grp_3_min_limit : 6;
+	u32	reserved2 : 2;
+	u32	src_rsrc_grp_3_max_limit : 6;
+	u32	reserved3 : 2;
+};
+union ipa_hwio_def_ipa_src_rsrc_grp_23_rsrc_type_n_u {
+	struct ipa_hwio_def_ipa_src_rsrc_grp_23_rsrc_type_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_src_rsrc_grp_0123_rsrc_type_cnt_n_s {
+	u32	src_rsrc_grp_0_cnt : 6;
+	u32	reserved0 : 2;
+	u32	src_rsrc_grp_1_cnt : 6;
+	u32	reserved1 : 2;
+	u32	src_rsrc_grp_2_cnt : 6;
+	u32	reserved2 : 2;
+	u32	src_rsrc_grp_3_cnt : 6;
+	u32	reserved3 : 2;
+};
+union ipa_hwio_def_ipa_src_rsrc_grp_0123_rsrc_type_cnt_n_u {
+	struct ipa_hwio_def_ipa_src_rsrc_grp_0123_rsrc_type_cnt_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_dst_rsrc_grp_01_rsrc_type_n_s {
+	u32	dst_rsrc_grp_0_min_limit : 6;
+	u32	reserved0 : 2;
+	u32	dst_rsrc_grp_0_max_limit : 6;
+	u32	reserved1 : 2;
+	u32	dst_rsrc_grp_1_min_limit : 6;
+	u32	reserved2 : 2;
+	u32	dst_rsrc_grp_1_max_limit : 6;
+	u32	reserved3 : 2;
+};
+union ipa_hwio_def_ipa_dst_rsrc_grp_01_rsrc_type_n_u {
+	struct ipa_hwio_def_ipa_dst_rsrc_grp_01_rsrc_type_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_dst_rsrc_grp_23_rsrc_type_n_s {
+	u32	dst_rsrc_grp_2_min_limit : 6;
+	u32	reserved0 : 2;
+	u32	dst_rsrc_grp_2_max_limit : 6;
+	u32	reserved1 : 2;
+	u32	dst_rsrc_grp_3_min_limit : 6;
+	u32	reserved2 : 2;
+	u32	dst_rsrc_grp_3_max_limit : 6;
+	u32	reserved3 : 2;
+};
+union ipa_hwio_def_ipa_dst_rsrc_grp_23_rsrc_type_n_u {
+	struct ipa_hwio_def_ipa_dst_rsrc_grp_23_rsrc_type_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_dst_rsrc_grp_0123_rsrc_type_cnt_n_s {
+	u32	dst_rsrc_grp_0_cnt : 6;
+	u32	reserved0 : 2;
+	u32	dst_rsrc_grp_1_cnt : 6;
+	u32	reserved1 : 2;
+	u32	dst_rsrc_grp_2_cnt : 6;
+	u32	reserved2 : 2;
+	u32	dst_rsrc_grp_3_cnt : 6;
+	u32	reserved3 : 2;
+};
+union ipa_hwio_def_ipa_dst_rsrc_grp_0123_rsrc_type_cnt_n_u {
+	struct ipa_hwio_def_ipa_dst_rsrc_grp_0123_rsrc_type_cnt_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_rsrc_grp_cfg_s {
+	u32	src_grp_special_valid : 1;
+	u32	reserved0 : 3;
+	u32	src_grp_special_index : 3;
+	u32	reserved1 : 1;
+	u32	dst_pipe_special_valid : 1;
+	u32	reserved2 : 3;
+	u32	dst_pipe_special_index : 5;
+	u32	reserved3 : 3;
+	u32	dst_grp_special_valid : 1;
+	u32	reserved4 : 3;
+	u32	dst_grp_special_index : 6;
+	u32	reserved5 : 2;
+};
+union ipa_hwio_def_ipa_rsrc_grp_cfg_u {
+	struct ipa_hwio_def_ipa_rsrc_grp_cfg_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_pipeline_disable_s {
+	u32	reserved0 : 3;
+	u32	rx_cmdq_splitter_dis : 1;
+	u32	reserved1 : 28;
+};
+union ipa_hwio_def_ipa_pipeline_disable_u {
+	struct ipa_hwio_def_ipa_pipeline_disable_s	def;
+	u32						value;
+};
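+/*
+ * Per-endpoint configuration, indexed by pipe number n
+ * (IPA_ENDP_INIT_*_n): suspend/delay control, checksum offload,
+ * header parsing and metadata handling, operating mode, aggregation,
+ * head-of-line-blocking drop, deaggregation, resource group,
+ * sequencer selection and status reporting.
+ */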
+struct ipa_hwio_def_ipa_endp_init_ctrl_n_s {
+	u32	endp_suspend : 1;
+	u32	endp_delay : 1;
+	u32	reserved0 : 30;
+};
+union ipa_hwio_def_ipa_endp_init_ctrl_n_u {
+	struct ipa_hwio_def_ipa_endp_init_ctrl_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_init_ctrl_scnd_n_s {
+	u32	reserved0 : 1;
+	u32	endp_delay : 1;
+	u32	reserved1 : 30;
+};
+union ipa_hwio_def_ipa_endp_init_ctrl_scnd_n_u {
+	struct ipa_hwio_def_ipa_endp_init_ctrl_scnd_n_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_init_cfg_n_s {
+	u32	frag_offload_en : 1;
+	u32	cs_offload_en : 2;
+	u32	cs_metadata_hdr_offset : 4;
+	u32	reserved0 : 1;
+	u32	gen_qmb_master_sel : 1;
+	u32	reserved1 : 23;
+};
+union ipa_hwio_def_ipa_endp_init_cfg_n_u {
+	struct ipa_hwio_def_ipa_endp_init_cfg_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_init_nat_n_s {
+	u32	nat_en : 2;
+	u32	reserved0 : 30;
+};
+union ipa_hwio_def_ipa_endp_init_nat_n_u {
+	struct ipa_hwio_def_ipa_endp_init_nat_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_init_hdr_n_s {
+	u32	hdr_len : 6;
+	u32	hdr_ofst_metadata_valid : 1;
+	u32	hdr_ofst_metadata : 6;
+	u32	hdr_additional_const_len : 6;
+	u32	hdr_ofst_pkt_size_valid : 1;
+	u32	hdr_ofst_pkt_size : 6;
+	u32	hdr_a5_mux : 1;
+	u32	hdr_len_inc_deagg_hdr : 1;
+	u32	hdr_len_msb : 2;
+	u32	hdr_ofst_metadata_msb : 2;
+};
+union ipa_hwio_def_ipa_endp_init_hdr_n_u {
+	struct ipa_hwio_def_ipa_endp_init_hdr_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_init_hdr_ext_n_s {
+	u32	hdr_endianness : 1;
+	u32	hdr_total_len_or_pad_valid : 1;
+	u32	hdr_total_len_or_pad : 1;
+	u32	hdr_payload_len_inc_padding : 1;
+	u32	hdr_total_len_or_pad_offset : 6;
+	u32	hdr_pad_to_alignment : 4;
+	u32	reserved0 : 2;
+	u32	hdr_total_len_or_pad_offset_msb : 2;
+	u32	hdr_ofst_pkt_size_msb : 2;
+	u32	hdr_additional_const_len_msb : 2;
+	u32	reserved1 : 10;
+};
+union ipa_hwio_def_ipa_endp_init_hdr_ext_n_u {
+	struct ipa_hwio_def_ipa_endp_init_hdr_ext_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_init_hdr_metadata_mask_n_s {
+	u32 metadata_mask : 32;
+};
+union ipa_hwio_def_ipa_endp_init_hdr_metadata_mask_n_u {
+	struct ipa_hwio_def_ipa_endp_init_hdr_metadata_mask_n_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_endp_init_hdr_metadata_n_s {
+	u32 metadata : 32;
+};
+union ipa_hwio_def_ipa_endp_init_hdr_metadata_n_u {
+	struct ipa_hwio_def_ipa_endp_init_hdr_metadata_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_endp_init_mode_n_s {
+	u32	mode : 3;
+	u32	dcph_enable : 1;
+	u32	dest_pipe_index : 5;
+	u32	reserved0 : 3;
+	u32	byte_threshold : 16;
+	u32	pipe_replicate_en : 1;
+	u32	pad_en : 1;
+	u32	reserved1 : 2;
+};
+union ipa_hwio_def_ipa_endp_init_mode_n_u {
+	struct ipa_hwio_def_ipa_endp_init_mode_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_init_aggr_n_s {
+	u32	aggr_en : 2;
+	u32	aggr_type : 3;
+	u32	aggr_byte_limit : 6;
+	u32	reserved0 : 1;
+	u32	aggr_time_limit : 5;
+	u32	aggr_pkt_limit : 6;
+	u32	aggr_sw_eof_active : 1;
+	u32	aggr_force_close : 1;
+	u32	reserved1 : 1;
+	u32	aggr_hard_byte_limit_enable : 1;
+	u32	aggr_gran_sel : 1;
+	u32	reserved2 : 4;
+};
+union ipa_hwio_def_ipa_endp_init_aggr_n_u {
+	struct ipa_hwio_def_ipa_endp_init_aggr_n_s	def;
+	u32						value;
+};
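+/*
+ * A short sketch of composing an aggregation setup with the union
+ * above: enable aggregation, set a byte limit (in HW-defined units)
+ * and a packet limit that fits the 6-bit field, then write the raw
+ * word out.  The ipa_write_reg_n() accessor and the chosen values are
+ * illustrative assumptions only:
+ *
+ *	union ipa_hwio_def_ipa_endp_init_aggr_n_u aggr = { .value = 0 };
+ *
+ *	aggr.def.aggr_en = 1;
+ *	aggr.def.aggr_byte_limit = 16;
+ *	aggr.def.aggr_pkt_limit = 30;
+ *	ipa_write_reg_n(IPA_ENDP_INIT_AGGR_n, pipe, aggr.value);
+ */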
+struct ipa_hwio_def_ipa_endp_init_hol_block_en_n_s {
+	u32	en : 1;
+	u32	reserved0 : 31;
+};
+union ipa_hwio_def_ipa_endp_init_hol_block_en_n_u {
+	struct ipa_hwio_def_ipa_endp_init_hol_block_en_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_endp_init_hol_block_timer_n_s {
+	u32	time_limit : 5;
+	u32	reserved0 : 3;
+	u32	gran_sel : 1;
+	u32	reserved1 : 23;
+};
+union ipa_hwio_def_ipa_endp_init_hol_block_timer_n_u {
+	struct ipa_hwio_def_ipa_endp_init_hol_block_timer_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_endp_init_deaggr_n_s {
+	u32	deaggr_hdr_len : 6;
+	u32	syspipe_err_detection : 1;
+	u32	packet_offset_valid : 1;
+	u32	packet_offset_location : 6;
+	u32	ignore_min_pkt_err : 1;
+	u32	reserved0 : 1;
+	u32	max_packet_len : 16;
+};
+union ipa_hwio_def_ipa_endp_init_deaggr_n_u {
+	struct ipa_hwio_def_ipa_endp_init_deaggr_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_init_rsrc_grp_n_s {
+	u32	rsrc_grp : 3;
+	u32	reserved0 : 29;
+};
+union ipa_hwio_def_ipa_endp_init_rsrc_grp_n_u {
+	struct ipa_hwio_def_ipa_endp_init_rsrc_grp_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_init_seq_n_s {
+	u32	hps_seq_type : 4;
+	u32	dps_seq_type : 4;
+	u32	hps_rep_seq_type : 4;
+	u32	dps_rep_seq_type : 4;
+	u32	reserved0 : 16;
+};
+union ipa_hwio_def_ipa_endp_init_seq_n_u {
+	struct ipa_hwio_def_ipa_endp_init_seq_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_status_n_s {
+	u32	status_en : 1;
+	u32	status_endp : 5;
+	u32	reserved0 : 3;
+	u32	status_pkt_suppress : 1;
+	u32	reserved1 : 22;
+};
+union ipa_hwio_def_ipa_endp_status_n_u {
+	struct ipa_hwio_def_ipa_endp_status_n_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_endp_filter_router_hsh_cfg_n_s {
+	u32	filter_hash_msk_src_id : 1;
+	u32	filter_hash_msk_src_ip_add : 1;
+	u32	filter_hash_msk_dst_ip_add : 1;
+	u32	filter_hash_msk_src_port : 1;
+	u32	filter_hash_msk_dst_port : 1;
+	u32	filter_hash_msk_protocol : 1;
+	u32	filter_hash_msk_metadata : 1;
+	u32	reserved0 : 9;
+	u32	router_hash_msk_src_id : 1;
+	u32	router_hash_msk_src_ip_add : 1;
+	u32	router_hash_msk_dst_ip_add : 1;
+	u32	router_hash_msk_src_port : 1;
+	u32	router_hash_msk_dst_port : 1;
+	u32	router_hash_msk_protocol : 1;
+	u32	router_hash_msk_metadata : 1;
+	u32	reserved1 : 9;
+};
+union ipa_hwio_def_ipa_endp_filter_router_hsh_cfg_n_u {
+	struct ipa_hwio_def_ipa_endp_filter_router_hsh_cfg_n_s	def;
+	u32							value;
+};
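+/*
+ * NLO registers: nlo_ack/data/status pipe settings (CFG1/CFG2), ACK
+ * and data size-limit windows, minimum DSM lengths, and the VP flush
+ * request/cookie/ack handshake.
+ */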
+struct ipa_hwio_def_ipa_nlo_pp_cfg1_s {
+	u32	nlo_ack_pp : 8;
+	u32	nlo_data_pp : 8;
+	u32	nlo_status_pp : 8;
+	u32	nlo_ack_max_vp : 6;
+	u32	reserved0 : 2;
+};
+union ipa_hwio_def_ipa_nlo_pp_cfg1_u {
+	struct ipa_hwio_def_ipa_nlo_pp_cfg1_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_nlo_pp_cfg2_s {
+	u32	nlo_ack_close_padd : 8;
+	u32	nlo_data_close_padd : 8;
+	u32	nlo_ack_buffer_mode : 1;
+	u32	nlo_data_buffer_mode : 1;
+	u32	nlo_status_buffer_mode : 1;
+	u32	reserved0 : 13;
+};
+union ipa_hwio_def_ipa_nlo_pp_cfg2_u {
+	struct ipa_hwio_def_ipa_nlo_pp_cfg2_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_nlo_pp_ack_limit_cfg_s {
+	u32	nlo_ack_lower_size : 16;
+	u32	nlo_ack_upper_size : 16;
+};
+union ipa_hwio_def_ipa_nlo_pp_ack_limit_cfg_u {
+	struct ipa_hwio_def_ipa_nlo_pp_ack_limit_cfg_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_nlo_pp_data_limit_cfg_s {
+	u32	nlo_data_lower_size : 16;
+	u32	nlo_data_upper_size : 16;
+};
+union ipa_hwio_def_ipa_nlo_pp_data_limit_cfg_u {
+	struct ipa_hwio_def_ipa_nlo_pp_data_limit_cfg_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_nlo_min_dsm_cfg_s {
+	u32	nlo_ack_min_dsm_len : 16;
+	u32	nlo_data_min_dsm_len : 16;
+};
+union ipa_hwio_def_ipa_nlo_min_dsm_cfg_u {
+	struct ipa_hwio_def_ipa_nlo_min_dsm_cfg_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_nlo_vp_flush_req_s {
+	u32	vp_flush_pp_indx : 8;
+	u32	reserved0 : 8;
+	u32	vp_flush_vp_indx : 8;
+	u32	reserved1 : 7;
+	u32	vp_flush_req : 1;
+};
+union ipa_hwio_def_ipa_nlo_vp_flush_req_u {
+	struct ipa_hwio_def_ipa_nlo_vp_flush_req_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_nlo_vp_flush_cookie_s {
+	u32 vp_flush_cookie : 32;
+};
+union ipa_hwio_def_ipa_nlo_vp_flush_cookie_u {
+	struct ipa_hwio_def_ipa_nlo_vp_flush_cookie_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_nlo_vp_flush_ack_s {
+	u32	vp_flush_ack : 1;
+	u32	reserved0 : 31;
+};
+union ipa_hwio_def_ipa_nlo_vp_flush_ack_u {
+	struct ipa_hwio_def_ipa_nlo_vp_flush_ack_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_nlo_vp_dsm_open_s {
+	u32 vp_dsm_open : 32;
+};
+union ipa_hwio_def_ipa_nlo_vp_dsm_open_u {
+	struct ipa_hwio_def_ipa_nlo_vp_dsm_open_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_nlo_vp_qbap_open_s {
+	u32 vp_qbap_open : 32;
+};
+union ipa_hwio_def_ipa_nlo_vp_qbap_open_u {
+	struct ipa_hwio_def_ipa_nlo_vp_qbap_open_s	def;
+	u32						value;
+};
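+/*
+ * Resource-manager debug DB: program a group/type/id selector into
+ * IPA_RSRC_MNGR_DB_CFG, then read back resource occupancy and list
+ * head/entry counts through the two read registers that follow.
+ */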
+struct ipa_hwio_def_ipa_rsrc_mngr_db_cfg_s {
+	u32	rsrc_grp_sel : 3;
+	u32	reserved0 : 1;
+	u32	rsrc_type_sel : 3;
+	u32	reserved1 : 1;
+	u32	rsrc_id_sel : 6;
+	u32	reserved2 : 18;
+};
+union ipa_hwio_def_ipa_rsrc_mngr_db_cfg_u {
+	struct ipa_hwio_def_ipa_rsrc_mngr_db_cfg_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rsrc_mngr_db_rsrc_read_s {
+	u32	rsrc_occupied : 1;
+	u32	rsrc_next_valid : 1;
+	u32	reserved0 : 2;
+	u32	rsrc_next_index : 6;
+	u32	reserved1 : 22;
+};
+union ipa_hwio_def_ipa_rsrc_mngr_db_rsrc_read_u {
+	struct ipa_hwio_def_ipa_rsrc_mngr_db_rsrc_read_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rsrc_mngr_db_list_read_s {
+	u32	rsrc_list_valid : 1;
+	u32	rsrc_list_hold : 1;
+	u32	reserved0 : 2;
+	u32	rsrc_list_head_rsrc : 6;
+	u32	reserved1 : 2;
+	u32	rsrc_list_head_cnt : 7;
+	u32	reserved2 : 1;
+	u32	rsrc_list_entry_cnt : 7;
+	u32	reserved3 : 5;
+};
+union ipa_hwio_def_ipa_rsrc_mngr_db_list_read_u {
+	struct ipa_hwio_def_ipa_rsrc_mngr_db_list_read_s	def;
+	u32							value;
+};
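+/*
+ * General debug access: the DEBUG_DATA window, testbus block/pipe
+ * select, step-mode breakpoint and status words, the DPL log control
+ * register, and the hardware command log buffer (address, write
+ * pointer, config and RAM pointers).
+ */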
+struct ipa_hwio_def_ipa_debug_data_s {
+	u32 debug_data : 32;
+};
+union ipa_hwio_def_ipa_debug_data_u {
+	struct ipa_hwio_def_ipa_debug_data_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_testbus_sel_s {
+	u32	testbus_en : 1;
+	u32	reserved0 : 3;
+	u32	external_block_select : 8;
+	u32	internal_block_select : 8;
+	u32	pipe_select : 5;
+	u32	reserved1 : 7;
+};
+union ipa_hwio_def_ipa_testbus_sel_u {
+	struct ipa_hwio_def_ipa_testbus_sel_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_step_mode_breakpoints_s {
+	u32 hw_en : 32;
+};
+union ipa_hwio_def_ipa_step_mode_breakpoints_u {
+	struct ipa_hwio_def_ipa_step_mode_breakpoints_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_step_mode_status_s {
+	u32 hw_en : 32;
+};
+union ipa_hwio_def_ipa_step_mode_status_u {
+	struct ipa_hwio_def_ipa_step_mode_status_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_s {
+	u32	reserved0 : 1;
+	u32	log_en : 1;
+	u32	reserved1 : 2;
+	u32	log_pipe : 5;
+	u32	reserved2 : 3;
+	u32	log_length : 8;
+	u32	log_reduction_en : 1;
+	u32	log_dpl_l2_remove_en : 1;
+	u32	reserved3 : 10;
+};
+union ipa_hwio_def_ipa_log_u {
+	struct ipa_hwio_def_ipa_log_s	def;
+	u32				value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_cmd_addr_s {
+	u32 start_addr : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_cmd_addr_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_cmd_addr_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_cmd_addr_msb_s {
+	u32 start_addr : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_cmd_addr_msb_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_cmd_addr_msb_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_cmd_write_ptr_s {
+	u32 writr_addr : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_cmd_write_ptr_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_cmd_write_ptr_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_cmd_write_ptr_msb_s {
+	u32 writr_addr : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_cmd_write_ptr_msb_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_cmd_write_ptr_msb_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_cmd_cfg_s {
+	u32	size : 16;
+	u32	enable : 1;
+	u32	skip_ddr_dma : 1;
+	u32	reserved0 : 14;
+};
+union ipa_hwio_def_ipa_log_buf_hw_cmd_cfg_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_cmd_cfg_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_cmd_ram_ptr_s {
+	u32	read_ptr : 14;
+	u32	reserved0 : 2;
+	u32	write_ptr : 14;
+	u32	reserved1 : 1;
+	u32	skip_ddr_wrap_happened : 1;
+};
+union ipa_hwio_def_ipa_log_buf_hw_cmd_ram_ptr_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_cmd_ram_ptr_s	def;
+	u32							value;
+};
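+/*
+ * Internal command-queue (CMDQ) debug interfaces.  Each queue exposes
+ * the same shape: a CMD register for pop/read requests, DATA_RD words
+ * carrying the entry fields, and STATUS/EMPTY/COUNT registers.  The
+ * RX splitter, TX commander, RX HPS, HPS-to-DPS, DPS-to-TX,
+ * ACK-manager, NTF TX and producer ACK-manager queues below all
+ * follow it.
+ */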
+struct ipa_hwio_def_ipa_rx_splt_cmdq_cmd_n_s {
+	u32	write_cmd : 1;
+	u32	pop_cmd : 1;
+	u32	release_rd_cmd : 1;
+	u32	release_wr_cmd : 1;
+	u32	release_rd_pkt : 1;
+	u32	release_wr_pkt : 1;
+	u32	release_rd_pkt_enhanced : 1;
+	u32	reserved0 : 25;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_cmd_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_cmd_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_cfg_n_s {
+	u32	block_rd : 1;
+	u32	block_wr : 1;
+	u32	reserved0 : 30;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_cfg_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_cfg_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_0_n_s {
+	u32	cmdq_packet_len_f : 16;
+	u32	cmdq_src_len_f : 16;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_0_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_0_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_1_n_s {
+	u32	cmdq_src_pipe_f : 8;
+	u32	cmdq_order_f : 2;
+	u32	cmdq_flags_f : 6;
+	u32	cmdq_opcode_f : 8;
+	u32	cmdq_metadata_f : 8;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_1_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_1_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_2_n_s {
+	u32 cmdq_addr_lsb_f : 32;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_2_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_2_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_3_n_s {
+	u32 cmdq_addr_msb_f : 32;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_3_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_3_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_0_n_s {
+	u32	cmdq_packet_len_f : 16;
+	u32	cmdq_src_len_f : 16;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_0_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_0_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_1_n_s {
+	u32	cmdq_src_pipe_f : 8;
+	u32	cmdq_order_f : 2;
+	u32	cmdq_flags_f : 6;
+	u32	cmdq_opcode_f : 8;
+	u32	cmdq_metadata_f : 8;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_1_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_1_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_2_n_s {
+	u32 cmdq_addr_lsb_f : 32;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_2_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_2_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_3_n_s {
+	u32 cmdq_addr_msb_f : 32;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_3_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_3_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_splt_cmdq_status_n_s {
+	u32	status : 1;
+	u32	cmdq_empty : 1;
+	u32	cmdq_full : 1;
+	u32	cmdq_count : 2;
+	u32	cmdq_depth : 2;
+	u32	reserved0 : 25;
+};
+union ipa_hwio_def_ipa_rx_splt_cmdq_status_n_u {
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_status_n_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_tx_commander_cmdq_status_s {
+	u32	status : 1;
+	u32	cmdq_empty : 1;
+	u32	cmdq_full : 1;
+	u32	reserved0 : 29;
+};
+union ipa_hwio_def_ipa_tx_commander_cmdq_status_u {
+	struct ipa_hwio_def_ipa_tx_commander_cmdq_status_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_cmd_s {
+	u32	write_cmd : 1;
+	u32	pop_cmd : 1;
+	u32	cmd_client : 3;
+	u32	rd_req : 1;
+	u32	reserved0 : 26;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_cmd_u {
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_cmd_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_0_s {
+	u32	cmdq_packet_len_f : 16;
+	u32	cmdq_dest_len_f : 16;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_0_u {
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_0_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_1_s {
+	u32	cmdq_src_pipe_f : 8;
+	u32	cmdq_order_f : 2;
+	u32	cmdq_flags_f : 6;
+	u32	cmdq_opcode_f : 8;
+	u32	cmdq_metadata_f : 8;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_1_u {
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_1_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_2_s {
+	u32 cmdq_addr_lsb_f : 32;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_2_u {
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_2_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_3_s {
+	u32 cmdq_addr_msb_f : 32;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_3_u {
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_3_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_status_s {
+	u32	status : 1;
+	u32	cmdq_full : 1;
+	u32	cmdq_depth : 7;
+	u32	reserved0 : 23;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_status_u {
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_status_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_status_empty_s {
+	u32	cmdq_empty : 5;
+	u32	reserved0 : 27;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_status_empty_u {
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_status_empty_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_count_s {
+	u32	fifo_count : 7;
+	u32	reserved0 : 25;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_count_u {
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_count_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_rx_hps_clients_min_depth_0_s {
+	u32	client_0_min_depth : 4;
+	u32	reserved0 : 4;
+	u32	client_1_min_depth : 4;
+	u32	reserved1 : 4;
+	u32	client_2_min_depth : 4;
+	u32	reserved2 : 4;
+	u32	client_3_min_depth : 4;
+	u32	client_4_min_depth : 4;
+};
+union ipa_hwio_def_ipa_rx_hps_clients_min_depth_0_u {
+	struct ipa_hwio_def_ipa_rx_hps_clients_min_depth_0_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_rx_hps_clients_max_depth_0_s {
+	u32	client_0_max_depth : 4;
+	u32	reserved0 : 4;
+	u32	client_1_max_depth : 4;
+	u32	reserved1 : 4;
+	u32	client_2_max_depth : 4;
+	u32	reserved2 : 4;
+	u32	client_3_max_depth : 4;
+	u32	client_4_max_depth : 4;
+};
+union ipa_hwio_def_ipa_rx_hps_clients_max_depth_0_u {
+	struct ipa_hwio_def_ipa_rx_hps_clients_max_depth_0_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_hps_dps_cmdq_cmd_s {
+	u32	write_cmd : 1;
+	u32	pop_cmd : 1;
+	u32	cmd_client : 5;
+	u32	rd_req : 1;
+	u32	reserved0 : 24;
+};
+union ipa_hwio_def_ipa_hps_dps_cmdq_cmd_u {
+	struct ipa_hwio_def_ipa_hps_dps_cmdq_cmd_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_hps_dps_cmdq_data_rd_0_s {
+	u32	cmdq_ctx_id_f : 4;
+	u32	cmdq_src_id_f : 8;
+	u32	cmdq_src_pipe_f : 5;
+	u32	cmdq_opcode_f : 2;
+	u32	cmdq_rep_f : 1;
+	u32	reserved0 : 12;
+};
+union ipa_hwio_def_ipa_hps_dps_cmdq_data_rd_0_u {
+	struct ipa_hwio_def_ipa_hps_dps_cmdq_data_rd_0_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_hps_dps_cmdq_status_s {
+	u32	status : 1;
+	u32	cmdq_full : 1;
+	u32	cmdq_depth : 6;
+	u32	reserved0 : 24;
+};
+union ipa_hwio_def_ipa_hps_dps_cmdq_status_u {
+	struct ipa_hwio_def_ipa_hps_dps_cmdq_status_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_hps_dps_cmdq_status_empty_s {
+	u32	cmdq_empty : 31;
+	u32	reserved0 : 1;
+};
+union ipa_hwio_def_ipa_hps_dps_cmdq_status_empty_u {
+	struct ipa_hwio_def_ipa_hps_dps_cmdq_status_empty_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_hps_dps_cmdq_count_s {
+	u32	fifo_count : 6;
+	u32	reserved0 : 26;
+};
+union ipa_hwio_def_ipa_hps_dps_cmdq_count_u {
+	struct ipa_hwio_def_ipa_hps_dps_cmdq_count_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_dps_tx_cmdq_cmd_s {
+	u32	write_cmd : 1;
+	u32	pop_cmd : 1;
+	u32	cmd_client : 4;
+	u32	reserved0 : 1;
+	u32	rd_req : 1;
+	u32	reserved1 : 24;
+};
+union ipa_hwio_def_ipa_dps_tx_cmdq_cmd_u {
+	struct ipa_hwio_def_ipa_dps_tx_cmdq_cmd_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_dps_tx_cmdq_data_rd_0_s {
+	u32	cmdq_ctx_id_f : 4;
+	u32	cmdq_src_id_f : 8;
+	u32	cmdq_src_pipe_f : 5;
+	u32	cmdq_opcode_f : 2;
+	u32	cmdq_rep_f : 1;
+	u32	reserved0 : 12;
+};
+union ipa_hwio_def_ipa_dps_tx_cmdq_data_rd_0_u {
+	struct ipa_hwio_def_ipa_dps_tx_cmdq_data_rd_0_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_dps_tx_cmdq_status_s {
+	u32	status : 1;
+	u32	cmdq_full : 1;
+	u32	cmdq_depth : 7;
+	u32	reserved0 : 23;
+};
+union ipa_hwio_def_ipa_dps_tx_cmdq_status_u {
+	struct ipa_hwio_def_ipa_dps_tx_cmdq_status_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_dps_tx_cmdq_status_empty_s {
+	u32	cmdq_empty : 10;
+	u32	reserved0 : 22;
+};
+union ipa_hwio_def_ipa_dps_tx_cmdq_status_empty_u {
+	struct ipa_hwio_def_ipa_dps_tx_cmdq_status_empty_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_dps_tx_cmdq_count_s {
+	u32	fifo_count : 7;
+	u32	reserved0 : 25;
+};
+union ipa_hwio_def_ipa_dps_tx_cmdq_count_u {
+	struct ipa_hwio_def_ipa_dps_tx_cmdq_count_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_en_s {
+	u32	bitmap : 3;
+	u32	reserved0 : 29;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_en_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_en_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_wr_n_rd_sel_s {
+	u32	bitmap : 3;
+	u32	reserved0 : 29;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_wr_n_rd_sel_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_wr_n_rd_sel_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_cli_mux_s {
+	u32	all_cli_mux_concat : 12;
+	u32	reserved0 : 20;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_cli_mux_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_cli_mux_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_0_cli_n_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_0_cli_n_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_0_cli_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_1_cli_n_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_1_cli_n_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_1_cli_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_2_cli_n_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_2_cli_n_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_2_cli_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_3_cli_n_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_3_cli_n_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_comp_val_3_cli_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_0_cli_n_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_0_cli_n_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_0_cli_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_1_cli_n_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_1_cli_n_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_1_cli_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_2_cli_n_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_2_cli_n_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_2_cli_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_3_cli_n_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_3_cli_n_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_mask_val_3_cli_n_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_log_buf_hw_snif_legacy_rx_s {
+	u32	src_group_sel : 3;
+	u32	reserved0 : 29;
+};
+union ipa_hwio_def_ipa_log_buf_hw_snif_legacy_rx_u {
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_legacy_rx_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_ackmngr_cmdq_cmd_s {
+	u32	write_cmd : 1;
+	u32	pop_cmd : 1;
+	u32	cmd_client : 5;
+	u32	rd_req : 1;
+	u32	reserved0 : 24;
+};
+union ipa_hwio_def_ipa_ackmngr_cmdq_cmd_u {
+	struct ipa_hwio_def_ipa_ackmngr_cmdq_cmd_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_ackmngr_cmdq_data_rd_s {
+	u32	cmdq_src_id : 8;
+	u32	cmdq_length : 16;
+	u32	cmdq_origin : 1;
+	u32	cmdq_sent : 1;
+	u32	cmdq_src_id_valid : 1;
+	u32	reserved0 : 5;
+};
+union ipa_hwio_def_ipa_ackmngr_cmdq_data_rd_u {
+	struct ipa_hwio_def_ipa_ackmngr_cmdq_data_rd_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_ackmngr_cmdq_status_s {
+	u32	status : 1;
+	u32	cmdq_full : 1;
+	u32	cmdq_depth : 7;
+	u32	reserved0 : 23;
+};
+union ipa_hwio_def_ipa_ackmngr_cmdq_status_u {
+	struct ipa_hwio_def_ipa_ackmngr_cmdq_status_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_ackmngr_cmdq_status_empty_s {
+	u32	cmdq_empty : 13;
+	u32	reserved0 : 19;
+};
+union ipa_hwio_def_ipa_ackmngr_cmdq_status_empty_u {
+	struct ipa_hwio_def_ipa_ackmngr_cmdq_status_empty_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_ackmngr_cmdq_count_s {
+	u32	fifo_count : 7;
+	u32	reserved0 : 25;
+};
+union ipa_hwio_def_ipa_ackmngr_cmdq_count_u {
+	struct ipa_hwio_def_ipa_ackmngr_cmdq_count_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_gsi_fifo_status_ctrl_s {
+	u32	ipa_gsi_fifo_status_port_sel : 5;
+	u32	ipa_gsi_fifo_status_en : 1;
+	u32	reserved0 : 26;
+};
+union ipa_hwio_def_ipa_gsi_fifo_status_ctrl_u {
+	struct ipa_hwio_def_ipa_gsi_fifo_status_ctrl_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_gsi_tlv_fifo_status_s {
+	u32	fifo_wr_ptr : 8;
+	u32	fifo_rd_ptr : 8;
+	u32	fifo_rd_pub_ptr : 8;
+	u32	fifo_empty : 1;
+	u32	fifo_empty_pub : 1;
+	u32	fifo_almost_full : 1;
+	u32	fifo_full : 1;
+	u32	fifo_almost_full_pub : 1;
+	u32	fifo_full_pub : 1;
+	u32	fifo_head_is_bubble : 1;
+	u32	reserved0 : 1;
+};
+union ipa_hwio_def_ipa_gsi_tlv_fifo_status_u {
+	struct ipa_hwio_def_ipa_gsi_tlv_fifo_status_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_gsi_aos_fifo_status_s {
+	u32	fifo_wr_ptr : 8;
+	u32	fifo_rd_ptr : 8;
+	u32	fifo_rd_pub_ptr : 8;
+	u32	fifo_empty : 1;
+	u32	fifo_empty_pub : 1;
+	u32	fifo_almost_full : 1;
+	u32	fifo_full : 1;
+	u32	fifo_almost_full_pub : 1;
+	u32	fifo_full_pub : 1;
+	u32	fifo_head_is_bubble : 1;
+	u32	reserved0 : 1;
+};
+union ipa_hwio_def_ipa_gsi_aos_fifo_status_u {
+	struct ipa_hwio_def_ipa_gsi_aos_fifo_status_s	def;
+	u32						value;
+};
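+/*
+ * Software log-buffer filters: eight 32-bit compare values
+ * (COMP_VAL_0..7) paired with eight mask values (MASK_VAL_0..7).
+ */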
+struct ipa_hwio_def_ipa_log_buf_sw_comp_val_0_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_comp_val_0_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_comp_val_0_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_comp_val_1_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_comp_val_1_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_comp_val_1_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_comp_val_2_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_comp_val_2_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_comp_val_2_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_comp_val_3_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_comp_val_3_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_comp_val_3_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_comp_val_4_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_comp_val_4_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_comp_val_4_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_comp_val_5_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_comp_val_5_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_comp_val_5_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_comp_val_6_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_comp_val_6_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_comp_val_6_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_comp_val_7_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_comp_val_7_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_comp_val_7_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_mask_val_0_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_mask_val_0_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_mask_val_0_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_mask_val_1_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_mask_val_1_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_mask_val_1_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_mask_val_2_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_mask_val_2_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_mask_val_2_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_mask_val_3_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_mask_val_3_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_mask_val_3_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_mask_val_4_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_mask_val_4_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_mask_val_4_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_mask_val_5_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_mask_val_5_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_mask_val_5_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_mask_val_6_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_mask_val_6_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_mask_val_6_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_sw_mask_val_7_s {
+	u32 value : 32;
+};
+union ipa_hwio_def_ipa_log_buf_sw_mask_val_7_u {
+	struct ipa_hwio_def_ipa_log_buf_sw_mask_val_7_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_ntf_tx_cmdq_cmd_s {
+	u32	write_cmd : 1;
+	u32	pop_cmd : 1;
+	u32	cmd_client : 5;
+	u32	rd_req : 1;
+	u32	reserved0 : 24;
+};
+union ipa_hwio_def_ipa_ntf_tx_cmdq_cmd_u {
+	struct ipa_hwio_def_ipa_ntf_tx_cmdq_cmd_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_ntf_tx_cmdq_data_rd_0_s {
+	u32	cmdq_ctx_id_f : 4;
+	u32	cmdq_src_id_f : 8;
+	u32	cmdq_src_pipe_f : 5;
+	u32	cmdq_opcode_f : 2;
+	u32	cmdq_rep_f : 1;
+	u32	reserved0 : 12;
+};
+union ipa_hwio_def_ipa_ntf_tx_cmdq_data_rd_0_u {
+	struct ipa_hwio_def_ipa_ntf_tx_cmdq_data_rd_0_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_ntf_tx_cmdq_status_s {
+	u32	status : 1;
+	u32	cmdq_full : 1;
+	u32	cmdq_depth : 7;
+	u32	reserved0 : 23;
+};
+union ipa_hwio_def_ipa_ntf_tx_cmdq_status_u {
+	struct ipa_hwio_def_ipa_ntf_tx_cmdq_status_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_ntf_tx_cmdq_status_empty_s {
+	u32	cmdq_empty : 31;
+	u32	reserved0 : 1;
+};
+union ipa_hwio_def_ipa_ntf_tx_cmdq_status_empty_u {
+	struct ipa_hwio_def_ipa_ntf_tx_cmdq_status_empty_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_ntf_tx_cmdq_count_s {
+	u32	fifo_count : 7;
+	u32	reserved0 : 25;
+};
+union ipa_hwio_def_ipa_ntf_tx_cmdq_count_u {
+	struct ipa_hwio_def_ipa_ntf_tx_cmdq_count_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_cmd_s {
+	u32	write_cmd : 1;
+	u32	pop_cmd : 1;
+	u32	cmd_client : 5;
+	u32	rd_req : 1;
+	u32	reserved0 : 24;
+};
+union ipa_hwio_def_ipa_prod_ackmngr_cmdq_cmd_u {
+	struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_cmd_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_data_rd_s {
+	u32	cmdq_src_id : 8;
+	u32	cmdq_length : 16;
+	u32	cmdq_origin : 1;
+	u32	cmdq_sent : 1;
+	u32	cmdq_src_id_valid : 1;
+	u32	cmdq_userdata : 5;
+};
+union ipa_hwio_def_ipa_prod_ackmngr_cmdq_data_rd_u {
+	struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_data_rd_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_status_s {
+	u32	status : 1;
+	u32	cmdq_full : 1;
+	u32	cmdq_depth : 7;
+	u32	reserved0 : 23;
+};
+union ipa_hwio_def_ipa_prod_ackmngr_cmdq_status_u {
+	struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_status_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_status_empty_s {
+	u32	cmdq_empty : 31;
+	u32	reserved0 : 1;
+};
+union ipa_hwio_def_ipa_prod_ackmngr_cmdq_status_empty_u {
+	struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_status_empty_s
+		def;
+	u32 value;
+};
+struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_count_s {
+	u32	fifo_count : 7;
+	u32	reserved0 : 25;
+};
+union ipa_hwio_def_ipa_prod_ackmngr_cmdq_count_u {
+	struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_count_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_spare_reg_1_s {
+	u32	spare_bit0 : 1;
+	u32	spare_bit1 : 1;
+	u32	genqmb_aooowr : 1;
+	u32	spare_bit3 : 1;
+	u32	spare_bit4 : 1;
+	u32	acl_inorder_multi_disable : 1;
+	u32	acl_dispatcher_frag_notif_check_disable : 1;
+	u32	acl_dispatcher_frag_notif_check_each_cmd_disable : 1;
+	u32	spare_bit8 : 1;
+	u32	acl_dispatcher_frag_notif_check_notif_mid_disable : 1;
+	u32	acl_dispatcher_pkt_check_disable : 1;
+	u32	tx_gives_sspnd_ack_on_open_aggr_frame : 1;
+	u32	spare_bit12 : 1;
+	u32	tx_block_aggr_query_on_holb_packet : 1;
+	u32	frag_mngr_fairness_eviction_on_constructing : 1;
+	u32	rx_cmdq_splitter_cmdq_pending_mux_disable : 1;
+	u32	qmb_ram_rd_cache_disable : 1;
+	u32	rx_stall_on_mbim_deaggr_error : 1;
+	u32	rx_stall_on_gen_deaggr_error : 1;
+	u32	spare_bit19 : 1;
+	u32	revert_warb_fix : 1;
+	u32	gsi_if_out_of_buf_stop_reset_mask_enable : 1;
+	u32	bam_idle_in_ipa_misc_cgc_en : 1;
+	u32	spare_bit23 : 1;
+	u32	spare_bit24 : 1;
+	u32	spare_bit25 : 1;
+	u32	ram_slaveway_access_protection_disable : 1;
+	u32	dcph_ram_rd_prefetch_disable : 1;
+	u32	warb_force_arb_round_finish_special_disable : 1;
+	u32	spare_ackinj_pipe8_mask_enable : 1;
+	u32	spare_bit30 : 1;
+	u32	spare_bit31 : 1;
+};
+union ipa_hwio_def_ipa_spare_reg_1_u {
+	struct ipa_hwio_def_ipa_spare_reg_1_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_spare_reg_2_s {
+	u32	tx_bresp_inj_with_flop : 1;
+	u32	cmdq_split_not_wait_data_desc_prior_hdr_push : 1;
+	u32	spare_bits : 30;
+};
+union ipa_hwio_def_ipa_spare_reg_2_u {
+	struct ipa_hwio_def_ipa_spare_reg_2_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_endp_gsi_cfg1_n_s {
+	u32	reserved0 : 16;
+	u32	endp_en : 1;
+	u32	reserved1 : 14;
+	u32	init_endp : 1;
+};
+union ipa_hwio_def_ipa_endp_gsi_cfg1_n_u {
+	struct ipa_hwio_def_ipa_endp_gsi_cfg1_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_gsi_cfg_tlv_n_s {
+	u32	fifo_base_addr : 16;
+	u32	fifo_size : 8;
+	u32	reserved0 : 8;
+};
+union ipa_hwio_def_ipa_endp_gsi_cfg_tlv_n_u {
+	struct ipa_hwio_def_ipa_endp_gsi_cfg_tlv_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_endp_gsi_cfg_aos_n_s {
+	u32	fifo_base_addr : 16;
+	u32	fifo_size : 8;
+	u32	reserved0 : 8;
+};
+union ipa_hwio_def_ipa_endp_gsi_cfg_aos_n_u {
+	struct ipa_hwio_def_ipa_endp_gsi_cfg_aos_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_ctxh_ctrl_s {
+	u32	ctxh_lock_id : 4;
+	u32	reserved0 : 27;
+	u32	ctxh_lock : 1;
+};
+union ipa_hwio_def_ipa_ctxh_ctrl_u {
+	struct ipa_hwio_def_ipa_ctxh_ctrl_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_irq_stts_ee_n_s {
+	u32	bad_snoc_access_irq : 1;
+	u32	reserved0 : 1;
+	u32	uc_irq_0 : 1;
+	u32	uc_irq_1 : 1;
+	u32	uc_irq_2 : 1;
+	u32	uc_irq_3 : 1;
+	u32	uc_in_q_not_empty_irq : 1;
+	u32	uc_rx_cmd_q_not_full_irq : 1;
+	u32	proc_to_uc_ack_q_not_empty_irq : 1;
+	u32	rx_err_irq : 1;
+	u32	deaggr_err_irq : 1;
+	u32	tx_err_irq : 1;
+	u32	step_mode_irq : 1;
+	u32	proc_err_irq : 1;
+	u32	tx_suspend_irq : 1;
+	u32	tx_holb_drop_irq : 1;
+	u32	bam_gsi_idle_irq : 1;
+	u32	pipe_yellow_marker_below_irq : 1;
+	u32	pipe_red_marker_below_irq : 1;
+	u32	pipe_yellow_marker_above_irq : 1;
+	u32	pipe_red_marker_above_irq : 1;
+	u32	ucp_irq : 1;
+	u32	reserved1 : 1;
+	u32	gsi_ee_irq : 1;
+	u32	gsi_ipa_if_tlv_rcvd_irq : 1;
+	u32	gsi_uc_irq : 1;
+	u32	tlv_len_min_dsm_irq : 1;
+	u32	reserved2 : 5;
+};
+union ipa_hwio_def_ipa_irq_stts_ee_n_u {
+	struct ipa_hwio_def_ipa_irq_stts_ee_n_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_irq_en_ee_n_s {
+	u32	bad_snoc_access_irq_en : 1;
+	u32	reserved0 : 1;
+	u32	uc_irq_0_irq_en : 1;
+	u32	uc_irq_1_irq_en : 1;
+	u32	uc_irq_2_irq_en : 1;
+	u32	uc_irq_3_irq_en : 1;
+	u32	uc_in_q_not_empty_irq_en : 1;
+	u32	uc_rx_cmd_q_not_full_irq_en : 1;
+	u32	proc_to_uc_ack_q_not_empty_irq_en : 1;
+	u32	rx_err_irq_en : 1;
+	u32	deaggr_err_irq_en : 1;
+	u32	tx_err_irq_en : 1;
+	u32	step_mode_irq_en : 1;
+	u32	proc_err_irq_en : 1;
+	u32	tx_suspend_irq_en : 1;
+	u32	tx_holb_drop_irq_en : 1;
+	u32	bam_gsi_idle_irq_en : 1;
+	u32	pipe_yellow_marker_below_irq_en : 1;
+	u32	pipe_red_marker_below_irq_en : 1;
+	u32	pipe_yellow_marker_above_irq_en : 1;
+	u32	pipe_red_marker_above_irq_en : 1;
+	u32	ucp_irq_en : 1;
+	u32	reserved1 : 1;
+	u32	gsi_ee_irq_en : 1;
+	u32	gsi_ipa_if_tlv_rcvd_irq_en : 1;
+	u32	gsi_uc_irq_en : 1;
+	u32	tlv_len_min_dsm_irq_en : 1;
+	u32	reserved2 : 5;
+};
+union ipa_hwio_def_ipa_irq_en_ee_n_u {
+	struct ipa_hwio_def_ipa_irq_en_ee_n_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_snoc_fec_ee_n_s {
+	u32	client : 8;
+	u32	qmb_index : 1;
+	u32	reserved0 : 3;
+	u32	tid : 4;
+	u32	reserved1 : 15;
+	u32	read_not_write : 1;
+};
+union ipa_hwio_def_ipa_snoc_fec_ee_n_u {
+	struct ipa_hwio_def_ipa_snoc_fec_ee_n_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_fec_addr_ee_n_s {
+	u32 addr : 32;
+};
+union ipa_hwio_def_ipa_fec_addr_ee_n_u {
+	struct ipa_hwio_def_ipa_fec_addr_ee_n_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_fec_attr_ee_n_s {
+	u32	opcode : 6;
+	u32	error_info : 26;
+};
+union ipa_hwio_def_ipa_fec_attr_ee_n_u {
+	struct ipa_hwio_def_ipa_fec_attr_ee_n_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_suspend_irq_info_ee_n_s {
+	u32	endpoints : 31;
+	u32	reserved0 : 1;
+};
+union ipa_hwio_def_ipa_suspend_irq_info_ee_n_u {
+	struct ipa_hwio_def_ipa_suspend_irq_info_ee_n_s def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_suspend_irq_en_ee_n_s {
+	u32	endpoints : 31;
+	u32	reserved0 : 1;
+};
+union ipa_hwio_def_ipa_suspend_irq_en_ee_n_u {
+	struct ipa_hwio_def_ipa_suspend_irq_en_ee_n_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_holb_drop_irq_info_ee_n_s {
+	u32	reserved0 : 13;
+	u32	endpoints : 18;
+	u32	reserved1 : 1;
+};
+union ipa_hwio_def_ipa_holb_drop_irq_info_ee_n_u {
+	struct ipa_hwio_def_ipa_holb_drop_irq_info_ee_n_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_log_buf_status_addr_s {
+	u32 start_addr : 32;
+};
+union ipa_hwio_def_ipa_log_buf_status_addr_u {
+	struct ipa_hwio_def_ipa_log_buf_status_addr_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_status_addr_msb_s {
+	u32 start_addr : 32;
+};
+union ipa_hwio_def_ipa_log_buf_status_addr_msb_u {
+	struct ipa_hwio_def_ipa_log_buf_status_addr_msb_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_log_buf_status_write_ptr_s {
+	u32 write_addr : 32;
+};
+union ipa_hwio_def_ipa_log_buf_status_write_ptr_u {
+	struct ipa_hwio_def_ipa_log_buf_status_write_ptr_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_log_buf_status_write_ptr_msb_s {
+	u32 write_addr : 32;
+};
+union ipa_hwio_def_ipa_log_buf_status_write_ptr_msb_u {
+	struct ipa_hwio_def_ipa_log_buf_status_write_ptr_msb_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_log_buf_status_cfg_s {
+	u32	size : 16;
+	u32	enable : 1;
+	u32	reserved0 : 15;
+};
+union ipa_hwio_def_ipa_log_buf_status_cfg_u {
+	struct ipa_hwio_def_ipa_log_buf_status_cfg_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_log_buf_status_ram_ptr_s {
+	u32	read_ptr : 16;
+	u32	write_ptr : 16;
+};
+union ipa_hwio_def_ipa_log_buf_status_ram_ptr_u {
+	struct ipa_hwio_def_ipa_log_buf_status_ram_ptr_s	def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_sys_addr_s {
+	u32 addr : 32;
+};
+union ipa_hwio_def_ipa_uc_qmb_sys_addr_u {
+	struct ipa_hwio_def_ipa_uc_qmb_sys_addr_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_sys_addr_msb_s {
+	u32 addr_msb : 32;
+};
+union ipa_hwio_def_ipa_uc_qmb_sys_addr_msb_u {
+	struct ipa_hwio_def_ipa_uc_qmb_sys_addr_msb_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_local_addr_s {
+	u32	addr : 18;
+	u32	reserved0 : 14;
+};
+union ipa_hwio_def_ipa_uc_qmb_local_addr_u {
+	struct ipa_hwio_def_ipa_uc_qmb_local_addr_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_length_s {
+	u32	length : 7;
+	u32	reserved0 : 25;
+};
+union ipa_hwio_def_ipa_uc_qmb_length_u {
+	struct ipa_hwio_def_ipa_uc_qmb_length_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_trigger_s {
+	u32	direction : 1;
+	u32	reserved0 : 3;
+	u32	posting : 2;
+	u32	reserved1 : 26;
+};
+union ipa_hwio_def_ipa_uc_qmb_trigger_u {
+	struct ipa_hwio_def_ipa_uc_qmb_trigger_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_pending_tid_s {
+	u32	tid : 6;
+	u32	reserved0 : 2;
+	u32	error_bus : 1;
+	u32	reserved1 : 3;
+	u32	error_max_os : 1;
+	u32	reserved2 : 3;
+	u32	error_max_comp : 1;
+	u32	reserved3 : 3;
+	u32	error_security : 1;
+	u32	reserved4 : 11;
+};
+union ipa_hwio_def_ipa_uc_qmb_pending_tid_u {
+	struct ipa_hwio_def_ipa_uc_qmb_pending_tid_s	def;
+	u32						value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_completed_rd_fifo_peek_s {
+	u32	tid : 6;
+	u32	reserved0 : 2;
+	u32	error : 1;
+	u32	reserved1 : 3;
+	u32	valid : 1;
+	u32	reserved2 : 19;
+};
+union ipa_hwio_def_ipa_uc_qmb_completed_rd_fifo_peek_u {
+	struct ipa_hwio_def_ipa_uc_qmb_completed_rd_fifo_peek_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_completed_wr_fifo_peek_s {
+	u32	tid : 6;
+	u32	reserved0 : 2;
+	u32	error : 1;
+	u32	reserved1 : 3;
+	u32	valid : 1;
+	u32	reserved2 : 19;
+};
+union ipa_hwio_def_ipa_uc_qmb_completed_wr_fifo_peek_u {
+	struct ipa_hwio_def_ipa_uc_qmb_completed_wr_fifo_peek_s def;
+	u32							value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_misc_s {
+	u32	user : 10;
+	u32	reserved0 : 2;
+	u32	rd_priority : 2;
+	u32	reserved1 : 2;
+	u32	wr_priority : 2;
+	u32	reserved2 : 2;
+	u32	ooord : 1;
+	u32	reserved3 : 3;
+	u32	ooowr : 1;
+	u32	reserved4 : 3;
+	u32	swap : 1;
+	u32	irq_coal : 1;
+	u32	posted_stall : 1;
+	u32	qmb_hready_bcr : 1;
+};
+union ipa_hwio_def_ipa_uc_qmb_misc_u {
+	struct ipa_hwio_def_ipa_uc_qmb_misc_s	def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_status_s {
+	u32	max_outstanding_rd : 4;
+	u32	outstanding_rd_cnt : 4;
+	u32	completed_rd_cnt : 4;
+	u32	completed_rd_fifo_full : 1;
+	u32	reserved0 : 3;
+	u32	max_outstanding_wr : 4;
+	u32	outstanding_wr_cnt : 4;
+	u32	completed_wr_cnt : 4;
+	u32	completed_wr_fifo_full : 1;
+	u32	reserved1 : 3;
+};
+union ipa_hwio_def_ipa_uc_qmb_status_u {
+	struct ipa_hwio_def_ipa_uc_qmb_status_s def;
+	u32					value;
+};
+struct ipa_hwio_def_ipa_uc_qmb_bus_attrib_s {
+	u32	memtype : 3;
+	u32	reserved0 : 1;
+	u32	noallocate : 1;
+	u32	reserved1 : 3;
+	u32	innershared : 1;
+	u32	reserved2 : 3;
+	u32	shared : 1;
+	u32	reserved3 : 19;
+};
+union ipa_hwio_def_ipa_uc_qmb_bus_attrib_u {
+	struct ipa_hwio_def_ipa_uc_qmb_bus_attrib_s	def;
+	u32						value;
+};
+#endif
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_pkt_cntxt.h b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_pkt_cntxt.h
new file mode 100644
index 0000000..ab31a4f
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_pkt_cntxt.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#if !defined(_IPA_PKT_CNTXT_H_)
+#define _IPA_PKT_CNTXT_H_
+
+#define IPA_HW_PKT_CTNTX_MAX        0x10
+#define IPA_HW_NUM_SAVE_PKT_CTNTX   0x8
+#define IPA_HW_PKT_CTNTX_START_ADDR 0xE434CA00
+#define IPA_HW_PKT_CTNTX_SIZE       (sizeof(ipa_pkt_ctntx_opcode_state_s) + \
+				     sizeof(ipa_pkt_ctntx_u))
+
+/*
+ * Packet Context States
+ */
+enum ipa_hw_pkt_cntxt_state_e {
+	IPA_HW_PKT_CNTXT_STATE_HFETCHER_INIT = 1,
+	IPA_HW_PKT_CNTXT_STATE_HFETCHER_DMAR,
+	IPA_HW_PKT_CNTXT_STATE_HFETCHER_DMAR_REP,
+	IPA_HW_PKT_CNTXT_STATE_H_DCPH,
+	IPA_HW_PKT_CNTXT_STATE_PKT_PARSER,
+	IPA_HW_PKT_CNTXT_STATE_FILTER_NAT,
+	IPA_HW_PKT_CNTXT_STATE_ROUTER,
+	IPA_HW_PKT_CNTXT_STATE_HDRI,
+	IPA_HW_PKT_CNTXT_STATE_UCP,
+	IPA_HW_PKT_CNTXT_STATE_ENQUEUER,
+	IPA_HW_PKT_CNTXT_STATE_DFETCHER,
+	IPA_HW_PKT_CNTXT_STATE_D_DCPH,
+	IPA_HW_PKT_CNTXT_STATE_DISPATCHER,
+	IPA_HW_PKT_CNTXT_STATE_TX,
+	IPA_HW_PKT_CNTXT_STATE_TX_ZLT,
+	IPA_HW_PKT_CNTXT_STATE_DFETCHER_DMAR,
+	IPA_HW_PKT_CNTXT_STATE_DCMP,
+};
+
+/*
+ * Packet Context fields as received from VI/Design
+ */
+struct ipa_pkt_ctntx_s {
+	u64	opcode                           : 8;
+	u64	state                            : 5;
+	u64	not_used_1                       : 2;
+	u64	tx_pkt_dma_done                  : 1;
+	u64	exc_deagg                        : 1;
+	u64	exc_pkt_version                  : 1;
+	u64	exc_pkt_len                      : 1;
+	u64	exc_threshold                    : 1;
+	u64	exc_sw                           : 1;
+	u64	exc_nat                          : 1;
+	u64	exc_frag_miss                    : 1;
+	u64	filter_bypass                    : 1;
+	u64	router_bypass                    : 1;
+	u64	nat_bypass                       : 1;
+	u64	hdri_bypass                      : 1;
+	u64	dcph_bypass                      : 1;
+	u64	security_credentials_select      : 1;
+	u64	pkt_2nd_pass                     : 1;
+	u64	xlat_bypass                      : 1;
+	u64	dcph_valid                       : 1;
+	u64	ucp_on                           : 1;
+	u64	replication                      : 1;
+	u64	src_status_en                    : 1;
+	u64	dest_status_en                   : 1;
+	u64	frag_status_en                   : 1;
+	u64	eot_dest                         : 1;
+	u64	eot_notif                        : 1;
+	u64	prev_eot_dest                    : 1;
+	u64	src_hdr_len                      : 8;
+	u64	tx_valid_sectors                 : 8;
+	u64	rx_flags                         : 8;
+	u64	rx_packet_length                 : 16;
+	u64	revised_packet_length            : 16;
+	u64	frag_en                          : 1;
+	u64	frag_bypass                      : 1;
+	u64	frag_process                     : 1;
+	u64	notif_pipe                       : 5;
+	u64	src_id                           : 8;
+	u64	tx_pkt_transferred               : 1;
+	u64	src_pipe                         : 5;
+	u64	dest_pipe                        : 5;
+	u64	frag_pipe                        : 5;
+	u64	ihl_offset                       : 8;
+	u64	protocol                         : 8;
+	u64	tos                              : 8;
+	u64	id                               : 16;
+	u64	v6_reserved                      : 4;
+	u64	ff                               : 1;
+	u64	mf                               : 1;
+	u64	pkt_israg                        : 1;
+	u64	tx_holb_timer_overflow           : 1;
+	u64	tx_holb_timer_running            : 1;
+	u64	trnseq_0                         : 3;
+	u64	trnseq_1                         : 3;
+	u64	trnseq_2                         : 3;
+	u64	trnseq_3                         : 3;
+	u64	trnseq_4                         : 3;
+	u64	trnseq_ex_length                 : 8;
+	u64	trnseq_4_length                  : 8;
+	u64	trnseq_4_offset                  : 8;
+	u64	dps_tx_pop_cnt                   : 2;
+	u64	dps_tx_push_cnt                  : 2;
+	u64	vol_ic_dcph_cfg                  : 1;
+	u64	vol_ic_tag_stts                  : 1;
+	u64	vol_ic_pxkt_init_e               : 1;
+	u64	vol_ic_pkt_init                  : 1;
+	u64	tx_holb_counter                  : 32;
+	u64	trnseq_0_length                  : 8;
+	u64	trnseq_0_offset                  : 8;
+	u64	trnseq_1_length                  : 8;
+	u64	trnseq_1_offset                  : 8;
+	u64	trnseq_2_length                  : 8;
+	u64	trnseq_2_offset                  : 8;
+	u64	trnseq_3_length                  : 8;
+	u64	trnseq_3_offset                  : 8;
+	u64	dmar_valid_length                : 16;
+	u64	dcph_valid_length                : 16;
+	u64	frag_hdr_offset                  : 9;
+	u64	ip_payload_offset                : 9;
+	u64	frag_rule                        : 4;
+	u64	frag_table                       : 1;
+	u64	frag_hit                         : 1;
+	u64	data_cmdq_ptr                    : 8;
+	u64	filter_result                    : 6;
+	u64	router_result                    : 6;
+	u64	nat_result                       : 6;
+	u64	hdri_result                      : 6;
+	u64	dcph_result                      : 6;
+	u64	dcph_result_valid                : 1;
+	u32	not_used_2                       : 4;
+	u64	tx_pkt_suspended                 : 1;
+	u64	tx_pkt_dropped                   : 1;
+	u32	not_used_3                       : 3;
+	u64	metadata_valid                   : 1;
+	u64	metadata_type                    : 4;
+	u64	ul_cs_start_diff                 : 9;
+	u64	cs_disable_trlr_vld_bit          : 1;
+	u64	cs_required                      : 1;
+	u64	dest_hdr_len                     : 8;
+	u64	fr_l                             : 1;
+	u64	fl_h                             : 1;
+	u64	fr_g                             : 1;
+	u64	fr_ret                           : 1;
+	u64	fr_rule_id                       : 10;
+	u64	rt_l                             : 1;
+	u64	rt_h                             : 1;
+	u64	rtng_tbl_index                   : 5;
+	u64	rt_match                         : 1;
+	u64	rt_rule_id                       : 10;
+	u64	nat_tbl_index                    : 13;
+	u64	nat_type                         : 2;
+	u64	hdr_l                            : 1;
+	u64	header_offset                    : 10;
+	u64	not_used_4                       : 1;
+	u64	filter_result_valid              : 1;
+	u64	router_result_valid              : 1;
+	u64	nat_result_valid                 : 1;
+	u64	hdri_result_valid                : 1;
+	u64	not_used_5                       : 1;
+	u64	stream_id                        : 8;
+	u64	not_used_6                       : 6;
+	u64	dcph_context_index               : 2;
+	u64	dcph_cfg_size                    : 16;
+	u64	dcph_cfg_count                   : 32;
+	u64	tag_info                         : 48;
+	u64	ucp_cmd_id                       : 16;
+	u64	metadata                         : 32;
+	u64	ucp_cmd_params                   : 32;
+	u64	nat_ip_address                   : 32;
+	u64	nat_ip_cs_diff                   : 16;
+	u64	frag_dest_pipe                   : 5;
+	u64	frag_nat_type                    : 2;
+	u64	fragr_ret                        : 1;
+	u64	frag_protocol                    : 8;
+	u64	src_ip_address                   : 32;
+	u64	dest_ip_address                  : 32;
+	u64	not_used_7                       : 37;
+	u64	frag_hdr_l                       : 1;
+	u64	frag_header_offset               : 10;
+	u64	frag_id                          : 16;
+} __packed;
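+
+/*
+ * The structure above is a raw bit-image of one hardware packet
+ * context: when the IPA_CTX_ID register space is accessible,
+ * ipa_save_registers() copies that space directly into the saved
+ * packet-context array and decodes each occupied context's state.
+ */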
+
+#endif /* #if !defined(_IPA_PKT_CNTXT_H_) */
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.c b/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.c
new file mode 100644
index 0000000..0b498e0
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.c
@@ -0,0 +1,1519 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#include "ipa_reg_dump.h"
+#include "ipa_access_control.h"
+
+/* Total size required for test bus */
+#define IPA_MEM_OVERLAY_SIZE     0x66000
+
+/*
+ * The following structure contains a hierarchy of structures that
+ * ultimately leads to a series of leaves. The leaves are structures
+ * containing detailed, bit-level register definitions.
+ */
+static struct regs_save_hierarchy_s ipa_reg_save;
+
+static unsigned int ipa_testbus_mem[IPA_MEM_OVERLAY_SIZE];
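+
+/*
+ * Illustrative sketch only (handle_rx_error() and reg_offset are
+ * hypothetical): each leaf of the register-save hierarchy is a union
+ * like those defined in the hwio header, so a saved register can be
+ * inspected either as a raw 32-bit word or through its decoded
+ * bit-fields:
+ *
+ *	union ipa_hwio_def_ipa_irq_stts_ee_n_u stts;
+ *
+ *	stts.value = in_dword(reg_offset);
+ *	if (stts.def.rx_err_irq)
+ *		handle_rx_error();
+ */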
+
+/*
+ * The following data structure contains a list of the registers
+ * (whose data are to be copied) and the locations (within
+ * ipa_reg_save above) into which the registers' values need to be
+ * copied.
+ */
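+/*
+ * Each entry pairs a register's bus offset (src_addr) with the address
+ * of its save slot inside ipa_reg_save (dst_addr), so the save loops
+ * in ipa_save_registers() reduce to one dword copy per entry:
+ *
+ *	*(ipa_regs_to_save_array[i].dst_addr) =
+ *		in_dword(ipa_regs_to_save_array[i].src_addr);
+ */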
+static struct map_src_dst_addr_s ipa_regs_to_save_array[] = {
+	/*
+	 * =====================================================================
+	 * IPA register definitions begin here...
+	 * =====================================================================
+	 */
+
+	/* IPA General Registers */
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE,
+			     ipa.gen,
+			     ipa_state),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_RX_ACTIVE,
+			     ipa.gen,
+			     ipa_state_rx_active),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_TX_WRAPPER,
+			     ipa.gen,
+			     ipa_state_tx_wrapper),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_TX0,
+			     ipa.gen,
+			     ipa_state_tx0),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_TX1,
+			     ipa.gen,
+			     ipa_state_tx1),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_AGGR_ACTIVE,
+			     ipa.gen,
+			     ipa_state_aggr_active),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_DFETCHER,
+			     ipa.gen,
+			     ipa_state_dfetcher),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_FETCHER_MASK_0,
+			     ipa.gen,
+			     ipa_state_fetcher_mask_0),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_FETCHER_MASK_1,
+			     ipa.gen,
+			     ipa_state_fetcher_mask_1),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_GSI_AOS,
+			     ipa.gen,
+			     ipa_state_gsi_aos),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_GSI_IF,
+			     ipa.gen,
+			     ipa_state_gsi_if),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_GSI_SKIP,
+			     ipa.gen,
+			     ipa_state_gsi_skip),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_GSI_TLV,
+			     ipa.gen,
+			     ipa_state_gsi_tlv),
+	GEN_SRC_DST_ADDR_MAP(IPA_DPL_TIMER_LSB,
+			     ipa.gen,
+			     ipa_dpl_timer_lsb),
+	GEN_SRC_DST_ADDR_MAP(IPA_DPL_TIMER_MSB,
+			     ipa.gen,
+			     ipa_dpl_timer_msb),
+	GEN_SRC_DST_ADDR_MAP(IPA_PROC_IPH_CFG,
+			     ipa.gen,
+			     ipa_proc_iph_cfg),
+	GEN_SRC_DST_ADDR_MAP(IPA_ROUTE,
+			     ipa.gen,
+			     ipa_route),
+	GEN_SRC_DST_ADDR_MAP(IPA_SPARE_REG_1,
+			     ipa.gen,
+			     ipa_spare_reg_1),
+	GEN_SRC_DST_ADDR_MAP(IPA_SPARE_REG_2,
+			     ipa.gen,
+			     ipa_spare_reg_2),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG,
+			     ipa.gen,
+			     ipa_log),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_STATUS_CFG,
+			     ipa.gen,
+			     ipa_log_buf_status_cfg),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_STATUS_ADDR,
+			     ipa.gen,
+			     ipa_log_buf_status_addr),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_STATUS_WRITE_PTR,
+			     ipa.gen,
+			     ipa_log_buf_status_write_ptr),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_STATUS_RAM_PTR,
+			     ipa.gen,
+			     ipa_log_buf_status_ram_ptr),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_HW_CMD_CFG,
+			     ipa.gen,
+			     ipa_log_buf_hw_cmd_cfg),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_HW_CMD_ADDR,
+			     ipa.gen,
+			     ipa_log_buf_hw_cmd_addr),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_HW_CMD_WRITE_PTR,
+			     ipa.gen,
+			     ipa_log_buf_hw_cmd_write_ptr),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_HW_CMD_RAM_PTR,
+			     ipa.gen,
+			     ipa_log_buf_hw_cmd_ram_ptr),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_DPL_FIFO,
+			     ipa.gen,
+			     ipa_state_dpl_fifo),
+	GEN_SRC_DST_ADDR_MAP(IPA_COMP_HW_VERSION,
+			     ipa.gen,
+			     ipa_comp_hw_version),
+	GEN_SRC_DST_ADDR_MAP(IPA_FILT_ROUT_HASH_EN,
+			     ipa.gen,
+			     ipa_filt_rout_hash_en),
+	GEN_SRC_DST_ADDR_MAP(IPA_FILT_ROUT_HASH_FLUSH,
+			     ipa.gen,
+			     ipa_filt_rout_hash_flush),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_FETCHER,
+			     ipa.gen,
+			     ipa_state_fetcher),
+	GEN_SRC_DST_ADDR_MAP(IPA_IPV4_FILTER_INIT_VALUES,
+			     ipa.gen,
+			     ipa_ipv4_filter_init_values),
+	GEN_SRC_DST_ADDR_MAP(IPA_IPV6_FILTER_INIT_VALUES,
+			     ipa.gen,
+			     ipa_ipv6_filter_init_values),
+	GEN_SRC_DST_ADDR_MAP(IPA_IPV4_ROUTE_INIT_VALUES,
+			     ipa.gen,
+			     ipa_ipv4_route_init_values),
+	GEN_SRC_DST_ADDR_MAP(IPA_IPV6_ROUTE_INIT_VALUES,
+			     ipa.gen,
+			     ipa_ipv6_route_init_values),
+	GEN_SRC_DST_ADDR_MAP(IPA_BAM_ACTIVATED_PORTS,
+			     ipa.gen,
+			     ipa_bam_activated_ports),
+	GEN_SRC_DST_ADDR_MAP(IPA_TX_COMMANDER_CMDQ_STATUS,
+			     ipa.gen,
+			     ipa_tx_commander_cmdq_status),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_HW_SNIF_EL_EN,
+			     ipa.gen,
+			     ipa_log_buf_hw_snif_el_en),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_HW_SNIF_EL_WR_N_RD_SEL,
+			     ipa.gen,
+			     ipa_log_buf_hw_snif_el_wr_n_rd_sel),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOG_BUF_HW_SNIF_EL_CLI_MUX,
+			     ipa.gen,
+			     ipa_log_buf_hw_snif_el_cli_mux),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_ACL,
+			     ipa.gen,
+			     ipa_state_acl),
+	GEN_SRC_DST_ADDR_MAP(IPA_SYS_PKT_PROC_CNTXT_BASE,
+			     ipa.gen,
+			     ipa_sys_pkt_proc_cntxt_base),
+	GEN_SRC_DST_ADDR_MAP(IPA_SYS_PKT_PROC_CNTXT_BASE_MSB,
+			     ipa.gen,
+			     ipa_sys_pkt_proc_cntxt_base_msb),
+	GEN_SRC_DST_ADDR_MAP(IPA_LOCAL_PKT_PROC_CNTXT_BASE,
+			     ipa.gen,
+			     ipa_local_pkt_proc_cntxt_base),
+	GEN_SRC_DST_ADDR_MAP(IPA_RSRC_GRP_CFG,
+			     ipa.gen,
+			     ipa_rsrc_grp_cfg),
+	GEN_SRC_DST_ADDR_MAP(IPA_PIPELINE_DISABLE,
+			     ipa.gen,
+			     ipa_pipeline_disable),
+	GEN_SRC_DST_ADDR_MAP(IPA_COMP_CFG,
+			     ipa.gen,
+			     ipa_comp_cfg),
+	GEN_SRC_DST_ADDR_MAP(IPA_STATE_NLO_AGGR,
+			     ipa.gen,
+			     ipa_state_nlo_aggr),
+	GEN_SRC_DST_ADDR_MAP(IPA_NLO_PP_CFG1,
+			     ipa.gen,
+			     ipa_nlo_pp_cfg1),
+	GEN_SRC_DST_ADDR_MAP(IPA_NLO_PP_CFG2,
+			     ipa.gen,
+			     ipa_nlo_pp_cfg2),
+	GEN_SRC_DST_ADDR_MAP(IPA_NLO_PP_ACK_LIMIT_CFG,
+			     ipa.gen,
+			     ipa_nlo_pp_ack_limit_cfg),
+	GEN_SRC_DST_ADDR_MAP(IPA_NLO_PP_DATA_LIMIT_CFG,
+			     ipa.gen,
+			     ipa_nlo_pp_data_limit_cfg),
+	GEN_SRC_DST_ADDR_MAP(IPA_NLO_MIN_DSM_CFG,
+			     ipa.gen,
+			     ipa_nlo_min_dsm_cfg),
+	GEN_SRC_DST_ADDR_MAP(IPA_NLO_VP_FLUSH_REQ,
+			     ipa.gen,
+			     ipa_nlo_vp_flush_req),
+	GEN_SRC_DST_ADDR_MAP(IPA_NLO_VP_FLUSH_COOKIE,
+			     ipa.gen,
+			     ipa_nlo_vp_flush_cookie),
+	GEN_SRC_DST_ADDR_MAP(IPA_NLO_VP_FLUSH_ACK,
+			     ipa.gen,
+			     ipa_nlo_vp_flush_ack),
+	GEN_SRC_DST_ADDR_MAP(IPA_NLO_VP_DSM_OPEN,
+			     ipa.gen,
+			     ipa_nlo_vp_dsm_open),
+	GEN_SRC_DST_ADDR_MAP(IPA_NLO_VP_QBAP_OPEN,
+			     ipa.gen,
+			     ipa_nlo_vp_qbap_open),
+
+	/* Debug Registers */
+	GEN_SRC_DST_ADDR_MAP(IPA_DEBUG_DATA,
+			     ipa.dbg,
+			     ipa_debug_data),
+	GEN_SRC_DST_ADDR_MAP(IPA_STEP_MODE_BREAKPOINTS,
+			     ipa.dbg,
+			     ipa_step_mode_breakpoints),
+	GEN_SRC_DST_ADDR_MAP(IPA_STEP_MODE_STATUS,
+			     ipa.dbg,
+			     ipa_step_mode_status),
+
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_CMD_n, ipa_rx_splt_cmdq_cmd_n),
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_CFG_n, ipa_rx_splt_cmdq_cfg_n),
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_DATA_WR_0_n, ipa_rx_splt_cmdq_data_wr_0_n),
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_DATA_WR_1_n, ipa_rx_splt_cmdq_data_wr_1_n),
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_DATA_WR_2_n, ipa_rx_splt_cmdq_data_wr_2_n),
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_DATA_WR_3_n, ipa_rx_splt_cmdq_data_wr_3_n),
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_DATA_RD_0_n, ipa_rx_splt_cmdq_data_rd_0_n),
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_DATA_RD_1_n, ipa_rx_splt_cmdq_data_rd_1_n),
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_DATA_RD_2_n, ipa_rx_splt_cmdq_data_rd_2_n),
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_DATA_RD_3_n, ipa_rx_splt_cmdq_data_rd_3_n),
+	IPA_REG_SAVE_RX_SPLT_CMDQ(
+		IPA_RX_SPLT_CMDQ_STATUS_n, ipa_rx_splt_cmdq_status_n),
+
+	GEN_SRC_DST_ADDR_MAP(IPA_RX_HPS_CMDQ_CMD,
+			     ipa.dbg,
+			     ipa_rx_hps_cmdq_cmd),
+	GEN_SRC_DST_ADDR_MAP(IPA_RX_HPS_CMDQ_STATUS_EMPTY,
+			     ipa.dbg,
+			     ipa_rx_hps_cmdq_status_empty),
+	GEN_SRC_DST_ADDR_MAP(IPA_RX_HPS_CLIENTS_MIN_DEPTH_0,
+			     ipa.dbg,
+			     ipa_rx_hps_clients_min_depth_0),
+	GEN_SRC_DST_ADDR_MAP(IPA_RX_HPS_CLIENTS_MAX_DEPTH_0,
+			     ipa.dbg,
+			     ipa_rx_hps_clients_max_depth_0),
+	GEN_SRC_DST_ADDR_MAP(IPA_HPS_DPS_CMDQ_CMD,
+			     ipa.dbg,
+			     ipa_hps_dps_cmdq_cmd),
+	GEN_SRC_DST_ADDR_MAP(IPA_HPS_DPS_CMDQ_STATUS_EMPTY,
+			     ipa.dbg,
+			     ipa_hps_dps_cmdq_status_empty),
+	GEN_SRC_DST_ADDR_MAP(IPA_DPS_TX_CMDQ_CMD,
+			     ipa.dbg,
+			     ipa_dps_tx_cmdq_cmd),
+	GEN_SRC_DST_ADDR_MAP(IPA_DPS_TX_CMDQ_STATUS_EMPTY,
+			     ipa.dbg,
+			     ipa_dps_tx_cmdq_status_empty),
+	GEN_SRC_DST_ADDR_MAP(IPA_ACKMNGR_CMDQ_CMD,
+			     ipa.dbg,
+			     ipa_ackmngr_cmdq_cmd),
+	GEN_SRC_DST_ADDR_MAP(IPA_ACKMNGR_CMDQ_STATUS_EMPTY,
+			     ipa.dbg,
+			     ipa_ackmngr_cmdq_status_empty),
+
+	/*
+	 * NOTE: GEN_SRC_DST_ADDR_MAP() is not used below because the
+	 *       following registers are not scalar; rather, they are
+	 *       register arrays...
+	 */
+	IPA_REG_SAVE_CFG_ENTRY_GEN_EE(IPA_IRQ_STTS_EE_n,
+				      ipa_irq_stts_ee_n),
+	IPA_REG_SAVE_CFG_ENTRY_GEN_EE(IPA_IRQ_EN_EE_n,
+				      ipa_irq_en_ee_n),
+	IPA_REG_SAVE_CFG_ENTRY_GEN_EE(IPA_FEC_ADDR_EE_n,
+				      ipa_fec_addr_ee_n),
+	IPA_REG_SAVE_CFG_ENTRY_GEN_EE(IPA_FEC_ATTR_EE_n,
+				      ipa_fec_attr_ee_n),
+	IPA_REG_SAVE_CFG_ENTRY_GEN_EE(IPA_SNOC_FEC_EE_n,
+				      ipa_snoc_fec_ee_n),
+	IPA_REG_SAVE_CFG_ENTRY_GEN_EE(IPA_HOLB_DROP_IRQ_INFO_EE_n,
+				      ipa_holb_drop_irq_info_ee_n),
+	IPA_REG_SAVE_CFG_ENTRY_GEN_EE(IPA_SUSPEND_IRQ_INFO_EE_n,
+				      ipa_suspend_irq_info_ee_n),
+	IPA_REG_SAVE_CFG_ENTRY_GEN_EE(IPA_SUSPEND_IRQ_EN_EE_n,
+				      ipa_suspend_irq_en_ee_n),
+
+	/* Pipe Endp Registers */
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_CTRL_n,
+					 ipa_endp_init_ctrl_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_CTRL_SCND_n,
+					 ipa_endp_init_ctrl_scnd_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_CFG_n,
+					 ipa_endp_init_cfg_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_NAT_n,
+					 ipa_endp_init_nat_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_HDR_n,
+					 ipa_endp_init_hdr_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_HDR_EXT_n,
+					 ipa_endp_init_hdr_ext_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_HDR_METADATA_MASK_n,
+					 ipa_endp_init_hdr_metadata_mask_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_HDR_METADATA_n,
+					 ipa_endp_init_hdr_metadata_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_MODE_n,
+					 ipa_endp_init_mode_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_AGGR_n,
+					 ipa_endp_init_aggr_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_HOL_BLOCK_EN_n,
+					 ipa_endp_init_hol_block_en_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
+					 ipa_endp_init_hol_block_timer_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_DEAGGR_n,
+					 ipa_endp_init_deaggr_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_STATUS_n,
+					 ipa_endp_status_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_RSRC_GRP_n,
+					 ipa_endp_init_rsrc_grp_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_SEQ_n,
+					 ipa_endp_init_seq_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_GSI_CFG_TLV_n,
+					 ipa_endp_gsi_cfg_tlv_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_GSI_CFG_AOS_n,
+					 ipa_endp_gsi_cfg_aos_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_GSI_CFG1_n,
+					 ipa_endp_gsi_cfg1_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+					 ipa_endp_filter_router_hsh_cfg_n),
+
+	/* Source Resource Group Config Registers */
+	IPA_REG_SAVE_CFG_ENTRY_SRC_RSRC_GRP(IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
+					    ipa_src_rsrc_grp_01_rsrc_type_n),
+	IPA_REG_SAVE_CFG_ENTRY_SRC_RSRC_GRP(IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n,
+					    ipa_src_rsrc_grp_23_rsrc_type_n),
+
+	/* Destination Resource Group Config Registers */
+	IPA_REG_SAVE_CFG_ENTRY_DST_RSRC_GRP(IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
+					    ipa_dst_rsrc_grp_01_rsrc_type_n),
+	IPA_REG_SAVE_CFG_ENTRY_DST_RSRC_GRP(IPA_DST_RSRC_GRP_23_RSRC_TYPE_n,
+					    ipa_dst_rsrc_grp_23_rsrc_type_n),
+
+	/* Source Resource Group Count Registers */
+	IPA_REG_SAVE_CFG_ENTRY_SRC_RSRC_CNT_GRP(
+		IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n,
+		ipa_src_rsrc_grp_0123_rsrc_type_cnt_n),
+
+	/* Destination Resource Group Count Registers */
+	IPA_REG_SAVE_CFG_ENTRY_DST_RSRC_CNT_GRP(
+		IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n,
+		ipa_dst_rsrc_grp_0123_rsrc_type_cnt_n),
+
+	/*
+	 * =====================================================================
+	 * GSI register definitions begin here...
+	 * =====================================================================
+	 */
+
+	/* GSI General Registers */
+	GEN_SRC_DST_ADDR_MAP(GSI_CFG,
+			     gsi.gen,
+			     gsi_cfg),
+	GEN_SRC_DST_ADDR_MAP(GSI_REE_CFG,
+			     gsi.gen,
+			     gsi_ree_cfg),
+	IPA_REG_SAVE_GSI_VER(
+			     IPA_GSI_TOP_GSI_INST_RAM_n,
+			     ipa_gsi_top_gsi_inst_ram_n),
+
+	/* GSI Debug Registers */
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_DEBUG_BUSY_REG,
+			     gsi.debug,
+			     ipa_gsi_top_gsi_debug_busy_reg),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_DEBUG_EVENT_PENDING,
+			     gsi.debug,
+			     ipa_gsi_top_gsi_debug_event_pending),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_DEBUG_TIMER_PENDING,
+			     gsi.debug,
+			     ipa_gsi_top_gsi_debug_timer_pending),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_DEBUG_RD_WR_PENDING,
+			     gsi.debug,
+			     ipa_gsi_top_gsi_debug_rd_wr_pending),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_DEBUG_PC_FROM_SW,
+			     gsi.debug,
+			     ipa_gsi_top_gsi_debug_pc_from_sw),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_DEBUG_SW_STALL,
+			     gsi.debug,
+			     ipa_gsi_top_gsi_debug_sw_stall),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_DEBUG_PC_FOR_DEBUG,
+			     gsi.debug,
+			     ipa_gsi_top_gsi_debug_pc_for_debug),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID,
+			     gsi.debug,
+			     ipa_gsi_top_gsi_debug_qsb_log_err_trns_id),
+
+	/* GSI IRAM pointers Registers */
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_CH_CMD,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_ch_cmd),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_EE_GENERIC_CMD,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_ee_generic_cmd),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_CH_DB,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_ch_db),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_EV_DB,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_ev_db),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_NEW_RE,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_new_re),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_CH_DIS_COMP,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_ch_dis_comp),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_CH_EMPTY,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_ch_empty),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_EVENT_GEN_COMP,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_event_gen_comp),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_TIMER_EXPIRED,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_timer_expired),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_WRITE_ENG_COMP,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_write_eng_comp),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_READ_ENG_COMP,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_read_eng_comp),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_UC_GP_INT,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_uc_gp_int),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED,
+			     gsi.debug.gsi_iram_ptrs,
+			     ipa_gsi_top_gsi_iram_ptr_int_mod_stopped),
+
+	/* GSI SHRAM pointers Registers */
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR,
+			     gsi.debug.gsi_shram_ptrs,
+			     ipa_gsi_top_gsi_shram_ptr_ch_cntxt_base_addr),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR,
+			     gsi.debug.gsi_shram_ptrs,
+			     ipa_gsi_top_gsi_shram_ptr_ev_cntxt_base_addr),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR,
+			     gsi.debug.gsi_shram_ptrs,
+			     ipa_gsi_top_gsi_shram_ptr_re_storage_base_addr),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR,
+			     gsi.debug.gsi_shram_ptrs,
+			     ipa_gsi_top_gsi_shram_ptr_re_esc_buf_base_addr),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR,
+			     gsi.debug.gsi_shram_ptrs,
+			     ipa_gsi_top_gsi_shram_ptr_ee_scrach_base_addr),
+	GEN_SRC_DST_ADDR_MAP(IPA_GSI_TOP_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR,
+			     gsi.debug.gsi_shram_ptrs,
+			     ipa_gsi_top_gsi_shram_ptr_func_stack_base_addr),
+
+	/*
+	 * NOTE: GEN_SRC_DST_ADDR_MAP() is not used below because the
+	 *       following registers are not scalar; rather, they are
+	 *       register arrays...
+	 */
+
+	/* GSI General EE Registers */
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(GSI_MANAGER_EE_QOS_n,
+					      gsi_manager_ee_qos_n),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_GSI_STATUS,
+					      ee_n_gsi_status),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_TYPE_IRQ,
+					      ee_n_cntxt_type_irq),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_TYPE_IRQ_MSK,
+					      ee_n_cntxt_type_irq_msk),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_SRC_GSI_CH_IRQ,
+					      ee_n_cntxt_src_gsi_ch_irq),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_SRC_EV_CH_IRQ,
+					      ee_n_cntxt_src_ev_ch_irq),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK,
+					      ee_n_cntxt_src_gsi_ch_irq_msk),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_SRC_EV_CH_IRQ_MSK,
+					      ee_n_cntxt_src_ev_ch_irq_msk),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_SRC_IEOB_IRQ,
+					      ee_n_cntxt_src_ieob_irq),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_SRC_IEOB_IRQ_MSK,
+					      ee_n_cntxt_src_ieob_irq_msk),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_GSI_IRQ_STTS,
+					      ee_n_cntxt_gsi_irq_stts),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_GLOB_IRQ_STTS,
+					      ee_n_cntxt_glob_irq_stts),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_ERROR_LOG,
+					      ee_n_error_log),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_SCRATCH_0,
+					      ee_n_cntxt_scratch_0),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_SCRATCH_1,
+					      ee_n_cntxt_scratch_1),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_INTSET,
+					      ee_n_cntxt_intset),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_MSI_BASE_LSB,
+					      ee_n_cntxt_msi_base_lsb),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(EE_n_CNTXT_MSI_BASE_MSB,
+					      ee_n_cntxt_msi_base_msb),
+
+	/* GSI Channel Context Registers */
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_CNTXT_0,
+					    ee_n_gsi_ch_k_cntxt_0),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_CNTXT_1,
+					    ee_n_gsi_ch_k_cntxt_1),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_CNTXT_2,
+					    ee_n_gsi_ch_k_cntxt_2),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_CNTXT_3,
+					    ee_n_gsi_ch_k_cntxt_3),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_CNTXT_4,
+					    ee_n_gsi_ch_k_cntxt_4),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_CNTXT_5,
+					    ee_n_gsi_ch_k_cntxt_5),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_CNTXT_6,
+					    ee_n_gsi_ch_k_cntxt_6),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_CNTXT_7,
+					    ee_n_gsi_ch_k_cntxt_7),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_RE_FETCH_READ_PTR,
+					    ee_n_gsi_ch_k_re_fetch_read_ptr),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR,
+					    ee_n_gsi_ch_k_re_fetch_write_ptr),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_QOS,
+					    ee_n_gsi_ch_k_qos),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_SCRATCH_0,
+					    ee_n_gsi_ch_k_scratch_0),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_SCRATCH_1,
+					    ee_n_gsi_ch_k_scratch_1),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_SCRATCH_2,
+					    ee_n_gsi_ch_k_scratch_2),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(EE_n_GSI_CH_k_SCRATCH_3,
+					    ee_n_gsi_ch_k_scratch_3),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(GSI_MAP_EE_n_CH_k_VP_TABLE,
+					    gsi_map_ee_n_ch_k_vp_table),
+
+	/* GSI Channel Event Context Registers */
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_0,
+					     ee_n_ev_ch_k_cntxt_0),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_1,
+					     ee_n_ev_ch_k_cntxt_1),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_2,
+					     ee_n_ev_ch_k_cntxt_2),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_3,
+					     ee_n_ev_ch_k_cntxt_3),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_4,
+					     ee_n_ev_ch_k_cntxt_4),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_5,
+					     ee_n_ev_ch_k_cntxt_5),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_6,
+					     ee_n_ev_ch_k_cntxt_6),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_7,
+					     ee_n_ev_ch_k_cntxt_7),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_8,
+					     ee_n_ev_ch_k_cntxt_8),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_9,
+					     ee_n_ev_ch_k_cntxt_9),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_10,
+					     ee_n_ev_ch_k_cntxt_10),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_11,
+					     ee_n_ev_ch_k_cntxt_11),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_12,
+					     ee_n_ev_ch_k_cntxt_12),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_CNTXT_13,
+					     ee_n_ev_ch_k_cntxt_13),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_SCRATCH_0,
+					     ee_n_ev_ch_k_scratch_0),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(EE_n_EV_CH_k_SCRATCH_1,
+					     ee_n_ev_ch_k_scratch_1),
+	IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(GSI_DEBUG_EE_n_EV_k_VP_TABLE,
+					     gsi_debug_ee_n_ev_k_vp_table),
+
+#if defined(CONFIG_IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS) && \
+	CONFIG_IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS > 0
+	/* Endp Registers for remaining pipes */
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_CTRL_n,
+					       ipa_endp_init_ctrl_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_CTRL_SCND_n,
+					       ipa_endp_init_ctrl_scnd_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_CFG_n,
+					       ipa_endp_init_cfg_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_NAT_n,
+					       ipa_endp_init_nat_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_HDR_n,
+					       ipa_endp_init_hdr_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_HDR_EXT_n,
+					       ipa_endp_init_hdr_ext_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA
+		(IPA_ENDP_INIT_HDR_METADATA_MASK_n,
+		ipa_endp_init_hdr_metadata_mask_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_HDR_METADATA_n,
+					       ipa_endp_init_hdr_metadata_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_MODE_n,
+					       ipa_endp_init_mode_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_AGGR_n,
+					       ipa_endp_init_aggr_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_HOL_BLOCK_EN_n,
+					       ipa_endp_init_hol_block_en_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
+					       ipa_endp_init_hol_block_timer_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_DEAGGR_n,
+					       ipa_endp_init_deaggr_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_STATUS_n,
+					       ipa_endp_status_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_RSRC_GRP_n,
+					       ipa_endp_init_rsrc_grp_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_INIT_SEQ_n,
+					       ipa_endp_init_seq_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_GSI_CFG_TLV_n,
+					       ipa_endp_gsi_cfg_tlv_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_GSI_CFG_AOS_n,
+					       ipa_endp_gsi_cfg_aos_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(IPA_ENDP_GSI_CFG1_n,
+					       ipa_endp_gsi_cfg1_n),
+	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA
+		(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+		 ipa_endp_filter_router_hsh_cfg_n),
+#endif
+};
+
+/* IPA uC PER registers save Cfg array */
+static struct map_src_dst_addr_s ipa_uc_regs_to_save_array[] = {
+	/* HWP registers */
+	GEN_SRC_DST_ADDR_MAP(IPA_UC_QMB_SYS_ADDR,
+			     ipa.hwp,
+			     ipa_uc_qmb_sys_addr),
+	GEN_SRC_DST_ADDR_MAP(IPA_UC_QMB_LOCAL_ADDR,
+			     ipa.hwp,
+			     ipa_uc_qmb_local_addr),
+	GEN_SRC_DST_ADDR_MAP(IPA_UC_QMB_LENGTH,
+			     ipa.hwp,
+			     ipa_uc_qmb_length),
+	GEN_SRC_DST_ADDR_MAP(IPA_UC_QMB_TRIGGER,
+			     ipa.hwp,
+			     ipa_uc_qmb_trigger),
+	GEN_SRC_DST_ADDR_MAP(IPA_UC_QMB_PENDING_TID,
+			     ipa.hwp,
+			     ipa_uc_qmb_pending_tid),
+	GEN_SRC_DST_ADDR_MAP(IPA_UC_QMB_COMPLETED_RD_FIFO_PEEK,
+			     ipa.hwp,
+			     ipa_uc_qmb_completed_rd_fifo_peek),
+	GEN_SRC_DST_ADDR_MAP(IPA_UC_QMB_COMPLETED_WR_FIFO_PEEK,
+			     ipa.hwp,
+			     ipa_uc_qmb_completed_wr_fifo_peek),
+	GEN_SRC_DST_ADDR_MAP(IPA_UC_QMB_MISC,
+			     ipa.hwp,
+			     ipa_uc_qmb_misc),
+	GEN_SRC_DST_ADDR_MAP(IPA_UC_QMB_STATUS,
+			     ipa.hwp,
+			     ipa_uc_qmb_status),
+	GEN_SRC_DST_ADDR_MAP(IPA_UC_QMB_BUS_ATTRIB,
+			     ipa.hwp,
+			     ipa_uc_qmb_bus_attrib),
+};
+
+static void ipa_hal_save_regs_save_ipa_testbus(void);
+static void ipa_reg_save_gsi_fifo_status(void);
+static void ipa_reg_save_rsrc_cnts(void);
+static void ipa_hal_save_regs_ipa_cmdq(void);
+static void ipa_hal_save_regs_rsrc_db(void);
+static void ipa_reg_save_anomaly_check(void);
+
+static struct reg_access_funcs_s *get_access_funcs(u32 addr)
+{
+	u32 i, asub = ipa3_ctx->sd_state;
+
+	for (i = 0; i < ARRAY_SIZE(mem_access_map); i++) {
+		if (addr >= mem_access_map[i].addr_range_begin &&
+			addr <= mem_access_map[i].addr_range_end) {
+			return mem_access_map[i].access[asub];
+		}
+	}
+
+	IPAERR("Unknown register offset(0x%08X). Using dflt access methods\n",
+		   addr);
+
+	return &io_matrix[AA_COMBO];
+}
+
+static u32 in_dword(
+	u32 addr)
+{
+	struct reg_access_funcs_s *io = get_access_funcs(addr);
+
+	return io->read(ipa3_ctx->reg_collection_base + addr);
+}
+
+static u32 in_dword_masked(
+	u32 addr,
+	u32 mask)
+{
+	struct reg_access_funcs_s *io = get_access_funcs(addr);
+	u32 val;
+
+	val = io->read(ipa3_ctx->reg_collection_base + addr);
+
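+	/*
+	 * Apply the caller's mask only when the access method is an
+	 * actual register read (act_read); other access methods (e.g.
+	 * stubs for regions that must not be touched) return their
+	 * value unmasked.
+	 */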
+	if (io->read == act_read)
+		return val & mask;
+
+	return val;
+}
+
+static void out_dword(
+	u32 addr,
+	u32 val)
+{
+	struct reg_access_funcs_s *io = get_access_funcs(addr);
+
+	io->write(ipa3_ctx->reg_collection_base + addr, val);
+}
+
+/*
+ * FUNCTION:  ipa_save_gsi_ver
+ *
+ * Saves the GSI firmware version (the low 16 bits of the first
+ * GSI instruction-RAM word)
+ *
+ * @return
+ * None
+ */
+void ipa_save_gsi_ver(void)
+{
+	ipa_reg_save.gsi.fw_ver =
+		IPA_READ_1xVECTOR_REG(IPA_GSI_TOP_GSI_INST_RAM_n, 0) &
+		0x0000FFFF;
+}
+
+/*
+ * FUNCTION:  ipa_save_registers
+ *
+ * Saves all the IPA register values that are configured to be saved
+ *
+ * @return
+ * None
+ */
+void ipa_save_registers(void)
+{
+	u32 i = 0;
+	/* Fetch the number of registers configured to be saved */
+	u32 num_regs = ARRAY_SIZE(ipa_regs_to_save_array);
+	u32 num_uc_per_regs = ARRAY_SIZE(ipa_uc_regs_to_save_array);
+	union ipa_hwio_def_ipa_rsrc_mngr_db_cfg_u for_cfg;
+	union ipa_hwio_def_ipa_rsrc_mngr_db_rsrc_read_u for_read;
+
+	if (!ipa3_ctx->do_register_collection_on_crash)
+		return;
+
+	IPAERR("Commencing\n");
+
+	/*
+	 * Exclude the GSI FIFO registers and the extra-pipe endp
+	 * registers for now.  They will be saved later.
+	 */
+	num_regs -= (CONFIG_IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS *
+		     IPA_REG_SAVE_NUM_EXTRA_ENDP_REGS);
+
+	memset(&for_cfg, 0, sizeof(for_cfg));
+	memset(&for_read, 0, sizeof(for_read));
+
+	/* Now save all the configured registers */
+	for (i = 0; i < num_regs; i++) {
+		/* Copy reg value to our data struct */
+		*(ipa_regs_to_save_array[i].dst_addr) =
+			in_dword(ipa_regs_to_save_array[i].src_addr);
+	}
+
+	IPA_HW_REG_SAVE_CFG_ENTRY_PIPE_ENDP_ACTIVE();
+
+	/* Now save the per endp registers for the remaining pipes */
+	for (i = 0; i < (CONFIG_IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS *
+			 IPA_REG_SAVE_NUM_EXTRA_ENDP_REGS); i++) {
+		/* Copy reg value to our data struct */
+		*(ipa_regs_to_save_array[num_regs + i].dst_addr) =
+			in_dword(ipa_regs_to_save_array[num_regs + i].src_addr);
+	}
+
+	IPA_HW_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA_ACTIVE();
+
+	num_regs += (CONFIG_IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS *
+		     IPA_REG_SAVE_NUM_EXTRA_ENDP_REGS);
+
+	/* Saving GSI FIFO Status registers */
+	ipa_reg_save_gsi_fifo_status();
+
+	/*
+	 * On targets that support SSR, we generally want to disable
+	 * the following reg save functionality as it may cause stalls
+	 * in IPA after the SSR.
+	 *
+	 * To override this, set do_non_tn_collection_on_crash to
+	 * true, via dtsi, and the collection will be done.
+	 */
+	if (ipa3_ctx->do_non_tn_collection_on_crash) {
+		/* Save all the uC PER configured registers */
+		for (i = 0; i < num_uc_per_regs; i++) {
+			/* Copy reg value to our data struct */
+			*(ipa_uc_regs_to_save_array[i].dst_addr) =
+			    in_dword(ipa_uc_regs_to_save_array[i].src_addr);
+		}
+
+		/* Saving CMD Queue registers */
+		ipa_hal_save_regs_ipa_cmdq();
+
+		/* Collecting resource DB information */
+		ipa_hal_save_regs_rsrc_db();
+
+		/* Save IPA testbus */
+		if (ipa3_ctx->do_testbus_collection_on_crash)
+			ipa_hal_save_regs_save_ipa_testbus();
+	}
+
+	/* GSI test bus and QSB log */
+	for (i = 0;
+	     i < ARRAY_SIZE(ipa_reg_save_gsi_ch_test_bus_selector_array);
+	     i++) {
+		/* Write test bus selector */
+		IPA_WRITE_SCALER_REG(
+			GSI_TEST_BUS_SEL,
+			ipa_reg_save_gsi_ch_test_bus_selector_array[i]);
+
+		ipa_reg_save.gsi.debug.gsi_test_bus.test_bus_reg[
+		    i].gsi_testbus_reg =
+		    (u32) IPA_READ_SCALER_REG(GSI_TEST_BUS_REG);
+	}
+
+	ipa_reg_save_rsrc_cnts();
+
+	for (i = 0; i < HWIO_GSI_DEBUG_SW_RF_n_READ_MAXn + 1; i++)
+		ipa_reg_save.gsi.debug.gsi_mcs_regs.mcs_reg[i].rf_reg =
+			IPA_READ_1xVECTOR_REG(GSI_DEBUG_SW_RF_n_READ, i);
+
+	for (i = 0; i < HWIO_GSI_DEBUG_COUNTERn_MAXn + 1; i++)
+		ipa_reg_save.gsi.debug.gsi_cnt_regs.cnt[i].counter_value =
+			(u16)IPA_READ_1xVECTOR_REG(GSI_DEBUG_COUNTERn, i);
+
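+	/*
+	 * For each valid virtual channel, recover the MCS channel
+	 * scratch words: a channel's SHRAM region spans
+	 * IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM words, with scratch4 and
+	 * scratch5 held in its last two words.
+	 */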
+	for (i = 0; i < IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_A7; i++) {
+		u32 phys_ch_idx = ipa_reg_save.gsi.ch_cntxt.a7[
+			i].gsi_map_ee_n_ch_k_vp_table.phy_ch;
+		u32 n = phys_ch_idx * IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM;
+
+		if (!ipa_reg_save.gsi.ch_cntxt.a7[
+				i].gsi_map_ee_n_ch_k_vp_table.valid)
+			continue;
+		ipa_reg_save.gsi.ch_cntxt.a7[
+			i].mcs_channel_scratch.scratch4.shram =
+			IPA_READ_1xVECTOR_REG(
+				GSI_SHRAM_n,
+				n + IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM - 2);
+		ipa_reg_save.gsi.ch_cntxt.a7[
+			i].mcs_channel_scratch.scratch5.shram =
+			IPA_READ_1xVECTOR_REG(
+				GSI_SHRAM_n,
+				n + IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM - 1);
+	}
+
+	for (i = 0; i < IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_Q6; i++) {
+		u32 phys_ch_idx = ipa_reg_save.gsi.ch_cntxt.q6[
+			i].gsi_map_ee_n_ch_k_vp_table.phy_ch;
+		u32 n = phys_ch_idx*IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM;
+
+		if (!ipa_reg_save.gsi.ch_cntxt.q6[
+				i].gsi_map_ee_n_ch_k_vp_table.valid)
+			continue;
+		ipa_reg_save.gsi.ch_cntxt.q6[
+			i].mcs_channel_scratch.scratch4.shram =
+			IPA_READ_1xVECTOR_REG(
+				GSI_SHRAM_n,
+				n + IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM - 2);
+		ipa_reg_save.gsi.ch_cntxt.q6[
+			i].mcs_channel_scratch.scratch5.shram =
+			IPA_READ_1xVECTOR_REG(
+				GSI_SHRAM_n,
+				n + IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM - 1);
+	}
+
+	for (i = 0; i < IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_UC; i++) {
+		u32 phys_ch_idx = ipa_reg_save.gsi.ch_cntxt.uc[
+			i].gsi_map_ee_n_ch_k_vp_table.phy_ch;
+		u32 n = phys_ch_idx*IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM;
+
+		if (!ipa_reg_save.gsi.ch_cntxt.uc[
+				i].gsi_map_ee_n_ch_k_vp_table.valid)
+			continue;
+		ipa_reg_save.gsi.ch_cntxt.uc[
+			i].mcs_channel_scratch.scratch4.shram =
+			IPA_READ_1xVECTOR_REG(
+				GSI_SHRAM_n,
+				n + IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM - 2);
+		ipa_reg_save.gsi.ch_cntxt.uc[
+			i].mcs_channel_scratch.scratch5.shram =
+			IPA_READ_1xVECTOR_REG(
+				GSI_SHRAM_n,
+				n + IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM - 1);
+	}
+
+	/*
+	 * On targets that support SSR, we generally want to disable
+	 * the following reg save functionality as it may cause stalls
+	 * in IPA after the SSR.
+	 *
+	 * To override this, set do_non_tn_collection_on_crash to
+	 * true, via dtsi, and the collection will be done.
+	 */
+	if (ipa3_ctx->do_non_tn_collection_on_crash) {
+		u32 ofst = GEN_2xVECTOR_REG_OFST(IPA_CTX_ID_m_CTX_NUM_n, 0, 0);
+		struct reg_access_funcs_s *io = get_access_funcs(ofst);
+		/*
+		 * If the memory is accessible, copy pkt context directly from
+		 * IPA_CTX_ID register space
+		 */
+		if (io->read == act_read) {
+			memcpy((void *)ipa_reg_save.pkt_ctntx,
+				   (const void *)
+				   (ipa3_ctx->reg_collection_base + ofst),
+				   sizeof(ipa_reg_save.pkt_ctntx));
+
+			for_cfg.value =
+				IPA_READ_SCALER_REG(IPA_RSRC_MNGR_DB_CFG);
+
+			for_cfg.def.rsrc_type_sel = 0;
+
+			IPA_MASKED_WRITE_SCALER_REG(
+				IPA_RSRC_MNGR_DB_CFG,
+				for_cfg.value);
+
+			for (i = 0; i < IPA_HW_PKT_CTNTX_MAX; i++) {
+				for_cfg.def.rsrc_id_sel = i;
+
+				IPA_MASKED_WRITE_SCALER_REG(
+					IPA_RSRC_MNGR_DB_CFG,
+					for_cfg.value);
+
+				for_read.value =
+					IPA_READ_SCALER_REG(
+						IPA_RSRC_MNGR_DB_RSRC_READ);
+
+				if (for_read.def.rsrc_occupied) {
+					ipa_reg_save.pkt_ctntx_active[i] = true;
+					ipa_reg_save.pkt_cntxt_state[i] =
+						(enum ipa_hw_pkt_cntxt_state_e)
+						ipa_reg_save.pkt_ctntx[i].state;
+				}
+			}
+		} else {
+			IPAERR("IPA_CTX_ID is not currently accessible\n");
+		}
+	}
+
+	ipa_reg_save_anomaly_check();
+
+	IPAERR("Completed\n");
+}
+
+/*
+ * FUNCTION:  ipa_reg_save_gsi_fifo_status
+ *
+ * This function saves the GSI FIFO Status registers for all endpoints
+ *
+ * @param
+ *
+ * @return
+ */
+static void ipa_reg_save_gsi_fifo_status(void)
+{
+	union ipa_hwio_def_ipa_gsi_fifo_status_ctrl_u gsi_fifo_status_ctrl;
+	u8 i;
+
+	memset(&gsi_fifo_status_ctrl, 0, sizeof(gsi_fifo_status_ctrl));
+
+	for (i = 0; i < IPA_HW_PIPE_ID_MAX; i++) {
+		gsi_fifo_status_ctrl.def.ipa_gsi_fifo_status_en = 1;
+		gsi_fifo_status_ctrl.def.ipa_gsi_fifo_status_port_sel = i;
+
+		IPA_MASKED_WRITE_SCALER_REG(IPA_GSI_FIFO_STATUS_CTRL,
+				     gsi_fifo_status_ctrl.value);
+
+		ipa_reg_save.gsi_fifo_status[i].gsi_fifo_status_ctrl.value =
+			IPA_READ_SCALER_REG(IPA_GSI_FIFO_STATUS_CTRL);
+		ipa_reg_save.gsi_fifo_status[i].gsi_tlv_fifo_status.value =
+			IPA_READ_SCALER_REG(IPA_GSI_TLV_FIFO_STATUS);
+		ipa_reg_save.gsi_fifo_status[i].gsi_aos_fifo_status.value =
+			IPA_READ_SCALER_REG(IPA_GSI_AOS_FIFO_STATUS);
+	}
+}
+
+/*
+ * FUNCTION:  ipa_reg_save_rsrc_cnts
+ *
+ * This function saves the resource counts for all PCIE and DDR
+ * resource groups.
+ *
+ * @param
+ * @return
+ */
+static void ipa_reg_save_rsrc_cnts(void)
+{
+	union ipa_hwio_def_ipa_src_rsrc_grp_0123_rsrc_type_cnt_n_u
+		src_0123_rsrc_cnt;
+	union ipa_hwio_def_ipa_dst_rsrc_grp_0123_rsrc_type_cnt_n_u
+		dst_0123_rsrc_cnt;
+
+	ipa_reg_save.rsrc_cnts.pcie.resource_group = IPA_HW_PCIE_SRC_RSRP_GRP;
+	ipa_reg_save.rsrc_cnts.ddr.resource_group = IPA_HW_DDR_SRC_RSRP_GRP;
+
+	src_0123_rsrc_cnt.value =
+		IPA_READ_1xVECTOR_REG(IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n, 0);
+
+	ipa_reg_save.rsrc_cnts.pcie.src.pkt_cntxt =
+		src_0123_rsrc_cnt.def.src_rsrc_grp_0_cnt;
+	ipa_reg_save.rsrc_cnts.ddr.src.pkt_cntxt =
+		src_0123_rsrc_cnt.def.src_rsrc_grp_1_cnt;
+
+	src_0123_rsrc_cnt.value =
+		IPA_READ_1xVECTOR_REG(IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n, 1);
+
+	ipa_reg_save.rsrc_cnts.pcie.src.descriptor_list =
+		src_0123_rsrc_cnt.def.src_rsrc_grp_0_cnt;
+	ipa_reg_save.rsrc_cnts.ddr.src.descriptor_list =
+		src_0123_rsrc_cnt.def.src_rsrc_grp_1_cnt;
+
+	src_0123_rsrc_cnt.value =
+		IPA_READ_1xVECTOR_REG(IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n, 2);
+
+	ipa_reg_save.rsrc_cnts.pcie.src.data_descriptor_buffer =
+		src_0123_rsrc_cnt.def.src_rsrc_grp_0_cnt;
+	ipa_reg_save.rsrc_cnts.ddr.src.data_descriptor_buffer =
+		src_0123_rsrc_cnt.def.src_rsrc_grp_1_cnt;
+
+	src_0123_rsrc_cnt.value =
+		IPA_READ_1xVECTOR_REG(IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n, 3);
+
+	ipa_reg_save.rsrc_cnts.pcie.src.hps_dmars =
+		src_0123_rsrc_cnt.def.src_rsrc_grp_0_cnt;
+	ipa_reg_save.rsrc_cnts.ddr.src.hps_dmars =
+		src_0123_rsrc_cnt.def.src_rsrc_grp_1_cnt;
+
+	src_0123_rsrc_cnt.value =
+		IPA_READ_1xVECTOR_REG(IPA_SRC_RSRC_GRP_0123_RSRC_TYPE_CNT_n, 4);
+
+	ipa_reg_save.rsrc_cnts.pcie.src.reserved_acks =
+		src_0123_rsrc_cnt.def.src_rsrc_grp_0_cnt;
+	ipa_reg_save.rsrc_cnts.ddr.src.reserved_acks =
+		src_0123_rsrc_cnt.def.src_rsrc_grp_1_cnt;
+
+	dst_0123_rsrc_cnt.value =
+		IPA_READ_1xVECTOR_REG(IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n, 0);
+
+	ipa_reg_save.rsrc_cnts.pcie.dst.reserved_sectors =
+		dst_0123_rsrc_cnt.def.dst_rsrc_grp_0_cnt;
+	ipa_reg_save.rsrc_cnts.ddr.dst.reserved_sectors =
+		dst_0123_rsrc_cnt.def.dst_rsrc_grp_1_cnt;
+
+	dst_0123_rsrc_cnt.value =
+		IPA_READ_1xVECTOR_REG(IPA_DST_RSRC_GRP_0123_RSRC_TYPE_CNT_n, 1);
+
+	ipa_reg_save.rsrc_cnts.pcie.dst.dps_dmars =
+		dst_0123_rsrc_cnt.def.dst_rsrc_grp_0_cnt;
+	ipa_reg_save.rsrc_cnts.ddr.dst.dps_dmars =
+		dst_0123_rsrc_cnt.def.dst_rsrc_grp_1_cnt;
+}
+
+/*
+ * FUNCTION:  ipa_reg_save_rsrc_cnts_test_bus
+ *
+ * This function saves the resource counts for all PCIE and DDR
+ * resource groups collected from test bus.
+ *
+ * @param
+ *
+ * @return
+ */
+void ipa_reg_save_rsrc_cnts_test_bus(void)
+{
+	int32_t rsrc_type = 0;
+
+	ipa_reg_save.rsrc_cnts.pcie.resource_group = IPA_HW_PCIE_SRC_RSRP_GRP;
+	ipa_reg_save.rsrc_cnts.ddr.resource_group = IPA_HW_DDR_SRC_RSRP_GRP;
+
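+	/*
+	 * Test-bus rsrc_type indices 0-4 mirror the source resource
+	 * types above; indices 5 and 6 select the destination
+	 * reserved-sector and DPS DMAR counts.
+	 */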
+	rsrc_type = 0;
+	ipa_reg_save.rsrc_cnts.pcie.src.pkt_cntxt =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_PCIE_SRC_RSRP_GRP);
+
+	ipa_reg_save.rsrc_cnts.ddr.src.pkt_cntxt =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_DDR_SRC_RSRP_GRP);
+
+	rsrc_type = 1;
+	ipa_reg_save.rsrc_cnts.pcie.src.descriptor_list =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_PCIE_SRC_RSRP_GRP);
+
+	ipa_reg_save.rsrc_cnts.ddr.src.descriptor_list =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_DDR_SRC_RSRP_GRP);
+
+	rsrc_type = 2;
+	ipa_reg_save.rsrc_cnts.pcie.src.data_descriptor_buffer =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_PCIE_SRC_RSRP_GRP);
+
+	ipa_reg_save.rsrc_cnts.ddr.src.data_descriptor_buffer =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_DDR_SRC_RSRP_GRP);
+
+	rsrc_type = 3;
+	ipa_reg_save.rsrc_cnts.pcie.src.hps_dmars =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_PCIE_SRC_RSRP_GRP);
+
+	ipa_reg_save.rsrc_cnts.ddr.src.hps_dmars =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_DDR_SRC_RSRP_GRP);
+
+	rsrc_type = 4;
+	ipa_reg_save.rsrc_cnts.pcie.src.reserved_acks =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_PCIE_SRC_RSRP_GRP);
+
+	ipa_reg_save.rsrc_cnts.ddr.src.reserved_acks =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_DDR_SRC_RSRP_GRP);
+
+	rsrc_type = 5;
+	ipa_reg_save.rsrc_cnts.pcie.dst.reserved_sectors =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_PCIE_DEST_RSRP_GRP);
+
+	ipa_reg_save.rsrc_cnts.ddr.dst.reserved_sectors =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_DDR_DEST_RSRP_GRP);
+
+	rsrc_type = 6;
+	ipa_reg_save.rsrc_cnts.pcie.dst.dps_dmars =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_PCIE_DEST_RSRP_GRP);
+
+	ipa_reg_save.rsrc_cnts.ddr.dst.dps_dmars =
+		IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type,
+						    IPA_HW_DDR_DEST_RSRP_GRP);
+}
+
+/*
+ * FUNCTION:  ipa_hal_save_regs_ipa_cmdq
+ *
+ * This function saves the various IPA CMDQ registers
+ *
+ * @param
+ *
+ * @return
+ */
+static void ipa_hal_save_regs_ipa_cmdq(void)
+{
+	int32_t i;
+	union ipa_hwio_def_ipa_rx_hps_cmdq_cmd_u rx_hps_cmdq_cmd = { { 0 } };
+	union ipa_hwio_def_ipa_hps_dps_cmdq_cmd_u hps_dps_cmdq_cmd = { { 0 } };
+	union ipa_hwio_def_ipa_dps_tx_cmdq_cmd_u dps_tx_cmdq_cmd = { { 0 } };
+	union ipa_hwio_def_ipa_ackmngr_cmdq_cmd_u ackmngr_cmdq_cmd = { { 0 } };
+	union ipa_hwio_def_ipa_prod_ackmngr_cmdq_cmd_u
+		prod_ackmngr_cmdq_cmd = { { 0 } };
+	union ipa_hwio_def_ipa_ntf_tx_cmdq_cmd_u ntf_tx_cmdq_cmd = { { 0 } };
+
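+	/*
+	 * Each CMDQ below is sampled per client in two passes:
+	 * rd_req = 0 latches the COUNT/STATUS view for the selected
+	 * client, then rd_req = 1 issues a read request so that the
+	 * DATA_RD registers can be captured.
+	 */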
+	/* Save RX_HPS CMDQ   */
+	for (i = 0; i < IPA_DEBUG_CMDQ_HPS_SELECT_NUM_GROUPS; i++) {
+		rx_hps_cmdq_cmd.def.rd_req = 0;
+		rx_hps_cmdq_cmd.def.cmd_client = i;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_RX_HPS_CMDQ_CMD,
+				     rx_hps_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_rx_hps_cmdq_count_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_RX_HPS_CMDQ_COUNT);
+		ipa_reg_save.ipa.dbg.ipa_rx_hps_cmdq_status_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_RX_HPS_CMDQ_STATUS);
+		rx_hps_cmdq_cmd.def.rd_req = 1;
+		rx_hps_cmdq_cmd.def.cmd_client = i;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_RX_HPS_CMDQ_CMD,
+				     rx_hps_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_rx_hps_cmdq_data_rd_0_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_RX_HPS_CMDQ_DATA_RD_0);
+		ipa_reg_save.ipa.dbg.ipa_rx_hps_cmdq_data_rd_1_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_RX_HPS_CMDQ_DATA_RD_1);
+		ipa_reg_save.ipa.dbg.ipa_rx_hps_cmdq_data_rd_2_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_RX_HPS_CMDQ_DATA_RD_2);
+		ipa_reg_save.ipa.dbg.ipa_rx_hps_cmdq_data_rd_3_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_RX_HPS_CMDQ_DATA_RD_3);
+	}
+
+	/* Save HPS_DPS CMDQ   */
+	for (i = 0; i < IPA_TESTBUS_SEL_EP_MAX + 1; i++) {
+		hps_dps_cmdq_cmd.def.rd_req = 0;
+		hps_dps_cmdq_cmd.def.cmd_client = i;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_HPS_DPS_CMDQ_CMD,
+				     hps_dps_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_hps_dps_cmdq_status_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_HPS_DPS_CMDQ_STATUS);
+		ipa_reg_save.ipa.dbg.ipa_hps_dps_cmdq_count_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_HPS_DPS_CMDQ_COUNT);
+
+		hps_dps_cmdq_cmd.def.rd_req = 1;
+		hps_dps_cmdq_cmd.def.cmd_client = i;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_HPS_DPS_CMDQ_CMD,
+				     hps_dps_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_hps_dps_cmdq_data_rd_0_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_HPS_DPS_CMDQ_DATA_RD_0);
+	}
+
+	/* Save DPS_TX CMDQ   */
+	for (i = 0; i < IPA_DEBUG_CMDQ_DPS_SELECT_NUM_GROUPS; i++) {
+		dps_tx_cmdq_cmd.def.cmd_client = i;
+		dps_tx_cmdq_cmd.def.rd_req = 0;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_DPS_TX_CMDQ_CMD,
+				     dps_tx_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_dps_tx_cmdq_status_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_DPS_TX_CMDQ_STATUS);
+		ipa_reg_save.ipa.dbg.ipa_dps_tx_cmdq_count_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_DPS_TX_CMDQ_COUNT);
+
+		dps_tx_cmdq_cmd.def.cmd_client = i;
+		dps_tx_cmdq_cmd.def.rd_req = 1;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_DPS_TX_CMDQ_CMD,
+				     dps_tx_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_dps_tx_cmdq_data_rd_0_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_DPS_TX_CMDQ_DATA_RD_0);
+	}
+
+	/* Save ACKMNGR CMDQ   */
+	for (i = 0; i < IPA_DEBUG_CMDQ_DPS_SELECT_NUM_GROUPS; i++) {
+		ackmngr_cmdq_cmd.def.rd_req = 0;
+		ackmngr_cmdq_cmd.def.cmd_client = i;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_ACKMNGR_CMDQ_CMD,
+				     ackmngr_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_ackmngr_cmdq_status_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_ACKMNGR_CMDQ_STATUS);
+		ipa_reg_save.ipa.dbg.ipa_ackmngr_cmdq_count_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_ACKMNGR_CMDQ_COUNT);
+
+		ackmngr_cmdq_cmd.def.rd_req = 1;
+		ackmngr_cmdq_cmd.def.cmd_client = i;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_ACKMNGR_CMDQ_CMD,
+				     ackmngr_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_ackmngr_cmdq_data_rd_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_ACKMNGR_CMDQ_DATA_RD);
+	}
+
+	/* Save PROD ACKMNGR CMDQ   */
+	for (i = 0; i < IPA_TESTBUS_SEL_EP_MAX + 1; i++) {
+		prod_ackmngr_cmdq_cmd.def.rd_req = 0;
+		prod_ackmngr_cmdq_cmd.def.cmd_client = i;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_PROD_ACKMNGR_CMDQ_CMD,
+				     prod_ackmngr_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_prod_ackmngr_cmdq_status_arr[i].value
+			= IPA_READ_SCALER_REG(
+				IPA_PROD_ACKMNGR_CMDQ_STATUS);
+		ipa_reg_save.ipa.dbg.ipa_prod_ackmngr_cmdq_count_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_PROD_ACKMNGR_CMDQ_COUNT);
+		prod_ackmngr_cmdq_cmd.def.rd_req = 1;
+		prod_ackmngr_cmdq_cmd.def.cmd_client = i;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_PROD_ACKMNGR_CMDQ_CMD,
+				     prod_ackmngr_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_prod_ackmngr_cmdq_data_rd_arr[
+			i].value =
+			IPA_READ_SCALER_REG(
+				IPA_PROD_ACKMNGR_CMDQ_DATA_RD);
+	}
+
+	/* Save NTF_TX CMDQ   */
+	for (i = 0; i < IPA_TESTBUS_SEL_EP_MAX + 1; i++) {
+		ntf_tx_cmdq_cmd.def.rd_req = 0;
+		ntf_tx_cmdq_cmd.def.cmd_client = i;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_NTF_TX_CMDQ_CMD,
+				     ntf_tx_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_ntf_tx_cmdq_status_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_NTF_TX_CMDQ_STATUS);
+		ipa_reg_save.ipa.dbg.ipa_ntf_tx_cmdq_count_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_NTF_TX_CMDQ_COUNT);
+		ntf_tx_cmdq_cmd.def.rd_req = 1;
+		ntf_tx_cmdq_cmd.def.cmd_client = i;
+		IPA_MASKED_WRITE_SCALER_REG(IPA_NTF_TX_CMDQ_CMD,
+				     ntf_tx_cmdq_cmd.value);
+		ipa_reg_save.ipa.dbg.ipa_ntf_tx_cmdq_data_rd_0_arr[i].value =
+			IPA_READ_SCALER_REG(IPA_NTF_TX_CMDQ_DATA_RD_0);
+	}
+}
+
+/*
+ * FUNCTION:  ipa_hal_save_regs_save_ipa_testbus
+ *
+ * This function saves the IPA testbus
+ *
+ * @param
+ *
+ * @return
+ */
+static void ipa_hal_save_regs_save_ipa_testbus(void)
+{
+	s32 sel_internal, sel_external, sel_ep;
+	union ipa_hwio_def_ipa_testbus_sel_u testbus_sel = { { 0 } };
+
+	if (ipa_reg_save.ipa.testbus == NULL) {
+		/*
+		 * Test-bus structure not allocated - exit test-bus collection
+		 */
+		IPADBG("ipa_reg_save.ipa.testbus was not allocated\n");
+		return;
+	}
+
+	/* Enable Test-bus */
+	testbus_sel.value = 0;
+	testbus_sel.def.testbus_en = true;
+
+	IPA_WRITE_SCALER_REG(IPA_TESTBUS_SEL, testbus_sel.value);
+
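+	/* Collect the global (non pipe-specific) test bus */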
+	for (sel_external = 0;
+		 sel_external <= IPA_TESTBUS_SEL_EXTERNAL_MAX;
+		 sel_external++) {
+
+		for (sel_internal = 0;
+			 sel_internal <= IPA_TESTBUS_SEL_INTERNAL_MAX;
+			 sel_internal++) {
+
+			testbus_sel.def.pipe_select = 0;
+			testbus_sel.def.external_block_select =
+				sel_external;
+			testbus_sel.def.internal_block_select =
+				sel_internal;
+
+			IPA_MASKED_WRITE_SCALER_REG(
+				IPA_TESTBUS_SEL,
+				testbus_sel.value);
+
+			ipa_reg_save.ipa.testbus->global.global[
+				sel_internal][sel_external].testbus_sel.value =
+				testbus_sel.value;
+
+			ipa_reg_save.ipa.testbus->global.global[
+				sel_internal][sel_external].testbus_data.value =
+				IPA_READ_SCALER_REG(IPA_DEBUG_DATA);
+		}
+	}
+
+	/* Collect per EP test bus */
+	for (sel_ep = 0;
+		 sel_ep <= IPA_TESTBUS_SEL_EP_MAX;
+		 sel_ep++) {
+
+		for (sel_external = 0;
+			 sel_external <=
+				 IPA_TESTBUS_SEL_EXTERNAL_MAX;
+			 sel_external++) {
+
+			for (sel_internal = 0;
+				 sel_internal <=
+					 IPA_TESTBUS_SEL_INTERNAL_PIPE_MAX;
+				 sel_internal++) {
+
+				testbus_sel.def.pipe_select = sel_ep;
+				testbus_sel.def.external_block_select =
+					sel_external;
+				testbus_sel.def.internal_block_select =
+					sel_internal;
+
+				IPA_MASKED_WRITE_SCALER_REG(
+					IPA_TESTBUS_SEL,
+					testbus_sel.value);
+
+				ipa_reg_save.ipa.testbus->ep[sel_ep].entry_ep[
+					sel_internal][sel_external].
+					testbus_sel.value =
+					testbus_sel.value;
+
+				ipa_reg_save.ipa.testbus->ep[sel_ep].entry_ep[
+					sel_internal][sel_external].
+					testbus_data.value =
+					IPA_READ_SCALER_REG(
+						IPA_DEBUG_DATA);
+			}
+		}
+	}
+
+	/* Disable Test-bus */
+	testbus_sel.value = 0;
+
+	IPA_WRITE_SCALER_REG(
+		IPA_TESTBUS_SEL,
+		testbus_sel.value);
+}
+
+/*
+ * FUNCTION:  ipa_reg_save_init
+ *
+ * This function initializes and memsets the register save struct.
+ *
+ * @param
+ * value: the fill value passed to memset for the register save struct
+ *
+ * @return 0 on success, negative value on ioremap failure
+ */
+int ipa_reg_save_init(u32 value)
+{
+	u32 i, num_regs = ARRAY_SIZE(ipa_regs_to_save_array);
+
+	if (!ipa3_ctx->do_register_collection_on_crash)
+		return 0;
+
+	memset(&ipa_reg_save, value, sizeof(ipa_reg_save));
+
+	ipa_reg_save.ipa.testbus = NULL;
+
+	if (ipa3_ctx->do_testbus_collection_on_crash) {
+		ipa_reg_save.ipa.testbus =
+		    (struct ipa_reg_save_ipa_testbus_s *) ipa_testbus_mem;
+	}
+
+	/* setup access for register collection/dump on crash */
+	IPADBG("Mapping 0x%x bytes starting at 0x%x\n",
+	       ipa3_ctx->entire_ipa_block_size,
+	       ipa3_ctx->ipa_wrapper_base);
+
+	ipa3_ctx->reg_collection_base =
+		ioremap(ipa3_ctx->ipa_wrapper_base,
+			ipa3_ctx->entire_ipa_block_size);
+
+	if (!ipa3_ctx->reg_collection_base) {
+		IPAERR(":register collection ioremap err\n");
+		return -EFAULT;
+	}
+
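+	/*
+	 * The tail of ipa_regs_to_save_array holds the placeholder
+	 * entries for the extra endpoint registers; back num_regs up
+	 * to the start of that tail and zero each entry's destination.
+	 */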
+	num_regs -= (CONFIG_IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS *
+		     IPA_REG_SAVE_NUM_EXTRA_ENDP_REGS);
+
+	for (i = 0; i < (CONFIG_IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS *
+			 IPA_REG_SAVE_NUM_EXTRA_ENDP_REGS); i++)
+		*(ipa_regs_to_save_array[num_regs + i].dst_addr) = 0x0;
+
+	return 0;
+}
+
+/*
+ * FUNCTION:  ipa_hal_save_regs_rsrc_db
+ *
+ * This function saves the various IPA RSRC_MNGR_DB registers
+ *
+ * @param
+ *
+ * @return
+ */
+static void ipa_hal_save_regs_rsrc_db(void)
+{
+	u32 rsrc_type = 0;
+	u32 rsrc_id = 0;
+	u32 rsrc_group = 0;
+	union ipa_hwio_def_ipa_rsrc_mngr_db_cfg_u
+		ipa_rsrc_mngr_db_cfg = { { 0 } };
+
+	ipa_rsrc_mngr_db_cfg.def.rsrc_grp_sel = rsrc_group;
+
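+	/*
+	 * For every (type, id) pair: program the DB selector, then
+	 * capture both the resource-read and list-read views.
+	 */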
+	for (rsrc_type = 0; rsrc_type <= IPA_RSCR_MNGR_DB_RSRC_TYPE_MAX;
+	     rsrc_type++) {
+		for (rsrc_id = 0; rsrc_id <= IPA_RSCR_MNGR_DB_RSRC_ID_MAX;
+		     rsrc_id++) {
+			ipa_rsrc_mngr_db_cfg.def.rsrc_id_sel = rsrc_id;
+			ipa_rsrc_mngr_db_cfg.def.rsrc_type_sel = rsrc_type;
+			IPA_MASKED_WRITE_SCALER_REG(IPA_RSRC_MNGR_DB_CFG,
+					     ipa_rsrc_mngr_db_cfg.value);
+			ipa_reg_save.ipa.dbg.ipa_rsrc_mngr_db_rsrc_read_arr
+			    [rsrc_type][rsrc_id].value =
+			    IPA_READ_SCALER_REG(
+					IPA_RSRC_MNGR_DB_RSRC_READ);
+			ipa_reg_save.ipa.dbg.ipa_rsrc_mngr_db_list_read_arr
+			    [rsrc_type][rsrc_id].value =
+			    IPA_READ_SCALER_REG(
+					IPA_RSRC_MNGR_DB_LIST_READ);
+		}
+	}
+}
+
+/*
+ * FUNCTION:  ipa_reg_save_anomaly_check
+ *
+ * Checks RX state and TX state upon crash dump collection and prints
+ * anomalies.
+ *
+ * TBD: Add more anomaly checks in the future.
+ *
+ * @return
+ */
+static void ipa_reg_save_anomaly_check(void)
+{
+	if ((ipa_reg_save.ipa.gen.ipa_state.rx_wait != 0)
+	    || (ipa_reg_save.ipa.gen.ipa_state.rx_idle != 1)) {
+		IPADBG(
+		    "RX ACTIVITY, ipa_state.rx_wait = %d, ipa_state.rx_idle = %d, ipa_state_rx_active.endpoints = %d (bitmask)\n",
+		    ipa_reg_save.ipa.gen.ipa_state.rx_wait,
+		    ipa_reg_save.ipa.gen.ipa_state.rx_idle,
+		    ipa_reg_save.ipa.gen.ipa_state_rx_active.endpoints);
+
+		if (ipa_reg_save.ipa.gen.ipa_state.tx_idle != 1) {
+			IPADBG(
+			    "TX ACTIVITY, ipa_state.idle = %d, ipa_state_tx_wrapper.tx0_idle = %d, ipa_state_tx_wrapper.tx1_idle = %d\n",
+			    ipa_reg_save.ipa.gen.ipa_state.tx_idle,
+			    ipa_reg_save.ipa.gen.ipa_state_tx_wrapper.tx0_idle,
+			    ipa_reg_save.ipa.gen.ipa_state_tx_wrapper.tx1_idle);
+
+			IPADBG(
+			    "ipa_state_tx0.last_cmd_pipe = %d, ipa_state_tx1.last_cmd_pipe = %d\n",
+			    ipa_reg_save.ipa.gen.ipa_state_tx0.last_cmd_pipe,
+			    ipa_reg_save.ipa.gen.ipa_state_tx1.last_cmd_pipe);
+		}
+	}
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.h b/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.h
new file mode 100644
index 0000000..9e7258f
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.h
@@ -0,0 +1,1416 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#if !defined(_IPA_REG_DUMP_H_)
+#define _IPA_REG_DUMP_H_
+
+#include <linux/types.h>
+#include <linux/string.h>
+
+#include "ipa_i.h"
+
+#include "ipa_pkt_cntxt.h"
+#include "ipa_hw_common_ex.h"
+
+/*
+ * The following macros are used to peek and poke register values and
+ * are required by some of the macros and include files that follow...
+ */
+#define my_in_dword(addr) \
+	({ u32 __v = readl_relaxed((addr)); __iormb(); __v; })
+
+#define my_out_dword(addr, val) \
+	({ __iowmb(); writel_relaxed((val), (addr)); })
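+/*
+ * Illustrative use (not actual driver code): the hwio headers below
+ * expand their IN/OUT helpers in terms of these accessors, e.g.:
+ *
+ *   u32 val = my_in_dword(reg_virt_addr);
+ *   my_out_dword(reg_virt_addr, val);
+ *
+ * where reg_virt_addr is a hypothetical ioremapped register address.
+ */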
+
+#define IPA_0_IPA_WRAPPER_BASE 0 /* required by following includes */
+
+#include "ipa_hwio.h"
+#include "gsi_hwio.h"
+#include "ipa_gcc_hwio.h"
+
+#include "ipa_hwio_def.h"
+#include "gsi_hwio_def.h"
+#include "ipa_gcc_hwio_def.h"
+
+#define IPA_DEBUG_CMDQ_DPS_SELECT_NUM_GROUPS     0x6
+#define IPA_DEBUG_CMDQ_HPS_SELECT_NUM_GROUPS     0x4
+#define IPA_DEBUG_TESTBUS_RSRC_NUM_EP            7
+#define IPA_DEBUG_TESTBUS_RSRC_NUM_GRP           3
+#define IPA_TESTBUS_SEL_EP_MAX                   0x1F
+#define IPA_TESTBUS_SEL_EXTERNAL_MAX             0x40
+#define IPA_TESTBUS_SEL_INTERNAL_MAX             0xFF
+#define IPA_TESTBUS_SEL_INTERNAL_PIPE_MAX        0x40
+#define IPA_DEBUG_CMDQ_ACK_SELECT_NUM_GROUPS     0x9
+#define IPA_RSCR_MNGR_DB_RSRC_ID_MAX             0x3F
+#define IPA_RSCR_MNGR_DB_RSRC_TYPE_MAX           0xA
+
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_ZEROS   (0x0)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_0   (0x1)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_1   (0x2)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_2   (0x3)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_3   (0x4)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_4   (0x5)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_DB_ENG  (0x9)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_0   (0xB)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_1   (0xC)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_2   (0xD)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_3   (0xE)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_0   (0x13)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_1   (0x14)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_2   (0x15)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_3   (0x16)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_4   (0x17)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_5   (0x18)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IE_0    (0x1B)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IE_1    (0x1C)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_0    (0x1F)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_1    (0x20)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_2    (0x21)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_3    (0x22)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_4    (0x23)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_0  (0x27)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_1  (0x28)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_2  (0x29)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_3  (0x2A)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_0   (0x2B)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_1   (0x2C)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_2   (0x2D)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_3   (0x2E)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_0 (0x33)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_1 (0x34)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_2 (0x35)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_3 (0x36)
+#define HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_CSR     (0x3A)
+
+#define IPA_DEBUG_TESTBUS_DEF_EXTERNAL           50
+#define IPA_DEBUG_TESTBUS_DEF_INTERNAL           6
+
+#define IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM        8
+
+#define IPA_REG_SAVE_GSI_NUM_EE                  3
+
+#define IPA_REG_SAVE_NUM_EXTRA_ENDP_REGS         22
+
+#define IPA_DEBUG_TESTBUS_RSRC_TYPE_CNT_BIT_MASK 0x7E000
+#define IPA_DEBUG_TESTBUS_RSRC_TYPE_CNT_SHIFT    13
+
+#define IPA_REG_SAVE_HWP_GSI_EE                  2
+
+/*
+ * A structure used to map a source address to a destination address...
+ */
+struct map_src_dst_addr_s {
+	u32  src_addr; /* register offset to copy value from */
+	u32 *dst_addr; /* memory address to copy register value to */
+};
+
+/*
+ * A macro to generate the names of scalar (i.e. non-vector) registers
+ * that reside in the *hwio.h files (said files contain the manifest
+ * constants for the registers' offsets in the register memory map).
+ */
+#define GEN_SCALER_REG_OFST(reg_name) \
+	(HWIO_ ## reg_name ## _ADDR)
+/*
+ * A macro designed to generate the rmsk associated with reg_name
+ */
+#define GEN_SCALER_REG_RMSK(reg_name) \
+	(HWIO_ ## reg_name ## _RMSK)
+
+/*
+ * A macro to generate the names of vector registers that reside in
+ * the *hwio.h files (said files contain the manifest constants for
+ * the registers' offsets in the register memory map). More
+ * specifically, this macro will generate access to registers that are
+ * addressed via one dimension.
+ */
+#define GEN_1xVECTOR_REG_OFST(reg_name, row) \
+	(HWIO_ ## reg_name ## _ADDR(row))
+
+/*
+ * A macro to generate the names of vector registers that reside in
+ * the *hwio.h files (said files contain the manifest constants for
+ * the registers' offsets in the register memory map). More
+ * specifically, this macro will generate access to registers that are
+ * addressed via two dimensions.
+ */
+#define GEN_2xVECTOR_REG_OFST(reg_name, row, col) \
+	(HWIO_ ## reg_name ## _ADDR(row, col))
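+
+/*
+ * For example (illustrative only):
+ *   GEN_1xVECTOR_REG_OFST(IPA_ENDP_INIT_CFG_n, 3)
+ * expands to HWIO_IPA_ENDP_INIT_CFG_n_ADDR(3).
+ */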
+
+/*
+ * A macro to generate the access to scalar registers that reside in
+ * the *hwio.h files (said files contain the manifest constants for
+ * the registers' offsets in the register memory map). More
+ * specifically, this macro will generate read access from a scalar
+ * register.
+ */
+#define IPA_READ_SCALER_REG(reg_name) \
+	HWIO_ ## reg_name ## _IN
+
+/*
+ * A macro to generate the access to vector registers that reside in
+ * the *hwio.h files (said files contain the manifest constants for
+ * the registers' offsets in the register memory map). More
+ * specifically, this macro will generate read access from a one
+ * dimensional vector register...
+ */
+#define IPA_READ_1xVECTOR_REG(reg_name, row) \
+	HWIO_ ## reg_name ## _INI(row)
+
+/*
+ * A macro to generate the access to vector registers that reside in
+ * the *hwio.h files (said files contain the manifest constants for
+ * the registers' offsets in the register memory map). More
+ * specifically, this macro will generate read access from a two
+ * dimensional vector register...
+ */
+#define IPA_READ_2xVECTOR_REG(reg_name, row, col) \
+	HWIO_ ## reg_name ## _INI2(row, col)
+
+/*
+ * A macro to generate the access to scalar registers that reside in
+ * the *hwio.h files (said files contain the manifest constants for
+ * the registers' offsets in the register memory map). More
+ * specifically, this macro will generate write access to a scalar
+ * register.
+ */
+#define IPA_WRITE_SCALER_REG(reg_name, val) \
+	HWIO_ ## reg_name ## _OUT(val)
+
+/*
+ * Similar to the above, but with val masked by the register's rmsk,
+ * so that only bits defined for the register are written...
+ */
+#define IPA_MASKED_WRITE_SCALER_REG(reg_name, val) \
+	out_dword(GEN_SCALER_REG_OFST(reg_name), \
+			  (GEN_SCALER_REG_RMSK(reg_name) & (val)))
+
+/*
+ * A macro to generate the access to vector registers that reside in
+ * the *hwio.h files (said files contain the manifest constants for
+ * the registers' offsets in the register memory map). More
+ * specifically, this macro will generate write access to a one
+ * dimensional vector register...
+ */
+#define IPA_WRITE_1xVECTOR_REG(reg_name, row, val) \
+	HWIO_ ## reg_name ## _OUTI(row, val)
+
+/*
+ * A macro to generate the access to vector registers that reside in
+ * the *hwio.h files (said files contain the manifest constants for
+ * the registers' offsets in the register memory map). More
+ * specifically, this macro will generate write access to a two
+ * dimensional vector register...
+ */
+#define IPA_WRITE_2xVECTOR_REG(reg_name, row, col, val) \
+	HWIO_ ## reg_name ## _OUTI2(row, col, val)
+
+/*
+ * Macro that helps generate a mapping between a register's address
+ * and where the register's value will get stored (i.e. source and
+ * destination address mapping) upon dump...
+ */
+#define GEN_SRC_DST_ADDR_MAP(reg_name, sub_struct, field_name) \
+	{ GEN_SCALER_REG_OFST(reg_name), \
+	  (u32 *)&ipa_reg_save.sub_struct.field_name }
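+
+/*
+ * For example (illustrative only):
+ *   GEN_SRC_DST_ADDR_MAP(IPA_STATE, ipa.gen, ipa_state)
+ * yields { HWIO_IPA_STATE_ADDR,
+ *          (u32 *)&ipa_reg_save.ipa.gen.ipa_state }.
+ */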
+
+/*
+ * Macro to get the value of bits 18:13, used to get rsrc cnts from
+ * IPA_DEBUG_DATA
+ */
+#define IPA_DEBUG_TESTBUS_DATA_GET_RSRC_CNT_BITS_FROM_DEBUG_DATA(x) \
+	((x & IPA_DEBUG_TESTBUS_RSRC_TYPE_CNT_BIT_MASK) >> \
+	 IPA_DEBUG_TESTBUS_RSRC_TYPE_CNT_SHIFT)
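+
+/*
+ * For example (illustrative): with IPA_DEBUG_DATA = 0x2A000, masking
+ * with 0x7E000 and shifting right by 13 yields a count of 0x15 (21).
+ */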
+
+/*
+ * Macro to get the rsrc cnt of a specific rsrc type and rsrc grp from
+ * collected test bus data
+ */
+#define IPA_DEBUG_TESTBUS_GET_RSRC_TYPE_CNT(rsrc_type, rsrc_grp) \
+	IPA_DEBUG_TESTBUS_DATA_GET_RSRC_CNT_BITS_FROM_DEBUG_DATA( \
+		ipa_reg_save.ipa.testbus->ep_rsrc[rsrc_type].entry_ep \
+		[rsrc_grp].testbus_data.value)
+
+/*
+ * Macro to pluck the GSI version from RAM.
+ */
+#define IPA_REG_SAVE_GSI_VER(reg_name, var_name)	\
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 0), \
+		(u32 *)&ipa_reg_save.gsi.gen.var_name }
+/*
+ * Macro to define the register cfg entries of an EE-indexed register
+ * for all 3 EEs
+ */
+#define IPA_REG_SAVE_CFG_ENTRY_GEN_EE(reg_name, var_name) \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE), \
+		(u32 *)&ipa_reg_save.ipa.gen_ee[IPA_HW_Q6_EE].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE), \
+		(u32 *)&ipa_reg_save.ipa.gen_ee[IPA_HW_A7_EE].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, IPA_HW_HWP_EE), \
+		(u32 *)&ipa_reg_save.ipa.gen_ee[IPA_HW_HWP_EE].var_name }
+
+#define IPA_REG_SAVE_CFG_ENTRY_GSI_FIFO(reg_name, var_name, index) \
+	{ GEN_SCALER_REG_OFST(reg_name), \
+		(u32 *)&ipa_reg_save.ipa.gsi_fifo_status[index].var_name }
+
+/*
+ * Macro to define the register cfg entries of a pipe-indexed register
+ * for all pipes
+ */
+#define IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(reg_name, var_name) \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 0), \
+		(u32 *)&ipa_reg_save.ipa.pipes[0].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 1), \
+		(u32 *)&ipa_reg_save.ipa.pipes[1].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 2), \
+		(u32 *)&ipa_reg_save.ipa.pipes[2].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 3), \
+		(u32 *)&ipa_reg_save.ipa.pipes[3].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 4), \
+		(u32 *)&ipa_reg_save.ipa.pipes[4].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 5), \
+		(u32 *)&ipa_reg_save.ipa.pipes[5].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 6), \
+		(u32 *)&ipa_reg_save.ipa.pipes[6].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 7), \
+		(u32 *)&ipa_reg_save.ipa.pipes[7].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 8), \
+		(u32 *)&ipa_reg_save.ipa.pipes[8].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 9), \
+		(u32 *)&ipa_reg_save.ipa.pipes[9].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 10), \
+		(u32 *)&ipa_reg_save.ipa.pipes[10].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 11), \
+		(u32 *)&ipa_reg_save.ipa.pipes[11].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 12), \
+		(u32 *)&ipa_reg_save.ipa.pipes[12].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 13), \
+		(u32 *)&ipa_reg_save.ipa.pipes[13].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 14), \
+		(u32 *)&ipa_reg_save.ipa.pipes[14].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 15), \
+		(u32 *)&ipa_reg_save.ipa.pipes[15].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 16), \
+		(u32 *)&ipa_reg_save.ipa.pipes[16].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 17), \
+		(u32 *)&ipa_reg_save.ipa.pipes[17].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 18), \
+		(u32 *)&ipa_reg_save.ipa.pipes[18].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 19), \
+		(u32 *)&ipa_reg_save.ipa.pipes[19].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 20), \
+		(u32 *)&ipa_reg_save.ipa.pipes[20].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 21), \
+		(u32 *)&ipa_reg_save.ipa.pipes[21].endp.var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 22), \
+		(u32 *)&ipa_reg_save.ipa.pipes[22].endp.var_name }
+
+/*
+ * Placeholder macro for the extra pipe-indexed endpoint registers
+ * (currently expands to an empty map entry)
+ */
+#define IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(reg_name, var_name) \
+	{ 0, 0 }
+
+/*
+ * Macro to define the register cfg entries of a source resource
+ * group register for all source groups
+ */
+#define IPA_REG_SAVE_CFG_ENTRY_SRC_RSRC_GRP(reg_name, var_name) \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 0), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_grp[0].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 1), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_grp[1].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 2), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_grp[2].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 3), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_grp[3].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 4), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_grp[4].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 5), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_grp[5].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 6), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_grp[6].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 7), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_grp[7].var_name }
+
+/*
+ * Macro to define the register cfg entries of a destination resource
+ * group register for all destination groups
+ */
+#define IPA_REG_SAVE_CFG_ENTRY_DST_RSRC_GRP(reg_name, var_name) \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 0), \
+		(u32 *)&ipa_reg_save.ipa.dst_rsrc_grp[0].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 1), \
+		(u32 *)&ipa_reg_save.ipa.dst_rsrc_grp[1].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 2), \
+		(u32 *)&ipa_reg_save.ipa.dst_rsrc_grp[2].var_name }
+
+/*
+ * Macro to define the register cfg entries of a source resource
+ * group count register for all source groups
+ */
+#define IPA_REG_SAVE_CFG_ENTRY_SRC_RSRC_CNT_GRP(reg_name, var_name) \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 0), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_cnt[0].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 1), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_cnt[1].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 2), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_cnt[2].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 3), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_cnt[3].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 4), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_cnt[4].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 5), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_cnt[5].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 6), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_cnt[6].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 7), \
+		(u32 *)&ipa_reg_save.ipa.src_rsrc_cnt[7].var_name }
+
+/*
+ * Macro to define the register cfg entries of a destination resource
+ * group count register for all destination groups
+ */
+#define IPA_REG_SAVE_CFG_ENTRY_DST_RSRC_CNT_GRP(reg_name, var_name) \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 0), \
+		(u32 *)&ipa_reg_save.ipa.dst_rsrc_cnt[0].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 1), \
+		(u32 *)&ipa_reg_save.ipa.dst_rsrc_cnt[1].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 2), \
+		(u32 *)&ipa_reg_save.ipa.dst_rsrc_cnt[2].var_name }
+
+#define IPA_REG_SAVE_CFG_ENTRY_GSI_GENERAL_EE(reg_name, var_name) \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE), \
+		(u32 *)&ipa_reg_save.gsi.gen_ee[IPA_HW_A7_EE].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE), \
+		(u32 *)&ipa_reg_save.gsi.gen_ee[IPA_HW_Q6_EE].var_name }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, IPA_REG_SAVE_HWP_GSI_EE), \
+		(u32 *)&ipa_reg_save.gsi.gen_ee[IPA_REG_SAVE_HWP_GSI_EE].\
+			var_name }
+
+/*
+ * Macro to define the register cfg entries of a GSI channel context
+ * register for all EEs and channels
+ */
+#define IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(reg_name, var_name) \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 0), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[0].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 1), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[1].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 2), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[2].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 3), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[3].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 4), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[4].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 5), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[5].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 0), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[0].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 1), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[1].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 2), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[2].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 3), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[3].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 4), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[4].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 5), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[5].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 6), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[6].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 7), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[7].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 8), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[8].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 9), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[9].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 10), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[10].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 11), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[11].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 12), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[12].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 13), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[13].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_REG_SAVE_HWP_GSI_EE, 1), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.uc[0].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_REG_SAVE_HWP_GSI_EE, 3), \
+		(u32 *)&ipa_reg_save.gsi.ch_cntxt.uc[1].var_name }
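+
+/*
+ * Note: only channels 1 and 3 of the HWP GSI EE are captured above;
+ * they are stored as ch_cntxt.uc[0] and ch_cntxt.uc[1] respectively.
+ */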
+
+#define IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(reg_name, var_name) \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 0), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[0].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 1), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[1].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 2), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[2].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 3), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[3].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 0), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[0].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 1), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[1].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 2), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[2].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 3), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[3].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 4), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[4].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 5), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[5].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 6), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[6].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 7), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[7].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 8), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[8].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 9), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[9].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 10), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[10].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 11), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[11].var_name }, \
+	{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_REG_SAVE_HWP_GSI_EE, 1), \
+		(u32 *)&ipa_reg_save.gsi.evt_cntxt.uc[0].var_name }
+
+/*
+ * Macro to define a particular register cfg entry for GSI QSB debug
+ * registers
+ */
+#define IPA_REG_SAVE_CFG_ENTRY_GSI_QSB_DEBUG(reg_name, var_name) \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 0), \
+		(u32 *)&ipa_reg_save.gsi.debug.gsi_qsb_debug.var_name[0] }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 1), \
+		(u32 *)&ipa_reg_save.gsi.debug.gsi_qsb_debug.var_name[1] }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 2), \
+		(u32 *)&ipa_reg_save.gsi.debug.gsi_qsb_debug.var_name[2] }, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 3), \
+		(u32 *)&ipa_reg_save.gsi.debug.gsi_qsb_debug.var_name[3] }
+
+#define IPA_REG_SAVE_RX_SPLT_CMDQ(reg_name, var_name) \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 0), \
+		(u32 *)&ipa_reg_save.ipa.dbg.var_name[0]}, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 1), \
+		(u32 *)&ipa_reg_save.ipa.dbg.var_name[1]}, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 2), \
+		(u32 *)&ipa_reg_save.ipa.dbg.var_name[2]}, \
+	{ GEN_1xVECTOR_REG_OFST(reg_name, 3), \
+		(u32 *)&ipa_reg_save.ipa.dbg.var_name[3]}
+
+/*
+ * IPA HW Execution Environments
+ */
+enum ipa_hw_ee_e {
+	IPA_HW_A7_EE  = 0, /* A7's execution environment */
+	IPA_HW_Q6_EE  = 1, /* Q6's execution environment */
+	IPA_HW_HWP_EE = 3, /* HWP's execution environment */
+	IPA_HW_EE_MAX,     /* Max EE to support */
+};
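+
+/*
+ * Note: the GSI-side macros above use IPA_REG_SAVE_HWP_GSI_EE (2) for
+ * the HWP, while the IPA-side enumeration uses IPA_HW_HWP_EE (3).
+ */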
+
+/*
+ * General IPA register save data struct (i.e. this is where register
+ * values, once read, get placed)...
+ */
+struct ipa_gen_regs_s {
+	struct ipa_hwio_def_ipa_state_s
+	  ipa_state;
+	struct ipa_hwio_def_ipa_state_rx_active_s
+	  ipa_state_rx_active;
+	struct ipa_hwio_def_ipa_state_tx_wrapper_s
+	  ipa_state_tx_wrapper;
+	struct ipa_hwio_def_ipa_state_tx0_s
+	  ipa_state_tx0;
+	struct ipa_hwio_def_ipa_state_tx1_s
+	  ipa_state_tx1;
+	struct ipa_hwio_def_ipa_state_aggr_active_s
+	  ipa_state_aggr_active;
+	struct ipa_hwio_def_ipa_state_dfetcher_s
+	  ipa_state_dfetcher;
+	struct ipa_hwio_def_ipa_state_fetcher_mask_0_s
+	  ipa_state_fetcher_mask_0;
+	struct ipa_hwio_def_ipa_state_fetcher_mask_1_s
+	  ipa_state_fetcher_mask_1;
+	struct ipa_hwio_def_ipa_state_gsi_aos_s
+	  ipa_state_gsi_aos;
+	struct ipa_hwio_def_ipa_state_gsi_if_s
+	  ipa_state_gsi_if;
+	struct ipa_hwio_def_ipa_state_gsi_skip_s
+	  ipa_state_gsi_skip;
+	struct ipa_hwio_def_ipa_state_gsi_tlv_s
+	  ipa_state_gsi_tlv;
+	struct ipa_hwio_def_ipa_dpl_timer_lsb_s
+	  ipa_dpl_timer_lsb;
+	struct ipa_hwio_def_ipa_dpl_timer_msb_s
+	  ipa_dpl_timer_msb;
+	struct ipa_hwio_def_ipa_proc_iph_cfg_s
+	  ipa_proc_iph_cfg;
+	struct ipa_hwio_def_ipa_route_s
+	  ipa_route;
+	struct ipa_hwio_def_ipa_spare_reg_1_s
+	  ipa_spare_reg_1;
+	struct ipa_hwio_def_ipa_spare_reg_2_s
+	  ipa_spare_reg_2;
+	struct ipa_hwio_def_ipa_log_s
+	  ipa_log;
+	struct ipa_hwio_def_ipa_log_buf_status_cfg_s
+	  ipa_log_buf_status_cfg;
+	struct ipa_hwio_def_ipa_log_buf_status_addr_s
+	  ipa_log_buf_status_addr;
+	struct ipa_hwio_def_ipa_log_buf_status_write_ptr_s
+	  ipa_log_buf_status_write_ptr;
+	struct ipa_hwio_def_ipa_log_buf_status_ram_ptr_s
+	  ipa_log_buf_status_ram_ptr;
+	struct ipa_hwio_def_ipa_log_buf_hw_cmd_cfg_s
+	  ipa_log_buf_hw_cmd_cfg;
+	struct ipa_hwio_def_ipa_log_buf_hw_cmd_addr_s
+	  ipa_log_buf_hw_cmd_addr;
+	struct ipa_hwio_def_ipa_log_buf_hw_cmd_write_ptr_s
+	  ipa_log_buf_hw_cmd_write_ptr;
+	struct ipa_hwio_def_ipa_log_buf_hw_cmd_ram_ptr_s
+	  ipa_log_buf_hw_cmd_ram_ptr;
+	struct ipa_hwio_def_ipa_comp_hw_version_s
+	  ipa_comp_hw_version;
+	struct ipa_hwio_def_ipa_filt_rout_hash_en_s
+	  ipa_filt_rout_hash_en;
+	struct ipa_hwio_def_ipa_filt_rout_hash_flush_s
+	  ipa_filt_rout_hash_flush;
+	struct ipa_hwio_def_ipa_state_fetcher_s
+	  ipa_state_fetcher;
+	struct ipa_hwio_def_ipa_ipv4_filter_init_values_s
+	  ipa_ipv4_filter_init_values;
+	struct ipa_hwio_def_ipa_ipv6_filter_init_values_s
+	  ipa_ipv6_filter_init_values;
+	struct ipa_hwio_def_ipa_ipv4_route_init_values_s
+	  ipa_ipv4_route_init_values;
+	struct ipa_hwio_def_ipa_ipv6_route_init_values_s
+	  ipa_ipv6_route_init_values;
+	struct ipa_hwio_def_ipa_bam_activated_ports_s
+	  ipa_bam_activated_ports;
+	struct ipa_hwio_def_ipa_tx_commander_cmdq_status_s
+	  ipa_tx_commander_cmdq_status;
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_en_s
+	  ipa_log_buf_hw_snif_el_en;
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_wr_n_rd_sel_s
+	  ipa_log_buf_hw_snif_el_wr_n_rd_sel;
+	struct ipa_hwio_def_ipa_log_buf_hw_snif_el_cli_mux_s
+	  ipa_log_buf_hw_snif_el_cli_mux;
+	struct ipa_hwio_def_ipa_state_acl_s
+	  ipa_state_acl;
+	struct ipa_hwio_def_ipa_sys_pkt_proc_cntxt_base_s
+	  ipa_sys_pkt_proc_cntxt_base;
+	struct ipa_hwio_def_ipa_sys_pkt_proc_cntxt_base_msb_s
+	  ipa_sys_pkt_proc_cntxt_base_msb;
+	struct ipa_hwio_def_ipa_local_pkt_proc_cntxt_base_s
+	  ipa_local_pkt_proc_cntxt_base;
+	struct ipa_hwio_def_ipa_rsrc_grp_cfg_s
+	  ipa_rsrc_grp_cfg;
+	struct ipa_hwio_def_ipa_comp_cfg_s
+	  ipa_comp_cfg;
+	struct ipa_hwio_def_ipa_state_dpl_fifo_s
+	  ipa_state_dpl_fifo;
+	struct ipa_hwio_def_ipa_pipeline_disable_s
+	  ipa_pipeline_disable;
+	struct ipa_hwio_def_ipa_state_nlo_aggr_s
+	  ipa_state_nlo_aggr;
+	struct ipa_hwio_def_ipa_nlo_pp_cfg1_s
+	  ipa_nlo_pp_cfg1;
+	struct ipa_hwio_def_ipa_nlo_pp_cfg2_s
+	  ipa_nlo_pp_cfg2;
+	struct ipa_hwio_def_ipa_nlo_pp_ack_limit_cfg_s
+	  ipa_nlo_pp_ack_limit_cfg;
+	struct ipa_hwio_def_ipa_nlo_pp_data_limit_cfg_s
+	  ipa_nlo_pp_data_limit_cfg;
+	struct ipa_hwio_def_ipa_nlo_min_dsm_cfg_s
+	  ipa_nlo_min_dsm_cfg;
+	struct ipa_hwio_def_ipa_nlo_vp_flush_req_s
+	  ipa_nlo_vp_flush_req;
+	struct ipa_hwio_def_ipa_nlo_vp_flush_cookie_s
+	  ipa_nlo_vp_flush_cookie;
+	struct ipa_hwio_def_ipa_nlo_vp_flush_ack_s
+	  ipa_nlo_vp_flush_ack;
+	struct ipa_hwio_def_ipa_nlo_vp_dsm_open_s
+	  ipa_nlo_vp_dsm_open;
+	struct ipa_hwio_def_ipa_nlo_vp_qbap_open_s
+	  ipa_nlo_vp_qbap_open;
+};
+
+/*
+ * General per-EE IPA register save data struct
+ */
+struct ipa_reg_save_gen_ee_s {
+	struct ipa_hwio_def_ipa_irq_stts_ee_n_s
+	  ipa_irq_stts_ee_n;
+	struct ipa_hwio_def_ipa_irq_en_ee_n_s
+	  ipa_irq_en_ee_n;
+	struct ipa_hwio_def_ipa_fec_addr_ee_n_s
+	  ipa_fec_addr_ee_n;
+	struct ipa_hwio_def_ipa_fec_attr_ee_n_s
+	  ipa_fec_attr_ee_n;
+	struct ipa_hwio_def_ipa_snoc_fec_ee_n_s
+	  ipa_snoc_fec_ee_n;
+	struct ipa_hwio_def_ipa_holb_drop_irq_info_ee_n_s
+	  ipa_holb_drop_irq_info_ee_n;
+	struct ipa_hwio_def_ipa_suspend_irq_info_ee_n_s
+	  ipa_suspend_irq_info_ee_n;
+	struct ipa_hwio_def_ipa_suspend_irq_en_ee_n_s
+	  ipa_suspend_irq_en_ee_n;
+};
+
+/*
+ * Pipe Endp IPA register save data struct
+ */
+struct ipa_reg_save_pipe_endp_s {
+	struct ipa_hwio_def_ipa_endp_init_ctrl_n_s
+	  ipa_endp_init_ctrl_n;
+	struct ipa_hwio_def_ipa_endp_init_ctrl_scnd_n_s
+	  ipa_endp_init_ctrl_scnd_n;
+	struct ipa_hwio_def_ipa_endp_init_cfg_n_s
+	  ipa_endp_init_cfg_n;
+	struct ipa_hwio_def_ipa_endp_init_nat_n_s
+	  ipa_endp_init_nat_n;
+	struct ipa_hwio_def_ipa_endp_init_hdr_n_s
+	  ipa_endp_init_hdr_n;
+	struct ipa_hwio_def_ipa_endp_init_hdr_ext_n_s
+	  ipa_endp_init_hdr_ext_n;
+	struct ipa_hwio_def_ipa_endp_init_hdr_metadata_mask_n_s
+	  ipa_endp_init_hdr_metadata_mask_n;
+	struct ipa_hwio_def_ipa_endp_init_hdr_metadata_n_s
+	  ipa_endp_init_hdr_metadata_n;
+	struct ipa_hwio_def_ipa_endp_init_mode_n_s
+	  ipa_endp_init_mode_n;
+	struct ipa_hwio_def_ipa_endp_init_aggr_n_s
+	  ipa_endp_init_aggr_n;
+	struct ipa_hwio_def_ipa_endp_init_hol_block_en_n_s
+	  ipa_endp_init_hol_block_en_n;
+	struct ipa_hwio_def_ipa_endp_init_hol_block_timer_n_s
+	  ipa_endp_init_hol_block_timer_n;
+	struct ipa_hwio_def_ipa_endp_init_deaggr_n_s
+	  ipa_endp_init_deaggr_n;
+	struct ipa_hwio_def_ipa_endp_status_n_s
+	  ipa_endp_status_n;
+	struct ipa_hwio_def_ipa_endp_init_rsrc_grp_n_s
+	  ipa_endp_init_rsrc_grp_n;
+	struct ipa_hwio_def_ipa_endp_init_seq_n_s
+	  ipa_endp_init_seq_n;
+	struct ipa_hwio_def_ipa_endp_gsi_cfg_tlv_n_s
+	  ipa_endp_gsi_cfg_tlv_n;
+	struct ipa_hwio_def_ipa_endp_gsi_cfg_aos_n_s
+	  ipa_endp_gsi_cfg_aos_n;
+	struct ipa_hwio_def_ipa_endp_gsi_cfg1_n_s
+	  ipa_endp_gsi_cfg1_n;
+	struct ipa_hwio_def_ipa_endp_filter_router_hsh_cfg_n_s
+	  ipa_endp_filter_router_hsh_cfg_n;
+};
+
+/*
+ * Pipe IPA register save data struct
+ */
+struct ipa_reg_save_pipe_s {
+	u8				active;
+	struct ipa_reg_save_pipe_endp_s endp;
+};
+
+/*
+ * HWP IPA register save data struct
+ */
+struct ipa_reg_save_hwp_s {
+	struct ipa_hwio_def_ipa_uc_qmb_sys_addr_s
+	  ipa_uc_qmb_sys_addr;
+	struct ipa_hwio_def_ipa_uc_qmb_local_addr_s
+	  ipa_uc_qmb_local_addr;
+	struct ipa_hwio_def_ipa_uc_qmb_length_s
+	  ipa_uc_qmb_length;
+	struct ipa_hwio_def_ipa_uc_qmb_trigger_s
+	  ipa_uc_qmb_trigger;
+	struct ipa_hwio_def_ipa_uc_qmb_pending_tid_s
+	  ipa_uc_qmb_pending_tid;
+	struct ipa_hwio_def_ipa_uc_qmb_completed_rd_fifo_peek_s
+	  ipa_uc_qmb_completed_rd_fifo_peek;
+	struct ipa_hwio_def_ipa_uc_qmb_completed_wr_fifo_peek_s
+	  ipa_uc_qmb_completed_wr_fifo_peek;
+	struct ipa_hwio_def_ipa_uc_qmb_misc_s
+	  ipa_uc_qmb_misc;
+	struct ipa_hwio_def_ipa_uc_qmb_status_s
+	  ipa_uc_qmb_status;
+	struct ipa_hwio_def_ipa_uc_qmb_bus_attrib_s
+	  ipa_uc_qmb_bus_attrib;
+};
+
+/*
+ * IPA TESTBUS entry struct
+ */
+struct ipa_reg_save_ipa_testbus_entry_s {
+	union ipa_hwio_def_ipa_testbus_sel_u testbus_sel;
+	union ipa_hwio_def_ipa_debug_data_u testbus_data;
+};
+
+/* IPA TESTBUS global struct */
+struct ipa_reg_save_ipa_testbus_global_s {
+	struct ipa_reg_save_ipa_testbus_entry_s
+	global[IPA_TESTBUS_SEL_INTERNAL_MAX + 1]
+	[IPA_TESTBUS_SEL_EXTERNAL_MAX + 1];
+};
+
+/* IPA TESTBUS per EP struct */
+struct ipa_reg_save_ipa_testbus_ep_s {
+	struct ipa_reg_save_ipa_testbus_entry_s
+	entry_ep[IPA_TESTBUS_SEL_INTERNAL_PIPE_MAX + 1]
+	[IPA_TESTBUS_SEL_EXTERNAL_MAX + 1];
+};
+
+/* IPA TESTBUS per EP resource group struct */
+struct ipa_reg_save_ipa_testbus_ep_rsrc_s {
+	struct ipa_reg_save_ipa_testbus_entry_s
+	  entry_ep[IPA_DEBUG_TESTBUS_RSRC_NUM_GRP];
+};
+
+/* IPA TESTBUS save data struct */
+struct ipa_reg_save_ipa_testbus_s {
+	struct ipa_reg_save_ipa_testbus_global_s global;
+	struct ipa_reg_save_ipa_testbus_ep_s
+	  ep[IPA_TESTBUS_SEL_EP_MAX + 1];
+	struct ipa_reg_save_ipa_testbus_ep_rsrc_s
+	  ep_rsrc[IPA_DEBUG_TESTBUS_RSRC_NUM_EP];
+};
+
+/*
+ * Debug IPA register save data struct
+ */
+struct ipa_reg_save_dbg_s {
+	struct ipa_hwio_def_ipa_debug_data_s
+	  ipa_debug_data;
+	struct ipa_hwio_def_ipa_step_mode_status_s
+	  ipa_step_mode_status;
+	struct ipa_hwio_def_ipa_step_mode_breakpoints_s
+	  ipa_step_mode_breakpoints;
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_cmd_n_s
+	  ipa_rx_splt_cmdq_cmd_n[IPA_RX_SPLT_CMDQ_MAX];
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_cfg_n_s
+	  ipa_rx_splt_cmdq_cfg_n[IPA_RX_SPLT_CMDQ_MAX];
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_0_n_s
+	  ipa_rx_splt_cmdq_data_wr_0_n[IPA_RX_SPLT_CMDQ_MAX];
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_1_n_s
+	  ipa_rx_splt_cmdq_data_wr_1_n[IPA_RX_SPLT_CMDQ_MAX];
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_2_n_s
+	  ipa_rx_splt_cmdq_data_wr_2_n[IPA_RX_SPLT_CMDQ_MAX];
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_wr_3_n_s
+	  ipa_rx_splt_cmdq_data_wr_3_n[IPA_RX_SPLT_CMDQ_MAX];
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_0_n_s
+	  ipa_rx_splt_cmdq_data_rd_0_n[IPA_RX_SPLT_CMDQ_MAX];
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_1_n_s
+	  ipa_rx_splt_cmdq_data_rd_1_n[IPA_RX_SPLT_CMDQ_MAX];
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_2_n_s
+	  ipa_rx_splt_cmdq_data_rd_2_n[IPA_RX_SPLT_CMDQ_MAX];
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_data_rd_3_n_s
+	  ipa_rx_splt_cmdq_data_rd_3_n[IPA_RX_SPLT_CMDQ_MAX];
+	struct ipa_hwio_def_ipa_rx_splt_cmdq_status_n_s
+	  ipa_rx_splt_cmdq_status_n[IPA_RX_SPLT_CMDQ_MAX];
+
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_cmd_s
+	  ipa_rx_hps_cmdq_cmd;
+	union ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_0_u
+		ipa_rx_hps_cmdq_data_rd_0_arr[
+		IPA_DEBUG_CMDQ_HPS_SELECT_NUM_GROUPS];
+	union ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_1_u
+		ipa_rx_hps_cmdq_data_rd_1_arr[
+		IPA_DEBUG_CMDQ_HPS_SELECT_NUM_GROUPS];
+	union ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_2_u
+		ipa_rx_hps_cmdq_data_rd_2_arr[
+		IPA_DEBUG_CMDQ_HPS_SELECT_NUM_GROUPS];
+	union ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_3_u
+		ipa_rx_hps_cmdq_data_rd_3_arr[
+		IPA_DEBUG_CMDQ_HPS_SELECT_NUM_GROUPS];
+	union ipa_hwio_def_ipa_rx_hps_cmdq_count_u
+	  ipa_rx_hps_cmdq_count_arr[IPA_DEBUG_CMDQ_HPS_SELECT_NUM_GROUPS];
+	union ipa_hwio_def_ipa_rx_hps_cmdq_status_u
+	  ipa_rx_hps_cmdq_status_arr[IPA_DEBUG_CMDQ_HPS_SELECT_NUM_GROUPS];
+	struct ipa_hwio_def_ipa_rx_hps_cmdq_status_empty_s
+	  ipa_rx_hps_cmdq_status_empty;
+	struct ipa_hwio_def_ipa_rx_hps_clients_min_depth_0_s
+	  ipa_rx_hps_clients_min_depth_0;
+	struct ipa_hwio_def_ipa_rx_hps_clients_max_depth_0_s
+	  ipa_rx_hps_clients_max_depth_0;
+	struct ipa_hwio_def_ipa_hps_dps_cmdq_cmd_s
+	  ipa_hps_dps_cmdq_cmd;
+	union ipa_hwio_def_ipa_hps_dps_cmdq_data_rd_0_u
+		ipa_hps_dps_cmdq_data_rd_0_arr[IPA_TESTBUS_SEL_EP_MAX + 1];
+	union ipa_hwio_def_ipa_hps_dps_cmdq_count_u
+		ipa_hps_dps_cmdq_count_arr[IPA_TESTBUS_SEL_EP_MAX + 1];
+	union ipa_hwio_def_ipa_hps_dps_cmdq_status_u
+		ipa_hps_dps_cmdq_status_arr[IPA_TESTBUS_SEL_EP_MAX + 1];
+	struct ipa_hwio_def_ipa_hps_dps_cmdq_status_empty_s
+	  ipa_hps_dps_cmdq_status_empty;
+
+	struct ipa_hwio_def_ipa_dps_tx_cmdq_cmd_s
+	  ipa_dps_tx_cmdq_cmd;
+	union ipa_hwio_def_ipa_dps_tx_cmdq_data_rd_0_u
+		ipa_dps_tx_cmdq_data_rd_0_arr[
+		IPA_DEBUG_CMDQ_DPS_SELECT_NUM_GROUPS];
+	union ipa_hwio_def_ipa_dps_tx_cmdq_count_u
+		ipa_dps_tx_cmdq_count_arr[IPA_DEBUG_CMDQ_DPS_SELECT_NUM_GROUPS];
+	union ipa_hwio_def_ipa_dps_tx_cmdq_status_u
+		ipa_dps_tx_cmdq_status_arr[IPA_DEBUG_CMDQ_DPS_SELECT_NUM_GROUPS];
+	struct ipa_hwio_def_ipa_dps_tx_cmdq_status_empty_s
+	  ipa_dps_tx_cmdq_status_empty;
+
+	struct ipa_hwio_def_ipa_ackmngr_cmdq_cmd_s
+	  ipa_ackmngr_cmdq_cmd;
+	union ipa_hwio_def_ipa_ackmngr_cmdq_data_rd_u
+		ipa_ackmngr_cmdq_data_rd_arr[
+		IPA_DEBUG_CMDQ_ACK_SELECT_NUM_GROUPS];
+	union ipa_hwio_def_ipa_ackmngr_cmdq_count_u
+	  ipa_ackmngr_cmdq_count_arr[IPA_DEBUG_CMDQ_ACK_SELECT_NUM_GROUPS];
+	union ipa_hwio_def_ipa_ackmngr_cmdq_status_u
+		ipa_ackmngr_cmdq_status_arr[
+		IPA_DEBUG_CMDQ_ACK_SELECT_NUM_GROUPS];
+	struct ipa_hwio_def_ipa_ackmngr_cmdq_status_empty_s
+	  ipa_ackmngr_cmdq_status_empty;
+
+	struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_cmd_s
+	  ipa_prod_ackmngr_cmdq_cmd;
+	union ipa_hwio_def_ipa_prod_ackmngr_cmdq_data_rd_u
+		ipa_prod_ackmngr_cmdq_data_rd_arr[IPA_TESTBUS_SEL_EP_MAX + 1];
+	union ipa_hwio_def_ipa_prod_ackmngr_cmdq_count_u
+		ipa_prod_ackmngr_cmdq_count_arr[IPA_TESTBUS_SEL_EP_MAX + 1];
+	union ipa_hwio_def_ipa_prod_ackmngr_cmdq_status_u
+		ipa_prod_ackmngr_cmdq_status_arr[IPA_TESTBUS_SEL_EP_MAX + 1];
+	struct ipa_hwio_def_ipa_prod_ackmngr_cmdq_status_empty_s
+	  ipa_prod_ackmngr_cmdq_status_empty;
+
+	struct ipa_hwio_def_ipa_ntf_tx_cmdq_cmd_s
+	  ipa_ntf_tx_cmdq_cmd;
+	union ipa_hwio_def_ipa_ntf_tx_cmdq_data_rd_0_u
+		ipa_ntf_tx_cmdq_data_rd_0_arr[IPA_TESTBUS_SEL_EP_MAX + 1];
+	union ipa_hwio_def_ipa_ntf_tx_cmdq_count_u
+		ipa_ntf_tx_cmdq_count_arr[IPA_TESTBUS_SEL_EP_MAX + 1];
+	union ipa_hwio_def_ipa_ntf_tx_cmdq_status_u
+		ipa_ntf_tx_cmdq_status_arr[IPA_TESTBUS_SEL_EP_MAX + 1];
+	struct ipa_hwio_def_ipa_ntf_tx_cmdq_status_empty_s
+	  ipa_ntf_tx_cmdq_status_empty;
+
+	union ipa_hwio_def_ipa_rsrc_mngr_db_rsrc_read_u
+		ipa_rsrc_mngr_db_rsrc_read_arr[IPA_RSCR_MNGR_DB_RSRC_TYPE_MAX +
+					       1][IPA_RSCR_MNGR_DB_RSRC_ID_MAX
+						  + 1];
+	union ipa_hwio_def_ipa_rsrc_mngr_db_list_read_u
+		ipa_rsrc_mngr_db_list_read_arr[IPA_RSCR_MNGR_DB_RSRC_TYPE_MAX +
+					       1][IPA_RSCR_MNGR_DB_RSRC_ID_MAX
+						  + 1];
+};
+
+/* Source Resource Group IPA register save data struct */
+struct ipa_reg_save_src_rsrc_grp_s {
+	struct ipa_hwio_def_ipa_src_rsrc_grp_01_rsrc_type_n_s
+	  ipa_src_rsrc_grp_01_rsrc_type_n;
+	struct ipa_hwio_def_ipa_src_rsrc_grp_23_rsrc_type_n_s
+	  ipa_src_rsrc_grp_23_rsrc_type_n;
+};
+
+/* Destination Resource Group IPA register save data struct */
+struct ipa_reg_save_dst_rsrc_grp_s {
+	struct ipa_hwio_def_ipa_dst_rsrc_grp_01_rsrc_type_n_s
+	  ipa_dst_rsrc_grp_01_rsrc_type_n;
+	struct ipa_hwio_def_ipa_dst_rsrc_grp_23_rsrc_type_n_s
+	  ipa_dst_rsrc_grp_23_rsrc_type_n;
+};
+
+/* Source Resource Group Count IPA register save data struct */
+struct ipa_reg_save_src_rsrc_cnt_s {
+	struct ipa_hwio_def_ipa_src_rsrc_grp_0123_rsrc_type_cnt_n_s
+	  ipa_src_rsrc_grp_0123_rsrc_type_cnt_n;
+};
+
+/* Destination Resource Group Count IPA register save data struct */
+struct ipa_reg_save_dst_rsrc_cnt_s {
+	struct ipa_hwio_def_ipa_dst_rsrc_grp_0123_rsrc_type_cnt_n_s
+	  ipa_dst_rsrc_grp_0123_rsrc_type_cnt_n;
+};
+
+/* GSI General register save data struct */
+struct ipa_reg_save_gsi_gen_s {
+	struct gsi_hwio_def_gsi_cfg_s
+	  gsi_cfg;
+	struct gsi_hwio_def_gsi_ree_cfg_s
+	  gsi_ree_cfg;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_inst_ram_n_s
+	  ipa_gsi_top_gsi_inst_ram_n;
+};
+
+/* GSI General EE register save data struct */
+struct ipa_reg_save_gsi_gen_ee_s {
+	struct gsi_hwio_def_gsi_manager_ee_qos_n_s
+	  gsi_manager_ee_qos_n;
+	struct gsi_hwio_def_ee_n_gsi_status_s
+	  ee_n_gsi_status;
+	struct gsi_hwio_def_ee_n_cntxt_type_irq_s
+	  ee_n_cntxt_type_irq;
+	struct gsi_hwio_def_ee_n_cntxt_type_irq_msk_s
+	  ee_n_cntxt_type_irq_msk;
+	struct gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_s
+	  ee_n_cntxt_src_gsi_ch_irq;
+	struct gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_s
+	  ee_n_cntxt_src_ev_ch_irq;
+	struct gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_msk_s
+	  ee_n_cntxt_src_gsi_ch_irq_msk;
+	struct gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_msk_s
+	  ee_n_cntxt_src_ev_ch_irq_msk;
+	struct gsi_hwio_def_ee_n_cntxt_src_ieob_irq_s
+	  ee_n_cntxt_src_ieob_irq;
+	struct gsi_hwio_def_ee_n_cntxt_src_ieob_irq_msk_s
+	  ee_n_cntxt_src_ieob_irq_msk;
+	struct gsi_hwio_def_ee_n_cntxt_gsi_irq_stts_s
+	  ee_n_cntxt_gsi_irq_stts;
+	struct gsi_hwio_def_ee_n_cntxt_glob_irq_stts_s
+	  ee_n_cntxt_glob_irq_stts;
+	struct gsi_hwio_def_ee_n_error_log_s
+	  ee_n_error_log;
+	struct gsi_hwio_def_ee_n_cntxt_scratch_0_s
+	  ee_n_cntxt_scratch_0;
+	struct gsi_hwio_def_ee_n_cntxt_scratch_1_s
+	  ee_n_cntxt_scratch_1;
+	struct gsi_hwio_def_ee_n_cntxt_intset_s
+	  ee_n_cntxt_intset;
+	struct gsi_hwio_def_ee_n_cntxt_msi_base_lsb_s
+	  ee_n_cntxt_msi_base_lsb;
+	struct gsi_hwio_def_ee_n_cntxt_msi_base_msb_s
+	  ee_n_cntxt_msi_base_msb;
+};
+
+static u32 ipa_reg_save_gsi_ch_test_bus_selector_array[] = {
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_ZEROS,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_0,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_1,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_2,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_3,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MCS_4,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_DB_ENG,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_0,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_1,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_2,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_REE_3,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_0,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_1,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_2,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_3,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_4,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_EVE_5,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IE_0,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IE_1,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_0,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_1,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_2,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_3,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_IC_4,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_0,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_1,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_2,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_MOQA_3,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_0,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_1,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_2,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_TMR_3,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_0,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_1,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_2,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_RD_WR_3,
+	HWIO_GSI_DEBUG_TEST_BUS_SELECTOR_CSR,
+};
+
+/*
+ * GSI QSB debug bus register save data struct
+ */
+struct ipa_reg_save_gsi_test_bus_s {
+	struct
+	  gsi_hwio_def_gsi_test_bus_reg_s
+	  test_bus_reg[ARRAY_SIZE(ipa_reg_save_gsi_ch_test_bus_selector_array)];
+};
+
+/* GSI debug MCS registers save data struct */
+struct ipa_reg_save_gsi_mcs_regs_s {
+	struct
+	  gsi_hwio_def_gsi_debug_sw_rf_n_read_s
+		mcs_reg[HWIO_GSI_DEBUG_SW_RF_n_READ_MAXn + 1];
+};
+
+/* GSI debug counters save data struct */
+struct ipa_reg_save_gsi_debug_cnt_s {
+	struct
+	  gsi_hwio_def_gsi_debug_countern_s
+		cnt[HWIO_GSI_DEBUG_COUNTERn_MAXn + 1];
+};
+
+/* GSI IRAM pointers (IEP) save data struct */
+struct ipa_reg_save_gsi_iram_ptr_regs_s {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_cmd_s
+	  ipa_gsi_top_gsi_iram_ptr_ch_cmd;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ee_generic_cmd_s
+	  ipa_gsi_top_gsi_iram_ptr_ee_generic_cmd;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_db_s
+	  ipa_gsi_top_gsi_iram_ptr_ch_db;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ev_db_s
+	  ipa_gsi_top_gsi_iram_ptr_ev_db;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_new_re_s
+	  ipa_gsi_top_gsi_iram_ptr_new_re;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_dis_comp_s
+	  ipa_gsi_top_gsi_iram_ptr_ch_dis_comp;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_ch_empty_s
+	  ipa_gsi_top_gsi_iram_ptr_ch_empty;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_event_gen_comp_s
+	  ipa_gsi_top_gsi_iram_ptr_event_gen_comp;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_timer_expired_s
+	  ipa_gsi_top_gsi_iram_ptr_timer_expired;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_write_eng_comp_s
+	  ipa_gsi_top_gsi_iram_ptr_write_eng_comp;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_read_eng_comp_s
+	  ipa_gsi_top_gsi_iram_ptr_read_eng_comp;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_uc_gp_int_s
+	  ipa_gsi_top_gsi_iram_ptr_uc_gp_int;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_iram_ptr_int_mod_stopped_s
+	  ipa_gsi_top_gsi_iram_ptr_int_mod_stopped;
+};
+
+/* GSI SHRAM pointers save data struct */
+struct ipa_reg_save_gsi_shram_ptr_regs_s {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ch_cntxt_base_addr_s
+	  ipa_gsi_top_gsi_shram_ptr_ch_cntxt_base_addr;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ev_cntxt_base_addr_s
+	  ipa_gsi_top_gsi_shram_ptr_ev_cntxt_base_addr;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_re_storage_base_addr_s
+	  ipa_gsi_top_gsi_shram_ptr_re_storage_base_addr;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_re_esc_buf_base_addr_s
+	  ipa_gsi_top_gsi_shram_ptr_re_esc_buf_base_addr;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_ee_scrach_base_addr_s
+	  ipa_gsi_top_gsi_shram_ptr_ee_scrach_base_addr;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_shram_ptr_func_stack_base_addr_s
+	  ipa_gsi_top_gsi_shram_ptr_func_stack_base_addr;
+};
+
+/* GSI debug register save data struct */
+struct ipa_reg_save_gsi_debug_s {
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_busy_reg_s
+	  ipa_gsi_top_gsi_debug_busy_reg;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_event_pending_s
+	  ipa_gsi_top_gsi_debug_event_pending;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_timer_pending_s
+	  ipa_gsi_top_gsi_debug_timer_pending;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_rd_wr_pending_s
+	  ipa_gsi_top_gsi_debug_rd_wr_pending;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_pc_from_sw_s
+	  ipa_gsi_top_gsi_debug_pc_from_sw;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_sw_stall_s
+	  ipa_gsi_top_gsi_debug_sw_stall;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_pc_for_debug_s
+	  ipa_gsi_top_gsi_debug_pc_for_debug;
+	struct ipa_hwio_def_ipa_gsi_top_gsi_debug_qsb_log_err_trns_id_s
+	  ipa_gsi_top_gsi_debug_qsb_log_err_trns_id;
+	struct ipa_reg_save_gsi_test_bus_s		gsi_test_bus;
+	struct ipa_reg_save_gsi_mcs_regs_s		gsi_mcs_regs;
+	struct ipa_reg_save_gsi_debug_cnt_s		gsi_cnt_regs;
+	struct ipa_reg_save_gsi_iram_ptr_regs_s		gsi_iram_ptrs;
+	struct ipa_reg_save_gsi_shram_ptr_regs_s	gsi_shram_ptrs;
+};
+
+/* GSI MCS channel scratch registers save data struct */
+struct ipa_reg_save_gsi_mcs_channel_scratch_regs_s {
+	struct gsi_hwio_def_gsi_shram_n_s
+	  scratch4;
+	struct gsi_hwio_def_gsi_shram_n_s
+	  scratch5;
+};
+
+/* GSI Channel Context register save data struct */
+struct ipa_reg_save_gsi_ch_cntxt_per_ep_s {
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_0_s
+	  ee_n_gsi_ch_k_cntxt_0;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_1_s
+	  ee_n_gsi_ch_k_cntxt_1;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_2_s
+	  ee_n_gsi_ch_k_cntxt_2;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_3_s
+	  ee_n_gsi_ch_k_cntxt_3;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_4_s
+	  ee_n_gsi_ch_k_cntxt_4;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_5_s
+	  ee_n_gsi_ch_k_cntxt_5;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_6_s
+	  ee_n_gsi_ch_k_cntxt_6;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_7_s
+	  ee_n_gsi_ch_k_cntxt_7;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_re_fetch_read_ptr_s
+	  ee_n_gsi_ch_k_re_fetch_read_ptr;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_re_fetch_write_ptr_s
+	  ee_n_gsi_ch_k_re_fetch_write_ptr;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_qos_s
+	  ee_n_gsi_ch_k_qos;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_0_s
+	  ee_n_gsi_ch_k_scratch_0;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_1_s
+	  ee_n_gsi_ch_k_scratch_1;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_2_s
+	  ee_n_gsi_ch_k_scratch_2;
+	struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_3_s
+	  ee_n_gsi_ch_k_scratch_3;
+	struct gsi_hwio_def_gsi_map_ee_n_ch_k_vp_table_s
+	  gsi_map_ee_n_ch_k_vp_table;
+	struct ipa_reg_save_gsi_mcs_channel_scratch_regs_s mcs_channel_scratch;
+};
+
+/* GSI Event Context register save data struct */
+struct ipa_reg_save_gsi_evt_cntxt_per_ep_s {
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_0_s
+	  ee_n_ev_ch_k_cntxt_0;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_1_s
+	  ee_n_ev_ch_k_cntxt_1;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_2_s
+	  ee_n_ev_ch_k_cntxt_2;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_3_s
+	  ee_n_ev_ch_k_cntxt_3;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_4_s
+	  ee_n_ev_ch_k_cntxt_4;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_5_s
+	  ee_n_ev_ch_k_cntxt_5;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_6_s
+	  ee_n_ev_ch_k_cntxt_6;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_7_s
+	  ee_n_ev_ch_k_cntxt_7;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_8_s
+	  ee_n_ev_ch_k_cntxt_8;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_9_s
+	  ee_n_ev_ch_k_cntxt_9;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_10_s
+	  ee_n_ev_ch_k_cntxt_10;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_11_s
+	  ee_n_ev_ch_k_cntxt_11;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_12_s
+	  ee_n_ev_ch_k_cntxt_12;
+	struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_13_s
+	  ee_n_ev_ch_k_cntxt_13;
+	struct gsi_hwio_def_ee_n_ev_ch_k_scratch_0_s
+	  ee_n_ev_ch_k_scratch_0;
+	struct gsi_hwio_def_ee_n_ev_ch_k_scratch_1_s
+	  ee_n_ev_ch_k_scratch_1;
+	struct gsi_hwio_def_gsi_debug_ee_n_ev_k_vp_table_s
+	  gsi_debug_ee_n_ev_k_vp_table;
+};
+
+/* GSI FIFO status register save data struct */
+struct ipa_reg_save_gsi_fifo_status_s {
+	union ipa_hwio_def_ipa_gsi_fifo_status_ctrl_u
+		gsi_fifo_status_ctrl;
+	union ipa_hwio_def_ipa_gsi_tlv_fifo_status_u
+		gsi_tlv_fifo_status;
+	union ipa_hwio_def_ipa_gsi_aos_fifo_status_u
+		gsi_aos_fifo_status;
+};
+
+/* GSI Channel Context register save top level data struct */
+struct ipa_reg_save_gsi_ch_cntxt_s {
+	struct ipa_reg_save_gsi_ch_cntxt_per_ep_s
+		a7[IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_A7];
+	struct ipa_reg_save_gsi_ch_cntxt_per_ep_s
+		q6[IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_Q6];
+	struct ipa_reg_save_gsi_ch_cntxt_per_ep_s
+		uc[IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_UC];
+};
+
+/* GSI Event Context register save top level data struct */
+struct ipa_reg_save_gsi_evt_cntxt_s {
+	struct ipa_reg_save_gsi_evt_cntxt_per_ep_s
+		a7[IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_A7];
+	struct ipa_reg_save_gsi_evt_cntxt_per_ep_s
+		q6[IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_Q6];
+	struct ipa_reg_save_gsi_evt_cntxt_per_ep_s
+		uc[IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_UC];
+};
+
+/* Top level IPA register save data struct */
+struct ipa_regs_save_hierarchy_s {
+	struct ipa_gen_regs_s
+		gen;
+	struct ipa_reg_save_gen_ee_s
+		gen_ee[IPA_HW_EE_MAX];
+	struct ipa_reg_save_hwp_s
+		hwp;
+	struct ipa_reg_save_dbg_s
+		dbg;
+	struct ipa_reg_save_ipa_testbus_s
+		*testbus;
+	struct ipa_reg_save_pipe_s
+		pipes[IPA_HW_PIPE_ID_MAX];
+	struct ipa_reg_save_src_rsrc_grp_s
+		src_rsrc_grp[IPA_HW_SRC_RSRP_TYPE_MAX];
+	struct ipa_reg_save_dst_rsrc_grp_s
+		dst_rsrc_grp[IPA_HW_DST_RSRP_TYPE_MAX];
+	struct ipa_reg_save_src_rsrc_cnt_s
+		src_rsrc_cnt[IPA_HW_SRC_RSRP_TYPE_MAX];
+	struct ipa_reg_save_dst_rsrc_cnt_s
+		dst_rsrc_cnt[IPA_HW_DST_RSRP_TYPE_MAX];
+};
+
+/* Top level GSI register save data struct */
+struct gsi_regs_save_hierarchy_s {
+	u32 fw_ver;
+	struct ipa_reg_save_gsi_gen_s		gen;
+	struct ipa_reg_save_gsi_gen_ee_s	gen_ee[IPA_REG_SAVE_GSI_NUM_EE];
+	struct ipa_reg_save_gsi_ch_cntxt_s	ch_cntxt;
+	struct ipa_reg_save_gsi_evt_cntxt_s	evt_cntxt;
+	struct ipa_reg_save_gsi_debug_s		debug;
+};
+
+/* Source resources for a resource group */
+struct ipa_reg_save_src_rsrc_cnts_s {
+	u8 pkt_cntxt;
+	u8 descriptor_list;
+	u8 data_descriptor_buffer;
+	u8 hps_dmars;
+	u8 reserved_acks;
+};
+
+/* Destination resources for a resource group */
+struct ipa_reg_save_dst_rsrc_cnts_s {
+	u8 reserved_sectors;
+	u8 dps_dmars;
+};
+
+/* Resource count structure for a resource group */
+struct ipa_reg_save_rsrc_cnts_per_grp_s {
+	/* Resource group number */
+	u8 resource_group;
+	/* Source resources for a resource group */
+	struct ipa_reg_save_src_rsrc_cnts_s src;
+	/* Destination resources for a resource group */
+	struct ipa_reg_save_dst_rsrc_cnts_s dst;
+};
+
+/* Top level resource count structure */
+struct ipa_reg_save_rsrc_cnts_s {
+	/* Resource count structure for PCIE group */
+	struct ipa_reg_save_rsrc_cnts_per_grp_s pcie;
+	/* Resource count structure for DDR group */
+	struct ipa_reg_save_rsrc_cnts_per_grp_s ddr;
+};
+
+/*
+ * Top level IPA and GSI registers save data struct
+ */
+struct regs_save_hierarchy_s {
+	struct ipa_regs_save_hierarchy_s
+		ipa;
+	struct gsi_regs_save_hierarchy_s
+		gsi;
+	bool
+		pkt_ctntx_active[IPA_HW_PKT_CTNTX_MAX];
+	union ipa_hwio_def_ipa_ctxh_ctrl_u
+		pkt_ctntxt_lock;
+	enum ipa_hw_pkt_cntxt_state_e
+		pkt_cntxt_state[IPA_HW_PKT_CTNTX_MAX];
+	struct ipa_pkt_ctntx_s
+		pkt_ctntx[IPA_HW_PKT_CTNTX_MAX];
+	struct ipa_reg_save_rsrc_cnts_s
+		rsrc_cnts;
+	struct ipa_reg_save_gsi_fifo_status_s
+		gsi_fifo_status[IPA_HW_PIPE_ID_MAX];
+};
+
+/*
+ * The following section deals with handling IPA registers' memory
+ * access relative to pre-defined memory protection schemes
+ * (ie. "access control").
+ *
+ * In a nut shell, the intent of the data stuctures below is to allow
+ * higher level register accessors to be unaware of what really is
+ * going on at the lowest level (ie. real vs non-real access).  This
+ * methodology is also designed to allow for platform specific "access
+ * maps."
+ */
+
+/*
+ * Function for doing an actual read
+ */
+static inline u32
+act_read(void __iomem *addr)
+{
+	u32 val = my_in_dword(addr);
+
+	return val;
+}
+
+/*
+ * Function for doing an actual write
+ */
+static inline void
+act_write(void __iomem *addr, u32 val)
+{
+	my_out_dword(addr, val);
+}
+
+/*
+ * Function that pretends to do a read
+ */
+static inline u32
+nop_read(void __iomem *addr)
+{
+	return IPA_MEM_INIT_VAL;
+}
+
+/*
+ * Function that pretends to do a write
+ */
+static inline void
+nop_write(void __iomem *addr, u32 val)
+{
+}
+
+/*
+ * The following are used to define struct reg_access_funcs_s below...
+ */
+typedef u32 (*reg_read_func_t)(
+	void __iomem *addr);
+typedef void (*reg_write_func_t)(
+	void __iomem *addr,
+	u32 val);
+
+/*
+ * The following is used to define io_matrix[] below...
+ */
+struct reg_access_funcs_s {
+	reg_read_func_t  read;
+	reg_write_func_t write;
+};
+
+/*
+ * The following will be used to appropriately index into the
+ * read/write combos defined in io_matrix[] below...
+ */
+#define AA_COMBO 0 /* actual read, actual write */
+#define AN_COMBO 1 /* actual read, no-op write  */
+#define NA_COMBO 2 /* no-op read,  actual write */
+#define NN_COMBO 3 /* no-op read,  no-op write  */
+
+/*
+ * The following will be used to dictate registers' access methods
+ * relative to the state of secure debug...whether it's enabled or
+ * disabled.
+ *
+ * NOTE: The table below defines all access combinations.
+ */
+static struct reg_access_funcs_s io_matrix[] = {
+	{ act_read, act_write }, /* the AA_COMBO */
+	{ act_read, nop_write }, /* the AN_COMBO */
+	{ nop_read, act_write }, /* the NA_COMBO */
+	{ nop_read, nop_write }, /* the NN_COMBO */
+};
+
+/*
+ * The following will be used to define and drive IPA's register
+ * access rules.
+ */
+struct reg_mem_access_map_t {
+	u32 addr_range_begin;
+	u32 addr_range_end;
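+	/* read/write funcs for each secure-debug state (enabled/disabled) */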
+	struct reg_access_funcs_s *access[2];
+};
+
+#endif /* #if !defined(_IPA_REG_DUMP_H_) */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index d575a2e..3520b67 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -4411,8 +4411,10 @@
 	if (res)
 		IPAERR("uC panic handler failed %d\n", res);
 
-	if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) != 0)
+	if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) != 0) {
 		ipahal_print_all_regs(false);
+		ipa_save_registers();
+	}
 
 	return NOTIFY_DONE;
 }
@@ -4582,6 +4584,9 @@
 	if (ipa3_ctx->ipa_hw_type != IPA_HW_v4_0)
 		ipa3_proxy_clk_vote();
 
+	/* Retrieve and save the GSI FW version */
+	ipa_save_gsi_ver();
+
 	if (ipahal_init(ipa3_ctx->ipa_hw_type, ipa3_ctx->mmio,
 		ipa3_ctx->pdev)) {
 		IPAERR("fail to init ipahal\n");
@@ -5166,6 +5171,32 @@
 	return 0;
 }
 
+/*
+ * SCM call to check if secure dump is allowed.
+ *
+ * Returns true if secure dump is allowed,
+ * false when it is not.
+ */
+#define TZ_UTIL_GET_SEC_DUMP_STATE  0x10
+static bool ipa_is_mem_dump_allowed(void)
+{
+	struct scm_desc desc = {0};
+	int ret = 0;
+
+	desc.args[0] = 0;
+	desc.arginfo = 0;
+
+	ret = scm_call2(
+		SCM_SIP_FNID(SCM_SVC_UTIL, TZ_UTIL_GET_SEC_DUMP_STATE),
+		&desc);
+	if (ret) {
+		IPAERR("SCM DUMP_STATE call failed\n");
+		return false;
+	}
+
+	return (desc.ret[0] == 1);
+}
+
 /**
  * ipa3_pre_init() - Initialize the IPA Driver.
  * This part contains all initialization which doesn't require IPA HW, such
@@ -5248,6 +5279,37 @@
 	ipa3_ctx->ipa_config_is_mhi = resource_p->ipa_mhi_dynamic_config;
 	ipa3_ctx->mhi_evid_limits[0] = resource_p->mhi_evid_limits[0];
 	ipa3_ctx->mhi_evid_limits[1] = resource_p->mhi_evid_limits[1];
+	ipa3_ctx->entire_ipa_block_size = resource_p->entire_ipa_block_size;
+	ipa3_ctx->do_register_collection_on_crash =
+	    resource_p->do_register_collection_on_crash;
+	ipa3_ctx->do_testbus_collection_on_crash =
+	    resource_p->do_testbus_collection_on_crash;
+	ipa3_ctx->do_non_tn_collection_on_crash =
+	    resource_p->do_non_tn_collection_on_crash;
+	ipa3_ctx->secure_debug_check_action =
+		resource_p->secure_debug_check_action;
+
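+	/*
+	 * Determine the secure debug state: query TZ via an SCM call
+	 * (USE_SCM), or honor the dtsi override (OVERRIDE_SCM_TRUE/FALSE).
+	 */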
+	if (ipa3_ctx->secure_debug_check_action == USE_SCM) {
+		if (ipa_is_mem_dump_allowed())
+			ipa3_ctx->sd_state = SD_ENABLED;
+		else
+			ipa3_ctx->sd_state = SD_DISABLED;
+	} else {
+		if (ipa3_ctx->secure_debug_check_action == OVERRIDE_SCM_TRUE)
+			ipa3_ctx->sd_state = SD_ENABLED;
+		else
+			/* secure_debug_check_action == OVERRIDE_SCM_FALSE */
+			ipa3_ctx->sd_state = SD_DISABLED;
+	}
+
+	if (ipa3_ctx->sd_state == SD_ENABLED) {
+		/* secure debug is enabled. */
+		IPADBG("secure debug enabled\n");
+	} else {
+		/* secure debug is disabled. */
+		IPADBG("secure debug disabled\n");
+		ipa3_ctx->do_testbus_collection_on_crash = false;
+	}
 
 	WARN(ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_NORMAL,
 		"Non NORMAL IPA HW mode, is this emulation platform ?");
@@ -5366,6 +5428,14 @@
 	    resource_p->ipa_mem_size);
 
 	/*
+	 * Set up access for register collection/dump on crash
+	 */
+	if (ipa_reg_save_init(IPA_MEM_INIT_VAL) != 0) {
+		result = -EFAULT;
+		goto fail_gsi_map;
+	}
+
+	/*
 	 * Since we now know where the transport's registers live,
 	 * let's set up access to them.  This is done since subsequent
 	 * functions that deal with the transport require the
@@ -5683,6 +5753,8 @@
 fail_init_hw:
 	gsi_unmap_base();
 fail_gsi_map:
+	if (ipa3_ctx->reg_collection_base)
+		iounmap(ipa3_ctx->reg_collection_base);
 	iounmap(ipa3_ctx->mmio);
 fail_remap:
 	ipa3_disable_clks();
@@ -6151,6 +6223,59 @@
 		       ipa_drv_res->emulator_intcntrlr_mem_size);
 	}
 
+	ipa_drv_res->entire_ipa_block_size = 0x100000;
+	result = of_property_read_u32(pdev->dev.of_node,
+				      "qcom,entire-ipa-block-size",
+				      &ipa_drv_res->entire_ipa_block_size);
+	IPADBG(": entire_ipa_block_size = %d\n",
+	       ipa_drv_res->entire_ipa_block_size);
+
+	/*
+	 * We'll read register-collection-on-crash here, but log it
+	 * later because its value may change based on subsequent
+	 * dtsi reads...
+	 */
+	ipa_drv_res->do_register_collection_on_crash =
+	    of_property_read_bool(pdev->dev.of_node,
+				  "qcom,register-collection-on-crash");
+	/*
+	 * We'll read testbus-collection-on-crash here...
+	 */
+	ipa_drv_res->do_testbus_collection_on_crash =
+	    of_property_read_bool(pdev->dev.of_node,
+				  "qcom,testbus-collection-on-crash");
+	IPADBG(": doing testbus collection on crash = %u\n",
+	       ipa_drv_res->do_testbus_collection_on_crash);
+
+	/*
+	 * We'll read non-tn-collection-on-crash here...
+	 */
+	ipa_drv_res->do_non_tn_collection_on_crash =
+	    of_property_read_bool(pdev->dev.of_node,
+				  "qcom,non-tn-collection-on-crash");
+	IPADBG(": doing non-tn collection on crash = %u\n",
+	       ipa_drv_res->do_non_tn_collection_on_crash);
+
+	if (ipa_drv_res->do_testbus_collection_on_crash ||
+		ipa_drv_res->do_non_tn_collection_on_crash)
+		ipa_drv_res->do_register_collection_on_crash = true;
+
+	IPADBG(": doing register collection on crash = %u\n",
+	       ipa_drv_res->do_register_collection_on_crash);
+
+	result = of_property_read_u32(
+		pdev->dev.of_node,
+		"qcom,secure-debug-check-action",
+		&ipa_drv_res->secure_debug_check_action);
+	if (result ||
+		(ipa_drv_res->secure_debug_check_action != 0 &&
+		 ipa_drv_res->secure_debug_check_action != 1 &&
+		 ipa_drv_res->secure_debug_check_action != 2))
+		ipa_drv_res->secure_debug_check_action = USE_SCM;
+
+	IPADBG(": secure-debug-check-action = %d\n",
+		   ipa_drv_res->secure_debug_check_action);
+
 	return 0;
 }
 
@@ -6948,8 +7073,14 @@
 
 	switch (in->smmu_client) {
 	case IPA_SMMU_WLAN_CLIENT:
-		is_smmu_enable = !(ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] |
-			ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN]);
+		if (ipa3_ctx->ipa_wdi3_over_gsi)
+			is_smmu_enable =
+				!(ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP] |
+				ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN]);
+		else
+			is_smmu_enable =
+				!(ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC] |
+				ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN]);
 		break;
 	default:
 		is_smmu_enable = false;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 4aaa9c9..1f28884 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -1556,7 +1556,7 @@
 	int cnt = 0;
 	int i;
 
-	for (i = 0; i < IPA_EVENT_MAX_NUM; i++) {
+	for (i = 0; i < ARRAY_SIZE(ipa3_event_name); i++) {
 		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
 				"msg[%u:%27s] W:%u R:%u\n", i,
 				ipa3_event_name[i],
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index f8abb5c..f8283ee 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -2836,8 +2836,7 @@
 
 		IPA_STATS_INC_CNT(ipa3_ctx->stats.rx_pkts);
 		if (status.endp_dest_idx >= ipa3_ctx->ipa_num_pipes ||
-			status.endp_src_idx >= ipa3_ctx->ipa_num_pipes ||
-			status.pkt_len > IPA_GENERIC_AGGR_BYTE_LIMIT * 1024) {
+			status.endp_src_idx >= ipa3_ctx->ipa_num_pipes) {
 			IPAERR("status fields invalid\n");
 			WARN_ON(1);
 			goto bail;
@@ -3066,6 +3065,10 @@
 	struct ipa3_sys_context *coal_sys;
 	int ipa_ep_idx;
 
+	if (!notify) {
+		IPAERR_RL("gsi_chan_xfer_notify is null\n");
+		return;
+	}
 	rx_skb = handle_skb_completion(notify, true);
 
 	if (rx_skb) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dt_replacement.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dt_replacement.c
index 9d6ea9e..06b6bfc 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dt_replacement.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dt_replacement.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/errno.h>
@@ -80,6 +80,9 @@
 	{"qcom,use-rg10-limitation-mitigation", false},
 	{"qcom,do-not-use-ch-gsi-20",           false},
 	{"qcom,use-ipa-pm",                     true},
+	{"qcom,register-collection-on-crash",   true},
+	{"qcom,testbus-collection-on-crash",    true},
+	{"qcom,non-tn-collection-on-crash",     true},
 };
 
 static struct dtsi_replacement_bool ipa3_plat_drv_bool_4_0[] = {
@@ -93,6 +96,9 @@
 	{"qcom,use-rg10-limitation-mitigation", false},
 	{"qcom,do-not-use-ch-gsi-20",           false},
 	{"qcom,use-ipa-pm",                     false},
+	{"qcom,register-collection-on-crash",   true},
+	{"qcom,testbus-collection-on-crash",    true},
+	{"qcom,non-tn-collection-on-crash",     true},
 };
 
 static struct dtsi_replacement_bool ipa3_plat_drv_bool_3_5_1[] = {
@@ -106,6 +112,9 @@
 	{"qcom,use-rg10-limitation-mitigation", false},
 	{"qcom,do-not-use-ch-gsi-20",           false},
 	{"qcom,use-ipa-pm",                     false},
+	{"qcom,register-collection-on-crash",   true},
+	{"qcom,testbus-collection-on-crash",    true},
+	{"qcom,non-tn-collection-on-crash",     true},
 };
 
 static struct dtsi_replacement_bool_table
@@ -126,6 +135,7 @@
 	{"qcom,ee",                             0},
 	{"qcom,msm-bus,num-cases",              5},
 	{"emulator-bar0-offset",                0x01C00000},
+	{"qcom,entire-ipa-block-size",          0x00100000},
 };
 
 static struct dtsi_replacement_u32 ipa3_plat_drv_u32_4_0[] = {
@@ -135,6 +145,7 @@
 	{"qcom,lan-rx-ring-size",               192},
 	{"qcom,ee",                             0},
 	{"emulator-bar0-offset",                0x01C00000},
+	{"qcom,entire-ipa-block-size",          0x00100000},
 };
 
 static struct dtsi_replacement_u32 ipa3_plat_drv_u32_3_5_1[] = {
@@ -144,6 +155,7 @@
 	{"qcom,lan-rx-ring-size",               192},
 	{"qcom,ee",                             0},
 	{"emulator-bar0-offset",                0x01C00000},
+	{"qcom,entire-ipa-block-size",          0x00100000},
 };
 
 static struct dtsi_replacement_u32_table ipa3_plat_drv_u32_table[] = {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index 6ce614f..dad8582 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  */
 
 #include "ipa_i.h"
@@ -54,7 +54,7 @@
 }
 
 static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem,
-	u32 hdr_base_addr)
+	u64 hdr_base_addr)
 {
 	struct ipa3_hdr_proc_ctx_entry *entry;
 	int ret;
@@ -109,10 +109,10 @@
  *
  * Returns:	0 on success, negative on failure
  */
-static int ipa3_generate_hdr_proc_ctx_hw_tbl(u32 hdr_sys_addr,
+static int ipa3_generate_hdr_proc_ctx_hw_tbl(u64 hdr_sys_addr,
 	struct ipa_mem_buffer *mem, struct ipa_mem_buffer *aligned_mem)
 {
-	u32 hdr_base_addr;
+	u64 hdr_base_addr;
 
 	mem->size = (ipa3_ctx->hdr_proc_ctx_tbl.end) ? : 4;
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 7eb6421..5d74ce6 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -289,6 +289,19 @@
 #define IPA_FWS_PATH_3_5_1   "ipa/3.5.1/ipa_fws.elf"
 #define IPA_FWS_PATH_4_5     "ipa/4.5/ipa_fws.elf"
 
+/*
+ * The following will be used for determining/using access control
+ * policy.
+ */
+#define USE_SCM            0 /* use scm call to determine policy */
+#define OVERRIDE_SCM_TRUE  1 /* override scm call with true */
+#define OVERRIDE_SCM_FALSE 2 /* override scm call with false */
+
+#define SD_ENABLED  0 /* secure debug enabled. */
+#define SD_DISABLED 1 /* secure debug disabled. */
+
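+/* Returned by no-op register reads; also used to init the dump area */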
+#define IPA_MEM_INIT_VAL 0xFFFFFFFF
+
 #ifdef CONFIG_COMPAT
 #define IPA_IOC_ADD_HDR32 _IOWR(IPA_IOC_MAGIC, \
 					IPA_IOCTL_ADD_HDR, \
@@ -1520,6 +1533,7 @@
  * @mhi_evid_limits: MHI event rings start and end ids
  * @init_completion_obj: Completion object to be used in case IPA driver hasn't
  *  finished initializing. Example of use - IOCTLs to /dev/ipa
+ * @dl_csum_offload_enabled: IPA will do dl csum offload
  * IPA context - holds all relevant info about IPA driver and its state
  */
 struct ipa3_context {
@@ -1664,10 +1678,18 @@
 	bool use_ipa_pm;
 	bool vlan_mode_iface[IPA_VLAN_IF_MAX];
 	bool wdi_over_pcie;
+	u32 entire_ipa_block_size;
+	bool do_register_collection_on_crash;
+	bool do_testbus_collection_on_crash;
+	bool do_non_tn_collection_on_crash;
+	u32 secure_debug_check_action;
+	u32 sd_state;
+	void __iomem *reg_collection_base;
 	struct ipa3_wdi2_ctx wdi2_ctx;
 	struct mbox_client mbox_client;
 	struct mbox_chan *mbox;
 	atomic_t ipa_clk_vote;
+	bool dl_csum_offload_enabled;
 };
 
 struct ipa3_plat_drv_res {
@@ -1707,6 +1729,11 @@
 	bool use_ipa_pm;
 	struct ipa_pm_init_params pm_init;
 	bool wdi_over_pcie;
+	u32 entire_ipa_block_size;
+	bool do_register_collection_on_crash;
+	bool do_testbus_collection_on_crash;
+	bool do_non_tn_collection_on_crash;
+	u32 secure_debug_check_action;
 };
 
 /**
@@ -2701,4 +2728,13 @@
 	unsigned long *size_ptr);
 irq_handler_t ipa3_get_isr(void);
 void ipa_pc_qmp_enable(void);
+#if defined(CONFIG_IPA3_REGDUMP)
+int ipa_reg_save_init(u32 value);
+void ipa_save_registers(void);
+void ipa_save_gsi_ver(void);
+#else
+static inline int ipa_reg_save_init(u32 value) { return 0; }
+static inline void ipa_save_registers(void) { }
+static inline void ipa_save_gsi_ver(void) { }
+#endif
 #endif /* _IPA3_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
index b522776..df9aceb 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/fs.h>
@@ -392,6 +392,8 @@
 	uint8_t mac[IPA_MAC_ADDR_SIZE];
 	uint8_t mac2[IPA_MAC_ADDR_SIZE];
 
+	if (!buff)
+		return -EINVAL;
 	if (meta->msg_type == WLAN_CLIENT_CONNECT_EX) {
 		/* debug print */
 		event_ex_cur_con = buff;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c
index ebe62c2..bc0f891 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c
@@ -1072,7 +1072,6 @@
 		}
 	}
 
-	imp_ctx->state = IMP_PROBED;
 	mutex_unlock(&imp_ctx->mutex);
 
 	IMP_FUNC_EXIT();
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index a3331e2..46a7503 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -2432,7 +2432,7 @@
 			{ 31, 31, 8, 8, IPA_EE_AP } },
 
 	/* IPA_4_5 APQ */
-	[IPA_4_5_APQ][IPA_CLIENT_WLAN1_PROD]          = {
+	[IPA_4_5_APQ][IPA_CLIENT_WLAN2_PROD]          = {
 			true, IPA_v4_5_GROUP_UL_DL_SRC,
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
@@ -2494,7 +2494,7 @@
 			QMB_MASTER_SELECT_DDR,
 			{ 10, 10, 8, 16, IPA_EE_AP } },
 
-	[IPA_4_5_APQ][IPA_CLIENT_WLAN1_CONS]          = {
+	[IPA_4_5_APQ][IPA_CLIENT_WLAN2_CONS]          = {
 			true, IPA_v4_5_GROUP_UL_DL_DST,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
index 525aebf..b4d123a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -1197,7 +1197,7 @@
 static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type,
 		void *const base, u32 offset,
 		u32 hdr_len, bool is_hdr_proc_ctx,
-		dma_addr_t phys_base, u32 hdr_base_addr,
+		dma_addr_t phys_base, u64 hdr_base_addr,
 		struct ipa_hdr_offset_entry *offset_entry,
 		struct ipa_l2tp_hdr_proc_ctx_params l2tp_params)
 {
@@ -1346,7 +1346,7 @@
 	int (*ipahal_cp_proc_ctx_to_hw_buff)(enum ipa_hdr_proc_type type,
 			void *const base, u32 offset, u32 hdr_len,
 			bool is_hdr_proc_ctx, dma_addr_t phys_base,
-			u32 hdr_base_addr,
+			u64 hdr_base_addr,
 			struct ipa_hdr_offset_entry *offset_entry,
 			struct ipa_l2tp_hdr_proc_ctx_params l2tp_params);
 
@@ -1418,11 +1418,11 @@
 int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
 		void *const base, u32 offset, u32 hdr_len,
 		bool is_hdr_proc_ctx, dma_addr_t phys_base,
-		u32 hdr_base_addr, struct ipa_hdr_offset_entry *offset_entry,
+		u64 hdr_base_addr, struct ipa_hdr_offset_entry *offset_entry,
 		struct ipa_l2tp_hdr_proc_ctx_params l2tp_params)
 {
 	IPAHAL_DBG(
-		"type %d, base %pK, offset %d, hdr_len %d, is_hdr_proc_ctx %d, hdr_base_addr %d, offset_entry %pK\n"
+		"type %d, base %pK, offset %d, hdr_len %d, is_hdr_proc_ctx %d, hdr_base_addr %llu, offset_entry %pK\n"
 			, type, base, offset, hdr_len, is_hdr_proc_ctx,
 			hdr_base_addr, offset_entry);
 
@@ -1432,7 +1432,7 @@
 		(!is_hdr_proc_ctx && !offset_entry) ||
 		(!is_hdr_proc_ctx && !hdr_base_addr)) {
 		IPAHAL_ERR(
-			"invalid input: hdr_len:%u phys_base:%pad hdr_base_addr:%u is_hdr_proc_ctx:%d offset_entry:%pK\n"
+			"invalid input: hdr_len:%u phys_base:%pad hdr_base_addr:%llu is_hdr_proc_ctx:%d offset_entry:%pK\n"
 			, hdr_len, &phys_base, hdr_base_addr
 			, is_hdr_proc_ctx, offset_entry);
 		return -EINVAL;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
index 8a710da..fb2ba48 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _IPAHAL_H_
@@ -634,7 +634,7 @@
 int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
 		void *base, u32 offset, u32 hdr_len,
 		bool is_hdr_proc_ctx, dma_addr_t phys_base,
-		u32 hdr_base_addr,
+		u64 hdr_base_addr,
 		struct ipa_hdr_offset_entry *offset_entry,
 		struct ipa_l2tp_hdr_proc_ctx_params l2tp_params);
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
index 0b2b24f..717b54d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/ipa.h>
@@ -1853,7 +1853,8 @@
 	if (attrib->fl_eq_present)
 		rest = ipa_write_32(attrib->fl_eq & 0xFFFFF, rest);
 
-	extra = ipa_pad_to_64(extra);
+	if (extra)
+		extra = ipa_pad_to_64(extra);
 	rest = ipa_pad_to_64(rest);
 	*buf = rest;
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 4c692a6..ca20dc2 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -193,18 +193,18 @@
 	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
 		hdr_entry->hdr_len = IPA_DL_CHECKSUM_LENGTH; /* 8 bytes */
 		/* new DL QMAP header format */
-		hdr->hdr[0].hdr[0] = 0x40;
-		hdr->hdr[0].hdr[1] = 0;
-		hdr->hdr[0].hdr[2] = 0;
-		hdr->hdr[0].hdr[3] = 0;
-		hdr->hdr[0].hdr[4] = 0x4;
+		hdr_entry->hdr[0] = 0x40;
+		hdr_entry->hdr[1] = 0;
+		hdr_entry->hdr[2] = 0;
+		hdr_entry->hdr[3] = 0;
+		hdr_entry->hdr[4] = 0x4;
 		/*
 		 * Need to set the csum required/valid bit on; it will be
 		 * replaced by HW if the checksum is incorrect after validation
 		 */
-		hdr->hdr[0].hdr[5] = 0x80;
-		hdr->hdr[0].hdr[6] = 0;
-		hdr->hdr[0].hdr[7] = 0;
+		hdr_entry->hdr[5] = 0x80;
+		hdr_entry->hdr[6] = 0;
+		hdr_entry->hdr[7] = 0;
 	} else
 		hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */
 
@@ -326,8 +326,27 @@
 	 strlcpy(hdr_entry->name, hdr_name,
 				IPA_RESOURCE_NAME_MAX);
 
-	hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */
-	hdr_entry->hdr[1] = (uint8_t) mux_id;
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5 &&
+		ipa3_ctx->dl_csum_offload_enabled) {
+		hdr_entry->hdr_len = IPA_DL_CHECKSUM_LENGTH; /* 8 bytes */
+		/* new DL QMAP header format */
+		hdr_entry->hdr[0] = 0x40;
+		hdr_entry->hdr[1] = (uint8_t) mux_id;
+		hdr_entry->hdr[2] = 0;
+		hdr_entry->hdr[3] = 0;
+		hdr_entry->hdr[4] = 0x4;
+		/*
+		 * Need to set the csum required/valid bit on; it will be
+		 * replaced by HW if the checksum is incorrect after validation
+		 */
+		hdr_entry->hdr[5] = 0x80;
+		hdr_entry->hdr[6] = 0;
+		hdr_entry->hdr[7] = 0;
+	} else {
+		hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */
+		hdr_entry->hdr[1] = (uint8_t) mux_id;
+	}
+
 	IPAWANDBG("header (%s) with mux-id: (%d)\n",
 		hdr_name,
 		hdr_entry->hdr[1]);
@@ -1348,9 +1367,10 @@
 	}
 
 	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5 &&
-		(in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM)
+		(in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM) {
 		ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_len = 8;
-	else
+		ipa3_ctx->dl_csum_offload_enabled = true;
+	} else
 		ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_len = 4;
 	ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
 	ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata = 1;
@@ -2797,6 +2817,11 @@
 	if (!ipa3_rmnet_ctx.ipa_rmnet_ssr)
 		return NOTIFY_DONE;
 
+	if (!ipa3_ctx) {
+		IPAWANERR_RL("ipa3_ctx was not initialized\n");
+		return NOTIFY_DONE;
+	}
+
 	if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ) {
 		IPAWANERR("Local modem SSR event=%lu on APQ platform\n",
 			code);
@@ -4375,6 +4400,10 @@
 	void *ssr_hdl;
 	int rc = 0;
 
+	if (!ipa3_ctx) {
+		IPAWANERR_RL("ipa3_ctx was not initialized\n");
+		return -EINVAL;
+	}
 	rmnet_ipa3_ctx = kzalloc(sizeof(*rmnet_ipa3_ctx), GFP_KERNEL);
 
 	if (!rmnet_ipa3_ctx)
diff --git a/drivers/platform/msm/msm_ext_display.c b/drivers/platform/msm/msm_ext_display.c
index d0b5cbd..414ebd4 100644
--- a/drivers/platform/msm/msm_ext_display.c
+++ b/drivers/platform/msm/msm_ext_display.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt)	"%s: " fmt, __func__
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 #include <linux/of_platform.h>
 #include <linux/msm_ext_display.h>
+#include <linux/extcon-provider.h>
 
 struct msm_ext_disp_list {
 	struct msm_ext_disp_init_data *data;
diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c
index e5e0dc4..68f5a12 100644
--- a/drivers/platform/msm/qcom-geni-se.c
+++ b/drivers/platform/msm/qcom-geni-se.c
@@ -23,6 +23,14 @@
 #define GENI_SE_IOMMU_VA_START	(0x40000000)
 #define GENI_SE_IOMMU_VA_SIZE	(0xC0000000)
 
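+/*
+ * Split a 64-bit DMA address into the 32-bit halves the SE registers
+ * expect; non-ARM64 builds treat the address as 32 bits, so the high
+ * word is always zero.
+ */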
+#ifdef CONFIG_ARM64
+#define GENI_SE_DMA_PTR_L(ptr) ((u32)(ptr))
+#define GENI_SE_DMA_PTR_H(ptr) ((u32)((ptr) >> 32))
+#else
+#define GENI_SE_DMA_PTR_L(ptr) ((u32)(ptr))
+#define GENI_SE_DMA_PTR_H(ptr) 0
+#endif
+
 #define NUM_LOG_PAGES 2
 #define MAX_CLK_PERF_LEVEL 32
 static unsigned long default_bus_bw_set[] = {0, 19200000, 50000000, 100000000};
@@ -1039,8 +1047,8 @@
 		return ret;
 
 	geni_write_reg(7, base, SE_DMA_TX_IRQ_EN_SET);
-	geni_write_reg((u32)(*tx_dma), base, SE_DMA_TX_PTR_L);
-	geni_write_reg((u32)((*tx_dma) >> 32), base, SE_DMA_TX_PTR_H);
+	geni_write_reg(GENI_SE_DMA_PTR_L(*tx_dma), base, SE_DMA_TX_PTR_L);
+	geni_write_reg(GENI_SE_DMA_PTR_H(*tx_dma), base, SE_DMA_TX_PTR_H);
 	geni_write_reg(1, base, SE_DMA_TX_ATTR);
 	geni_write_reg(tx_len, base, SE_DMA_TX_LEN);
 	return 0;
@@ -1073,8 +1081,8 @@
 		return ret;
 
 	geni_write_reg(7, base, SE_DMA_RX_IRQ_EN_SET);
-	geni_write_reg((u32)(*rx_dma), base, SE_DMA_RX_PTR_L);
-	geni_write_reg((u32)((*rx_dma) >> 32), base, SE_DMA_RX_PTR_H);
+	geni_write_reg(GENI_SE_DMA_PTR_L(*rx_dma), base, SE_DMA_RX_PTR_L);
+	geni_write_reg(GENI_SE_DMA_PTR_H(*rx_dma), base, SE_DMA_RX_PTR_H);
 	/* RX does not have EOT bit */
 	geni_write_reg(0, base, SE_DMA_RX_ATTR);
 	geni_write_reg(rx_len, base, SE_DMA_RX_LEN);
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index db2af09..b6f2ff9 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -442,8 +442,7 @@
 	{ KE_KEY, 0x30, { KEY_VOLUMEUP } },
 	{ KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
 	{ KE_KEY, 0x32, { KEY_MUTE } },
-	{ KE_KEY, 0x33, { KEY_DISPLAYTOGGLE } }, /* LCD on */
-	{ KE_KEY, 0x34, { KEY_DISPLAY_OFF } }, /* LCD off */
+	{ KE_KEY, 0x35, { KEY_SCREENLOCK } },
 	{ KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
 	{ KE_KEY, 0x41, { KEY_NEXTSONG } },
 	{ KE_KEY, 0x43, { KEY_STOPCD } }, /* Stop/Eject */
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 2d6e272..db3556d 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -2231,7 +2231,8 @@
 		err = asus_wmi_backlight_init(asus);
 		if (err && err != -ENODEV)
 			goto fail_backlight;
-	}
+	} else
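+		/* Tell the EC the OS will handle the display-off hotkey */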
+		err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL);
 
 	status = wmi_install_notify_handler(asus->driver->event_guid,
 					    asus_wmi_notify, asus);
diff --git a/drivers/power/supply/olpc_battery.c b/drivers/power/supply/olpc_battery.c
index 6da79ae..5a97e42 100644
--- a/drivers/power/supply/olpc_battery.c
+++ b/drivers/power/supply/olpc_battery.c
@@ -428,14 +428,14 @@
 		if (ret)
 			return ret;
 
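+		/* EC reports 1/256 degC; TEMP props are in tenths of a degC */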
-		val->intval = (s16)be16_to_cpu(ec_word) * 100 / 256;
+		val->intval = (s16)be16_to_cpu(ec_word) * 10 / 256;
 		break;
 	case POWER_SUPPLY_PROP_TEMP_AMBIENT:
 		ret = olpc_ec_cmd(EC_AMB_TEMP, NULL, 0, (void *)&ec_word, 2);
 		if (ret)
 			return ret;
 
-		val->intval = (int)be16_to_cpu(ec_word) * 100 / 256;
+		val->intval = (int)be16_to_cpu(ec_word) * 10 / 256;
 		break;
 	case POWER_SUPPLY_PROP_CHARGE_COUNTER:
 		ret = olpc_ec_cmd(EC_BAT_ACR, NULL, 0, (void *)&ec_word, 2);
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index d9efe65..d685f81 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt) "QCOM-BATT: %s: " fmt, __func__
@@ -40,6 +40,7 @@
 #define PL_FCC_LOW_VOTER		"PL_FCC_LOW_VOTER"
 #define ICL_LIMIT_VOTER			"ICL_LIMIT_VOTER"
 #define FCC_STEPPER_VOTER		"FCC_STEPPER_VOTER"
+#define FCC_VOTER			"FCC_VOTER"
 
 struct pl_data {
 	int			pl_mode;
@@ -57,6 +58,7 @@
 	struct votable		*hvdcp_hw_inov_dis_votable;
 	struct votable		*usb_icl_votable;
 	struct votable		*pl_enable_votable_indirect;
+	struct votable		*cp_ilim_votable;
 	struct delayed_work	status_change_work;
 	struct work_struct	pl_disable_forever_work;
 	struct work_struct	pl_taper_work;
@@ -68,6 +70,7 @@
 	struct power_supply	*batt_psy;
 	struct power_supply	*usb_psy;
 	struct power_supply	*dc_psy;
+	struct power_supply	*cp_master_psy;
 	int			charge_type;
 	int			total_settled_ua;
 	int			pl_settled_ua;
@@ -85,6 +88,7 @@
 	struct wakeup_source	*pl_ws;
 	struct notifier_block	nb;
 	bool			pl_disable;
+	bool			cp_disabled;
 	int			taper_entry_fv;
 	int			main_fcc_max;
 	/* debugfs directory */
@@ -476,6 +480,46 @@
 	}
 }
 
+static void get_main_fcc_config(struct pl_data *chip, int *total_fcc)
+{
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	if (!chip->cp_master_psy)
+		chip->cp_master_psy =
+			power_supply_get_by_name("charge_pump_master");
+	if (!chip->cp_master_psy)
+		goto out;
+
+	rc = power_supply_get_property(chip->cp_master_psy,
+			POWER_SUPPLY_PROP_CP_SWITCHER_EN, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get switcher enable status, rc=%d\n", rc);
+		goto out;
+	}
+
+	if (!pval.intval) {
+		/*
+		 * To honor the main charger's upper FCC limit when the CP
+		 * switcher is disabled, skip FCC slewing, as it would delay
+		 * limiting the charge current flowing through the main charger.
+		 */
+		if (!chip->cp_disabled) {
+			chip->fcc_stepper_enable = false;
+			pl_dbg(chip, PR_PARALLEL,
+				"Disabling FCC slewing on CP Switcher disable\n");
+		}
+		chip->cp_disabled = true;
+	} else {
+		chip->cp_disabled = false;
+		pl_dbg(chip, PR_PARALLEL,
+			"CP Switcher is enabled, don't limit main fcc\n");
+		return;
+	}
+out:
+	*total_fcc = min(*total_fcc, chip->main_fcc_max);
+}
+
 static void get_fcc_stepper_params(struct pl_data *chip, int main_fcc_ua,
 			int parallel_fcc_ua)
 {
@@ -618,6 +662,9 @@
 	if (!chip->main_psy)
 		return 0;
 
+	if (!chip->cp_ilim_votable)
+		chip->cp_ilim_votable = find_votable("CP_ILIM");
+
 	if (chip->pl_mode != POWER_SUPPLY_PL_NONE) {
 		get_fcc_split(chip, total_fcc_ua, &master_fcc_ua,
 				&slave_fcc_ua);
@@ -814,6 +861,10 @@
 	chip->main_fcc_ua = main_fcc;
 	chip->slave_fcc_ua = parallel_fcc;
 
+	if (chip->cp_ilim_votable)
+		vote(chip->cp_ilim_votable, FCC_VOTER, true,
+					chip->main_fcc_ua / 2);
+
 	if (reschedule_ms) {
 		schedule_delayed_work(&chip->fcc_stepper_work,
 				msecs_to_jiffies(reschedule_ms));
@@ -929,6 +980,9 @@
 
 	vote(chip->pl_disable_votable, ICL_CHANGE_VOTER, false, 0);
 
+	if (chip->cp_ilim_votable)
+		vote(chip->cp_ilim_votable, ICL_CHANGE_VOTER, true, icl_ua);
+
 	return 0;
 }
 
@@ -1161,8 +1215,7 @@
 			(slave_fcc_ua * 100) / total_fcc_ua);
 	} else {
 		if (chip->main_fcc_max)
-			total_fcc_ua = min(total_fcc_ua,
-						chip->main_fcc_max);
+			get_main_fcc_config(chip, &total_fcc_ua);
 
 		if (!chip->fcc_stepper_enable) {
 			if (IS_USBIN(chip->pl_mode))
@@ -1188,6 +1241,10 @@
 				return rc;
 			}
 
+			if (chip->cp_ilim_votable)
+				vote(chip->cp_ilim_votable, FCC_VOTER, true,
+						total_fcc_ua / 2);
+
 			/* reset parallel FCC */
 			chip->slave_fcc_ua = 0;
 			chip->total_settled_ua = 0;
@@ -1700,6 +1757,7 @@
 	}
 
 	chip->pl_disable = true;
+	chip->cp_disabled = true;
 	chip->qcom_batt_class.name = "qcom-battery",
 	chip->qcom_batt_class.owner = THIS_MODULE,
 	chip->qcom_batt_class.class_groups = batt_class_groups;
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen4.c b/drivers/power/supply/qcom/qpnp-fg-gen4.c
index f7ed5819..e053a07 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen4.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen4.c
@@ -6,6 +6,7 @@
 #define pr_fmt(fmt)	"FG: %s: " fmt, __func__
 
 #include <linux/alarmtimer.h>
+#include <linux/irq.h>
 #include <linux/ktime.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
@@ -188,8 +189,10 @@
 	bool	five_pin_battery;
 	bool	multi_profile_load;
 	bool	esr_calib_dischg;
+	bool	soc_hi_res;
 	int	cutoff_volt_mv;
 	int	empty_volt_mv;
+	int	sys_min_volt_mv;
 	int	cutoff_curr_ma;
 	int	sys_term_curr_ma;
 	int	delta_soc_thr;
@@ -236,7 +239,7 @@
 	struct work_struct	esr_calib_work;
 	struct alarm		esr_fast_cal_timer;
 	struct delayed_work	pl_enable_work;
-	struct delayed_work	pl_current_en_work;
+	struct work_struct	pl_current_en_work;
 	struct completion	mem_attn;
 	char			batt_profile[PROFILE_LEN];
 	enum slope_limit_status	slope_limit_sts;
@@ -851,6 +854,35 @@
 	return 0;
 }
 
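+/*
+ * With qcom,soc-hi-res set, report SOC in centi-percent: while input
+ * is present, scale the 16-bit monotonic SOC (0-0xFFFF) to 0-10000;
+ * otherwise report the regular capacity multiplied to the same scale.
+ */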
+static int fg_gen4_get_prop_capacity_raw(struct fg_gen4_chip *chip, int *val)
+{
+	struct fg_dev *fg = &chip->fg;
+	int rc;
+
+	if (!chip->dt.soc_hi_res) {
+		rc = fg_get_msoc_raw(fg, val);
+		return rc;
+	}
+
+	if (!is_input_present(fg)) {
+		rc = fg_gen4_get_prop_capacity(fg, val);
+		if (!rc)
+			*val = *val * 100;
+		return rc;
+	}
+
+	rc = fg_get_sram_prop(&chip->fg, FG_SRAM_MONOTONIC_SOC, val);
+	if (rc < 0) {
+		pr_err("Error in getting MONOTONIC_SOC, rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Show it in centi-percentage */
+	*val = (*val * 10000) / 0xFFFF;
+
+	return 0;
+}
+
 static inline void get_esr_meas_current(int curr_ma, u8 *val)
 {
 	switch (curr_ma) {
@@ -874,6 +906,43 @@
 	*val <<= ESR_PULL_DOWN_IVAL_SHIFT;
 }
 
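+/*
+ * Predicted deliverable power at the system minimum voltage:
+ * I = (V_pred - V_min) / R  =>  P = V_min * (V_pred - V_min) / R,
+ * where R is ESR alone (instantaneous) or ESR + Rslow (average).
+ */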
+static int fg_gen4_get_power(struct fg_gen4_chip *chip, int *val, bool average)
+{
+	struct fg_dev *fg = &chip->fg;
+	int rc, v_min, v_pred, esr_uohms, rslow_uohms;
+	s64 power;
+
+	rc = fg_get_sram_prop(fg, FG_SRAM_VOLTAGE_PRED, &v_pred);
+	if (rc < 0)
+		return rc;
+
+	v_min = chip->dt.sys_min_volt_mv * 1000;
+	power = (s64)v_min * (v_pred - v_min);
+
+	rc = fg_get_sram_prop(fg, FG_SRAM_ESR_ACT, &esr_uohms);
+	if (rc < 0) {
+		pr_err("failed to get ESR_ACT, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_get_sram_prop(fg, FG_SRAM_RSLOW, &rslow_uohms);
+	if (rc < 0) {
+		pr_err("failed to get Rslow, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (average)
+		power = div_s64(power, esr_uohms + rslow_uohms);
+	else
+		power = div_s64(power, esr_uohms);
+
+	pr_debug("V_min: %d V_pred: %d ESR: %d Rslow: %d power: %lld\n", v_min,
+		v_pred, esr_uohms, rslow_uohms, power);
+
+	*val = power;
+	return 0;
+}
+
 /* ALG callback functions below */
 
 static int fg_gen4_get_ttf_param(void *data, enum ttf_param param, int *val)
@@ -3182,9 +3251,15 @@
 	fg_dbg(fg, FG_STATUS, "esr_raw: 0x%x esr_char_raw: 0x%x esr_meas_diff: 0x%x esr_delta: 0x%x\n",
 		esr_raw, esr_char_raw, esr_meas_diff, esr_delta);
 
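+	/*
+	 * esr_delta is maintained at 32x the resolution of the measured
+	 * diff: scale it down before comparing and back up when applying
+	 * the filtered correction.
+	 */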
-	fg_esr_meas_diff = esr_delta - esr_meas_diff;
-	esr_filtered = fg_esr_meas_diff >> chip->dt.esr_filter_factor;
-	esr_delta = esr_delta - esr_filtered;
+	fg_esr_meas_diff = esr_meas_diff - (esr_delta / 32);
+
+	/* Don't filter for the first attempt so that ESR can converge faster */
+	if (!chip->delta_esr_count)
+		esr_filtered = fg_esr_meas_diff;
+	else
+		esr_filtered = fg_esr_meas_diff >> chip->dt.esr_filter_factor;
+
+	esr_delta = esr_delta + (esr_filtered * 32);
 
 	/* Bound the limits */
 	if (esr_delta > SHRT_MAX)
@@ -3221,31 +3296,16 @@
 {
 	struct fg_gen4_chip *chip = container_of(work,
 				struct fg_gen4_chip,
-				pl_current_en_work.work);
+				pl_current_en_work);
 	struct fg_dev *fg = &chip->fg;
 	bool input_present = is_input_present(fg), en;
 
 	en = fg->charge_done ? false : input_present;
 
-	/*
-	 * If mem_attn_irq is disabled and parallel summing current
-	 * configuration needs to be modified, then enable mem_attn_irq and
-	 * wait for 1 second before doing it.
-	 */
-	if (get_effective_result(chip->parallel_current_en_votable) != en &&
-		!get_effective_result(chip->mem_attn_irq_en_votable)) {
-		vote(chip->mem_attn_irq_en_votable, MEM_ATTN_IRQ_VOTER,
-			true, 0);
-		schedule_delayed_work(&chip->pl_current_en_work,
-			msecs_to_jiffies(1000));
-		return;
-	}
-
-	if (!get_effective_result(chip->mem_attn_irq_en_votable))
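+	/* mem_attn_irq is now toggled around the SRAM write itself */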
+	if (get_effective_result(chip->parallel_current_en_votable) == en)
 		return;
 
 	vote(chip->parallel_current_en_votable, FG_PARALLEL_EN_VOTER, en, 0);
-	vote(chip->mem_attn_irq_en_votable, MEM_ATTN_IRQ_VOTER, false, 0);
 }
 
 static void pl_enable_work(struct work_struct *work)
@@ -3339,9 +3399,10 @@
 	if (rc < 0)
 		pr_err("Error in adjusting FCC for ESR, rc=%d\n", rc);
 
-	if (is_parallel_charger_available(fg) &&
-		!delayed_work_pending(&chip->pl_current_en_work))
-		schedule_delayed_work(&chip->pl_current_en_work, 0);
+	if (is_parallel_charger_available(fg)) {
+		cancel_work_sync(&chip->pl_current_en_work);
+		schedule_work(&chip->pl_current_en_work);
+	}
 
 	ttf_update(chip->ttf, input_present);
 	fg->prev_charge_status = fg->charge_status;
@@ -3548,7 +3609,7 @@
 {
 	struct fg_gen4_chip *chip = power_supply_get_drvdata(psy);
 	struct fg_dev *fg = &chip->fg;
-	int rc = 0;
+	int rc = 0, val;
 	int64_t temp;
 
 	switch (psp) {
@@ -3556,7 +3617,16 @@
 		rc = fg_gen4_get_prop_capacity(fg, &pval->intval);
 		break;
 	case POWER_SUPPLY_PROP_CAPACITY_RAW:
-		rc = fg_get_msoc_raw(fg, &pval->intval);
+		rc = fg_gen4_get_prop_capacity_raw(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_CC_SOC:
+		rc = fg_get_sram_prop(&chip->fg, FG_SRAM_CC_SOC, &val);
+		if (rc < 0) {
+			pr_err("Error in getting CC_SOC, rc=%d\n", rc);
+			return rc;
+		}
+		/* Show it in centi-percentage */
+		pval->intval = div_s64((int64_t)val * 10000, CC_SOC_30BIT);
 		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
 		if (fg->battery_missing)
@@ -3594,6 +3664,9 @@
 	case POWER_SUPPLY_PROP_CHARGE_NOW_RAW:
 		rc = fg_gen4_get_charge_raw(chip, &pval->intval);
 		break;
+	case POWER_SUPPLY_PROP_CHARGE_NOW:
+		pval->intval = chip->cl->init_cap_uah;
+		break;
 	case POWER_SUPPLY_PROP_CHARGE_FULL:
 		rc = fg_gen4_get_learned_capacity(chip, &temp);
 		if (!rc)
@@ -3656,6 +3729,12 @@
 	case POWER_SUPPLY_PROP_BATT_AGE_LEVEL:
 		pval->intval = chip->batt_age_level;
 		break;
+	case POWER_SUPPLY_PROP_POWER_NOW:
+		rc = fg_gen4_get_power(chip, &pval->intval, false);
+		break;
+	case POWER_SUPPLY_PROP_POWER_AVG:
+		rc = fg_gen4_get_power(chip, &pval->intval, true);
+		break;
 	default:
 		pr_err("unsupported property %d\n", psp);
 		rc = -EINVAL;
@@ -3775,6 +3854,7 @@
 static enum power_supply_property fg_psy_props[] = {
 	POWER_SUPPLY_PROP_CAPACITY,
 	POWER_SUPPLY_PROP_CAPACITY_RAW,
+	POWER_SUPPLY_PROP_CC_SOC,
 	POWER_SUPPLY_PROP_TEMP,
 	POWER_SUPPLY_PROP_VOLTAGE_NOW,
 	POWER_SUPPLY_PROP_VOLTAGE_OCV,
@@ -3787,6 +3867,7 @@
 	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
 	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
 	POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
+	POWER_SUPPLY_PROP_CHARGE_NOW,
 	POWER_SUPPLY_PROP_CHARGE_FULL,
 	POWER_SUPPLY_PROP_CHARGE_COUNTER,
 	POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW,
@@ -3801,6 +3882,8 @@
 	POWER_SUPPLY_PROP_CC_STEP,
 	POWER_SUPPLY_PROP_CC_STEP_SEL,
 	POWER_SUPPLY_PROP_BATT_AGE_LEVEL,
+	POWER_SUPPLY_PROP_POWER_NOW,
+	POWER_SUPPLY_PROP_POWER_AVG,
 };
 
 static const struct power_supply_desc fg_psy_desc = {
@@ -3909,6 +3992,8 @@
 	int rc;
 	u8 val, mask;
 
+	vote(chip->mem_attn_irq_en_votable, MEM_ATTN_IRQ_VOTER, true, 0);
+
 	/* Wait for MEM_ATTN interrupt */
 	rc = fg_wait_for_mem_attn(chip);
 	if (rc < 0)
@@ -3921,6 +4006,7 @@
 		pr_err("Error in writing to 0x%04x, rc=%d\n",
 			BATT_INFO_FG_CNV_CHAR_CFG(fg), rc);
 
+	vote(chip->mem_attn_irq_en_votable, MEM_ATTN_IRQ_VOTER, false, 0);
 	fg_dbg(fg, FG_STATUS, "Parallel current summing: %d\n", enable);
 
 	return rc;
@@ -4674,6 +4760,7 @@
 
 #define DEFAULT_CUTOFF_VOLT_MV		3100
 #define DEFAULT_EMPTY_VOLT_MV		2812
+#define DEFAULT_SYS_MIN_VOLT_MV		2800
 #define DEFAULT_SYS_TERM_CURR_MA	-125
 #define DEFAULT_CUTOFF_CURR_MA		200
 #define DEFAULT_DELTA_SOC_THR		5	/* 0.5 % */
@@ -4942,6 +5029,11 @@
 					"qcom,five-pin-battery");
 	chip->dt.multi_profile_load = of_property_read_bool(node,
 					"qcom,multi-profile-load");
+	chip->dt.soc_hi_res = of_property_read_bool(node, "qcom,soc-hi-res");
+
+	chip->dt.sys_min_volt_mv = DEFAULT_SYS_MIN_VOLT_MV;
+	of_property_read_u32(node, "qcom,fg-sys-min-voltage",
+				&chip->dt.sys_min_volt_mv);
 	return 0;
 }
 
@@ -4954,7 +5046,7 @@
 	cancel_work_sync(&fg->status_change_work);
 	cancel_delayed_work_sync(&fg->profile_load_work);
 	cancel_delayed_work_sync(&fg->sram_dump_work);
-	cancel_delayed_work_sync(&chip->pl_current_en_work);
+	cancel_work_sync(&chip->pl_current_en_work);
 
 	power_supply_unreg_notifier(&fg->nb);
 	debugfs_remove_recursive(fg->dfs_root);
@@ -5017,7 +5109,7 @@
 	INIT_DELAYED_WORK(&fg->profile_load_work, profile_load_work);
 	INIT_DELAYED_WORK(&fg->sram_dump_work, sram_dump_work);
 	INIT_DELAYED_WORK(&chip->pl_enable_work, pl_enable_work);
-	INIT_DELAYED_WORK(&chip->pl_current_en_work, pl_current_en_work);
+	INIT_WORK(&chip->pl_current_en_work, pl_current_en_work);
 
 	fg->awake_votable = create_votable("FG_WS", VOTE_SET_ANY,
 					fg_awake_cb, fg);
@@ -5131,6 +5223,10 @@
 		goto exit;
 	}
 
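+	/* Have disable_irq() mask MEM_ATTN at the chip immediately */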
+	if (fg->irqs[MEM_ATTN_IRQ].irq)
+		irq_set_status_flags(fg->irqs[MEM_ATTN_IRQ].irq,
+					IRQ_DISABLE_UNLAZY);
+
 	/* Keep SOC_UPDATE irq disabled until we require it */
 	if (fg->irqs[SOC_UPDATE_IRQ].irq)
 		disable_irq_nosync(fg->irqs[SOC_UPDATE_IRQ].irq);
diff --git a/drivers/power/supply/qcom/qpnp-qnovo5.c b/drivers/power/supply/qcom/qpnp-qnovo5.c
index a319936..6ec3f3a 100644
--- a/drivers/power/supply/qcom/qpnp-qnovo5.c
+++ b/drivers/power/supply/qcom/qpnp-qnovo5.c
@@ -1,8 +1,10 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
  */
 
+#define pr_fmt(fmt)	"Qnovo: %s: " fmt, __func__
+
 #include <linux/device.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -13,6 +15,7 @@
 #include <linux/of_irq.h>
 #include <linux/pmic-voter.h>
 #include <linux/delay.h>
+#include <linux/pinctrl/consumer.h>
 
 #define QNOVO_PE_CTRL			0x45
 #define QNOVO_PTRAIN_EN_BIT		BIT(7)
@@ -72,6 +75,7 @@
 #define USER_VOTER		"user_voter"
 #define SHUTDOWN_VOTER		"user_voter"
 #define OK_TO_QNOVO_VOTER	"ok_to_qnovo_voter"
+#define HW_OK_TO_QNOVO_VOTER	"HW_OK_TO_QNOVO_VOTER"
 
 #define QNOVO_VOTER		"qnovo_voter"
 #define QNOVO_OVERALL_VOTER	"QNOVO_OVERALL_VOTER"
@@ -108,6 +112,9 @@
 	struct class		qnovo_class;
 	struct power_supply	*batt_psy;
 	struct power_supply	*usb_psy;
+	struct pinctrl		*pinctrl;
+	struct pinctrl_state	*pinctrl_state1;
+	struct pinctrl_state	*pinctrl_state2;
 	struct notifier_block	nb;
 	struct votable		*disable_votable;
 	struct votable		*pt_dis_votable;
@@ -297,6 +304,30 @@
 		return rc;
 	}
 
+	chip->pinctrl = devm_pinctrl_get(chip->dev);
+	if (IS_ERR(chip->pinctrl)) {
+		pr_err("Couldn't get pinctrl rc=%d\n", PTR_ERR(chip->pinctrl));
+		chip->pinctrl = NULL;
+	}
+
+	if (chip->pinctrl) {
+		chip->pinctrl_state1 = pinctrl_lookup_state(chip->pinctrl,
+						"q_state1");
+		if (IS_ERR(chip->pinctrl_state1)) {
+			rc = PTR_ERR(chip->pinctrl_state1);
+			pr_err("Couldn't get pinctrl state1 rc=%d\n", rc);
+			return rc;
+		}
+
+		chip->pinctrl_state2 = pinctrl_lookup_state(chip->pinctrl,
+						"q_state2");
+		if (IS_ERR(chip->pinctrl_state2)) {
+			rc = PTR_ERR(chip->pinctrl_state2);
+			pr_err("Couldn't get pinctrl state2 rc=%d\n", rc);
+			return rc;
+		}
+	}
+
 	return 0;
 }
 
@@ -1098,8 +1129,8 @@
 	struct qnovo *chip = container_of(work,
 			struct qnovo, status_change_work);
 	union power_supply_propval pval;
-	bool usb_present = false;
-	int rc;
+	bool usb_present = false, hw_ok_to_qnovo = false;
+	int rc, battery_health, charge_status;
 
 	if (is_usb_available(chip)) {
 		rc = power_supply_get_property(chip->usb_psy,
@@ -1113,6 +1144,17 @@
 		cancel_delayed_work_sync(&chip->usb_debounce_work);
 		vote(chip->awake_votable, USB_READY_VOTER, false, 0);
 		vote(chip->chg_ready_votable, USB_READY_VOTER, false, 0);
+		if (chip->pinctrl) {
+			rc = pinctrl_select_state(chip->pinctrl,
+					chip->pinctrl_state1);
+			if (rc < 0)
+				pr_err("Couldn't select state 1 rc=%d\n", rc);
+
+			rc = pinctrl_select_state(chip->pinctrl,
+					chip->pinctrl_state2);
+			if (rc < 0)
+				pr_err("Couldn't select state 2 rc=%d\n", rc);
+		}
 	} else if (!chip->usb_present && usb_present) {
 		/* insertion */
 		chip->usb_present = 1;
@@ -1120,6 +1162,36 @@
 		schedule_delayed_work(&chip->usb_debounce_work,
 				msecs_to_jiffies(DEBOUNCE_MS));
 	}
+
+	if (!is_batt_available(chip))
+		return;
+
+	rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_HEALTH,
+					&pval);
+	if (rc < 0) {
+		pr_err("Error in getting battery health, rc=%d\n", rc);
+		return;
+	}
+	battery_health = pval.intval;
+
+	rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_STATUS,
+					&pval);
+	if (rc < 0) {
+		pr_err("Error in getting charging status, rc=%d\n", rc);
+		return;
+	}
+	charge_status = pval.intval;
+
+	pr_debug("USB present: %d health:%d charge_status: %d\n",
+		chip->usb_present, battery_health, charge_status);
+
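+	/* Allow Qnovo only while a healthy battery is actively charging */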
+	if (chip->usb_present) {
+		hw_ok_to_qnovo =
+			(battery_health == POWER_SUPPLY_HEALTH_GOOD) &&
+			(charge_status == POWER_SUPPLY_STATUS_CHARGING);
+		vote(chip->not_ok_to_qnovo_votable, HW_OK_TO_QNOVO_VOTER,
+					!hw_ok_to_qnovo, 0);
+	}
 }
 
 static int qnovo_notifier_call(struct notifier_block *nb,
diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c
index e49989f..39c552f 100644
--- a/drivers/power/supply/qcom/qpnp-smb5.c
+++ b/drivers/power/supply/qcom/qpnp-smb5.c
@@ -189,6 +189,7 @@
 	int			term_current_src;
 	int			term_current_thresh_hi_ma;
 	int			term_current_thresh_lo_ma;
+	int			disable_suspend_on_collapse;
 };
 
 struct smb5 {
@@ -531,6 +532,8 @@
 	if (rc < 0)
 		return rc;
 
+	chip->dt.disable_suspend_on_collapse = of_property_read_bool(node,
+					"qcom,disable-suspend-on-collapse");
 	return 0;
 }
 
@@ -547,7 +550,6 @@
 	POWER_SUPPLY_PROP_TYPEC_MODE,
 	POWER_SUPPLY_PROP_TYPEC_POWER_ROLE,
 	POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION,
-	POWER_SUPPLY_PROP_TYPEC_SRC_RP,
 	POWER_SUPPLY_PROP_LOW_POWER,
 	POWER_SUPPLY_PROP_PD_ACTIVE,
 	POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
@@ -557,10 +559,8 @@
 	POWER_SUPPLY_PROP_CTM_CURRENT_MAX,
 	POWER_SUPPLY_PROP_HW_CURRENT_MAX,
 	POWER_SUPPLY_PROP_REAL_TYPE,
-	POWER_SUPPLY_PROP_PR_SWAP,
 	POWER_SUPPLY_PROP_PD_VOLTAGE_MAX,
 	POWER_SUPPLY_PROP_PD_VOLTAGE_MIN,
-	POWER_SUPPLY_PROP_SDP_CURRENT_MAX,
 	POWER_SUPPLY_PROP_CONNECTOR_TYPE,
 	POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
 	POWER_SUPPLY_PROP_VOLTAGE_MAX,
@@ -1301,8 +1301,7 @@
 				QNOVO_VOTER);
 		break;
 	case POWER_SUPPLY_PROP_CURRENT_NOW:
-		rc = smblib_get_prop_from_bms(chg,
-				POWER_SUPPLY_PROP_CURRENT_NOW, val);
+		rc = smblib_get_batt_current_now(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_CURRENT_QNOVO:
 		val->intval = get_client_vote_locked(chg->fcc_votable,
@@ -1333,10 +1332,7 @@
 		val->intval = 0;
 		break;
 	case POWER_SUPPLY_PROP_DIE_HEALTH:
-		if (chg->die_health == -EINVAL)
-			val->intval = smblib_get_prop_die_health(chg);
-		else
-			val->intval = chg->die_health;
+		rc = smblib_get_die_health(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_DP_DM:
 		val->intval = chg->pulse_cnt;
@@ -1415,13 +1411,6 @@
 	case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
 		chg->step_chg_enabled = !!val->intval;
 		break;
-	case POWER_SUPPLY_PROP_SW_JEITA_ENABLED:
-		if (chg->sw_jeita_enabled != (!!val->intval)) {
-			rc = smblib_disable_hw_jeita(chg, !!val->intval);
-			if (rc == 0)
-				chg->sw_jeita_enabled = !!val->intval;
-		}
-		break;
 	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
 		chg->batt_profile_fcc_ua = val->intval;
 		vote(chg->fcc_votable, BATT_PROFILE_VOTER, true, val->intval);
@@ -1472,6 +1461,9 @@
 			vote(chg->chg_disable_votable, FORCE_RECHARGE_VOTER,
 					false, 0);
 		break;
+	case POWER_SUPPLY_PROP_FCC_STEPPER_ENABLE:
+		chg->fcc_stepper_enable = val->intval;
+		break;
 	default:
 		rc = -EINVAL;
 	}
@@ -1492,7 +1484,6 @@
 	case POWER_SUPPLY_PROP_RERUN_AICL:
 	case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
 	case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
-	case POWER_SUPPLY_PROP_SW_JEITA_ENABLED:
 	case POWER_SUPPLY_PROP_DIE_HEALTH:
 		return 1;
 	default:
@@ -1779,7 +1770,7 @@
 {
 	struct smb_charger *chg = &chip->chg;
 	int rc, type = 0;
-	u8 val = 0;
+	u8 val = 0, mask = 0;
 	union power_supply_propval pval;
 
 	if (chip->dt.no_battery)
@@ -1807,15 +1798,6 @@
 		}
 	}
 
-	/* Disable SMB Temperature ADC INT */
-	rc = smblib_masked_write(chg, MISC_THERMREG_SRC_CFG_REG,
-					 THERMREG_SMB_ADC_SRC_EN_BIT, 0);
-	if (rc < 0) {
-		dev_err(chg->dev, "Couldn't configure SMB thermal regulation  rc=%d\n",
-				rc);
-		return rc;
-	}
-
 	/*
 	 * If SW thermal regulation WA is active then all the HW temperature
 	 * comparators need to be disabled to prevent HW thermal regulation,
@@ -1829,6 +1811,15 @@
 				rc);
 			return rc;
 		}
+	} else {
+		/* Allows software thermal regulation only */
+		rc = smblib_write(chg, MISC_THERMREG_SRC_CFG_REG,
+					 THERMREG_SW_ICL_ADJUST_BIT);
+		if (rc < 0) {
+			dev_err(chg->dev, "Couldn't configure SMB thermal regulation rc=%d\n",
+					rc);
+			return rc;
+		}
 	}
 
 	/*
@@ -1900,8 +1891,8 @@
 		smblib_rerun_apsd_if_required(chg);
 	}
 
-	/* clear the ICL override if it is set */
-	rc = smblib_icl_override(chg, false);
+	/* Use ICL results from HW */
+	rc = smblib_icl_override(chg, HW_AUTO_MODE);
 	if (rc < 0) {
 		pr_err("Couldn't disable ICL override rc=%d\n", rc);
 		return rc;
@@ -1947,11 +1938,14 @@
 	 * start from min and AICL ADC disable, and enable aicl rerun
 	 */
 	if (chg->smb_version != PMI632_SUBTYPE) {
+		mask = USBIN_AICL_PERIODIC_RERUN_EN_BIT | USBIN_AICL_ADC_EN_BIT
+			| USBIN_AICL_EN_BIT | SUSPEND_ON_COLLAPSE_USBIN_BIT;
+		val = USBIN_AICL_PERIODIC_RERUN_EN_BIT | USBIN_AICL_EN_BIT;
+		if (!chip->dt.disable_suspend_on_collapse)
+			val |= SUSPEND_ON_COLLAPSE_USBIN_BIT;
+
 		rc = smblib_masked_write(chg, USBIN_AICL_OPTIONS_CFG_REG,
-				USBIN_AICL_PERIODIC_RERUN_EN_BIT
-				| USBIN_AICL_ADC_EN_BIT | USBIN_AICL_EN_BIT,
-				USBIN_AICL_PERIODIC_RERUN_EN_BIT
-				| USBIN_AICL_EN_BIT);
+				mask, val);
 		if (rc < 0) {
 			dev_err(chg->dev, "Couldn't config AICL rc=%d\n", rc);
 			return rc;
@@ -2152,12 +2146,10 @@
 		}
 	}
 
-	if (chg->sw_jeita_enabled) {
-		rc = smblib_disable_hw_jeita(chg, true);
-		if (rc < 0) {
-			dev_err(chg->dev, "Couldn't set hw jeita rc=%d\n", rc);
-			return rc;
-		}
+	rc = smblib_disable_hw_jeita(chg, true);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't set hw jeita rc=%d\n", rc);
+		return rc;
 	}
 
 	rc = smblib_masked_write(chg, DCDC_ENG_SDCDC_CFG5_REG,
@@ -2447,8 +2439,14 @@
 	[IMP_TRIGGER_IRQ] = {
 		.name		= "imp-trigger",
 	},
+	/*
+	 * triggered when the DIE, SKIN, or CONNECTOR temperature crosses
+	 * any of the _REG_L, _REG_H, _RST, or _SHDN thresholds
+	 */
 	[TEMP_CHANGE_IRQ] = {
 		.name		= "temp-change",
+		.handler	= temp_change_irq_handler,
+		.wake		= true,
 	},
 	[TEMP_CHANGE_SMB_IRQ] = {
 		.name		= "temp-change-smb",
diff --git a/drivers/power/supply/qcom/smb1390-charger-psy.c b/drivers/power/supply/qcom/smb1390-charger-psy.c
index 279a3e2..6d87589 100644
--- a/drivers/power/supply/qcom/smb1390-charger-psy.c
+++ b/drivers/power/supply/qcom/smb1390-charger-psy.c
@@ -77,6 +77,7 @@
 #define CP_VOTER		"CP_VOTER"
 #define USER_VOTER		"USER_VOTER"
 #define ILIM_VOTER		"ILIM_VOTER"
+#define TAPER_END_VOTER		"TAPER_END_VOTER"
 #define FCC_VOTER		"FCC_VOTER"
 #define ICL_VOTER		"ICL_VOTER"
 #define WIRELESS_VOTER		"WIRELESS_VOTER"
@@ -122,6 +123,7 @@
 	struct votable		*disable_votable;
 	struct votable		*ilim_votable;
 	struct votable		*fcc_votable;
+	struct votable		*fv_votable;
 	struct votable		*cp_awake_votable;
 
 	/* power supplies */
@@ -135,6 +137,8 @@
 	bool			taper_work_running;
 	struct smb1390_iio	iio;
 	int			irq_status;
+	int			taper_entry_fv;
+	bool			switcher_disabled;
 };
 
 struct smb_irq {
@@ -204,6 +208,19 @@
 		}
 	}
 
+	if (!chip->fv_votable) {
+		chip->fv_votable = find_votable("FV");
+		if (!chip->fv_votable) {
+			pr_debug("Couldn't find FV votable\n");
+			return false;
+		}
+	}
+
+	if (!chip->disable_votable) {
+		pr_debug("Couldn't find CP DISABLE votable\n");
+		return false;
+	}
+
 	return true;
 }
 
@@ -247,7 +264,8 @@
 static irqreturn_t default_irq_handler(int irq, void *data)
 {
 	struct smb1390 *chip = data;
-	int i;
+	int i, rc;
+	bool enable;
 
 	for (i = 0; i < NUM_IRQS; ++i) {
 		if (irq == chip->irqs[i]) {
@@ -256,8 +274,18 @@
 		}
 	}
 
+	rc = smb1390_get_cp_en_status(chip, SWITCHER_EN, &enable);
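+	/* On a switcher enable/disable transition, rerun the FCC election */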
+	if (!rc) {
+		if (chip->switcher_disabled == enable) {
+			chip->switcher_disabled = !chip->switcher_disabled;
+			if (chip->fcc_votable)
+				rerun_election(chip->fcc_votable);
+		}
+	}
+
 	if (chip->cp_master_psy)
 		power_supply_changed(chip->cp_master_psy);
+
 	return IRQ_HANDLED;
 }
 
@@ -435,19 +463,21 @@
 		return -EINVAL;
 	}
 
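+	/* CFG_ILIM is in 100mA steps above a 500mA floor; below 500mA clamps to 0 */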
+	rc = smb1390_masked_write(chip, CORE_FTRIM_ILIM_REG,
+		CFG_ILIM_MASK,
+		DIV_ROUND_CLOSEST(max(ilim_uA, 500000) - 500000, 100000));
+	if (rc < 0) {
+		pr_err("Failed to write ILIM Register, rc=%d\n", rc);
+		return rc;
+	}
+
 	/* ILIM less than 1A is not accurate; disable charging */
 	if (ilim_uA < 1000000) {
 		pr_debug("ILIM %duA is too low to allow charging\n", ilim_uA);
 		vote(chip->disable_votable, ILIM_VOTER, true, 0);
 	} else {
-		pr_debug("setting ILIM to %duA\n", ilim_uA);
-		rc = smb1390_masked_write(chip, CORE_FTRIM_ILIM_REG,
-				CFG_ILIM_MASK,
-				DIV_ROUND_CLOSEST(ilim_uA - 500000, 100000));
-		if (rc < 0)
-			pr_err("Failed to write ILIM Register, rc=%d\n", rc);
-		if (rc >= 0)
-			vote(chip->disable_votable, ILIM_VOTER, false, 0);
+		pr_debug("ILIM set to %duA\n", ilim_uA);
+		vote(chip->disable_votable, ILIM_VOTER, false, 0);
 	}
 
 	return rc;
@@ -544,9 +574,13 @@
 								pval.intval);
 		}
 
-		/* input current is always half the charge current */
-		vote(chip->ilim_votable, FCC_VOTER, true,
-				get_effective_result(chip->fcc_votable) / 2);
+		/*
+		 * Remove the SMB1390 taper-end disable vote if the float
+		 * voltage has increased relative to the voltage at which
+		 * taper was entered.
+		 */
+		if (chip->taper_entry_fv <
+				get_effective_result(chip->fv_votable))
+			vote(chip->disable_votable, TAPER_END_VOTER, false, 0);
 
 		/*
 		 * all votes that would result in disabling the charge pump have
@@ -574,6 +608,7 @@
 		}
 	} else {
 		vote(chip->disable_votable, SRC_VOTER, true, 0);
+		vote(chip->disable_votable, TAPER_END_VOTER, false, 0);
 		vote(chip->fcc_votable, CP_VOTER, false, 0);
 	}
 
@@ -591,11 +626,8 @@
 	if (!is_psy_voter_available(chip))
 		goto out;
 
-	do {
-		fcc_uA = get_effective_result(chip->fcc_votable) - 100000;
-		pr_debug("taper work reducing FCC to %duA\n", fcc_uA);
-		vote(chip->fcc_votable, CP_VOTER, true, fcc_uA);
-
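+	/* Record the FV at taper entry so a later FV increase can abort taper */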
+	chip->taper_entry_fv = get_effective_result(chip->fv_votable);
+	while (true) {
 		rc = power_supply_get_property(chip->batt_psy,
 					POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
 		if (rc < 0) {
@@ -603,12 +635,30 @@
 			goto out;
 		}
 
-		msleep(500);
-	} while (fcc_uA >= 2000000
-		 && pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER);
+		if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
+			fcc_uA = get_effective_result(chip->fcc_votable)
+								- 100000;
+			pr_debug("taper work reducing FCC to %duA\n", fcc_uA);
+			vote(chip->fcc_votable, CP_VOTER, true, fcc_uA);
 
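+			/* Stop the charge pump once FCC falls below 2A */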
+			if (fcc_uA < 2000000) {
+				vote(chip->disable_votable, TAPER_END_VOTER,
+								true, 0);
+				goto out;
+			}
+		} else if (get_effective_result(chip->fv_votable) >
+						chip->taper_entry_fv) {
+			pr_debug("Float voltage increased. Exiting taper\n");
+			goto out;
+		} else {
+			pr_debug("In fast charging. Waiting for next taper\n");
+		}
+
+		msleep(500);
+	}
 out:
 	pr_debug("taper work exit\n");
+	vote(chip->fcc_votable, CP_VOTER, false, 0);
 	chip->taper_work_running = false;
 }
 
@@ -810,6 +860,14 @@
 	 */
 	vote(chip->disable_votable, USER_VOTER, true, 0);
 
+	/*
+	 * In case SMB1390 probe happens after FCC value has been configured,
+	 * update ilim vote to reflect FCC / 2 value.
+	 */
+	if (chip->fcc_votable)
+		vote(chip->ilim_votable, FCC_VOTER, true,
+			get_effective_result(chip->fcc_votable) / 2);
+
 	return 0;
 }
 
@@ -926,6 +984,7 @@
 	chip->dev = &pdev->dev;
 	spin_lock_init(&chip->status_change_lock);
 	mutex_init(&chip->die_chan_lock);
+	chip->switcher_disabled = true;
 
 	chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
 	if (!chip->regmap) {
diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c
index 2aae8bd..442a0d5 100644
--- a/drivers/power/supply/qcom/smb5-lib.c
+++ b/drivers/power/supply/qcom/smb5-lib.c
@@ -38,6 +38,8 @@
 	|| typec_mode == POWER_SUPPLY_TYPEC_SOURCE_HIGH)	\
 	&& !chg->typec_legacy)
 
+static void update_sw_icl_max(struct smb_charger *chg, int pst);
+
 int smblib_read(struct smb_charger *chg, u16 addr, u8 *val)
 {
 	unsigned int value;
@@ -156,15 +158,50 @@
 	return 0;
 }
 
-int smblib_icl_override(struct smb_charger *chg, bool override)
+int smblib_icl_override(struct smb_charger *chg, enum icl_override_mode mode)
 {
 	int rc;
+	u8 usb51_mode, icl_override, apsd_override;
+
+	switch (mode) {
+	case SW_OVERRIDE_USB51_MODE:
+		usb51_mode = 0;
+		icl_override = ICL_OVERRIDE_BIT;
+		apsd_override = 0;
+		break;
+	case SW_OVERRIDE_HC_MODE:
+		usb51_mode = USBIN_MODE_CHG_BIT;
+		icl_override = 0;
+		apsd_override = ICL_OVERRIDE_AFTER_APSD_BIT;
+		break;
+	case HW_AUTO_MODE:
+	default:
+		usb51_mode = USBIN_MODE_CHG_BIT;
+		icl_override = 0;
+		apsd_override = 0;
+		break;
+	}
+
+	rc = smblib_masked_write(chg, USBIN_ICL_OPTIONS_REG,
+				USBIN_MODE_CHG_BIT, usb51_mode);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set USBIN_ICL_OPTIONS rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = smblib_masked_write(chg, CMD_ICL_OVERRIDE_REG,
+				ICL_OVERRIDE_BIT, icl_override);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't override ICL rc=%d\n", rc);
+		return rc;
+	}
 
 	rc = smblib_masked_write(chg, USBIN_LOAD_CFG_REG,
-				ICL_OVERRIDE_AFTER_APSD_BIT,
-				override ? ICL_OVERRIDE_AFTER_APSD_BIT : 0);
-	if (rc < 0)
-		smblib_err(chg, "Couldn't override ICL rc=%d\n", rc);
+				ICL_OVERRIDE_AFTER_APSD_BIT, apsd_override);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't override ICL_AFTER_APSD rc=%d\n", rc);
+		return rc;
+	}
 
 	return rc;
 }
@@ -176,7 +213,7 @@
 static int smblib_select_sec_charger_locked(struct smb_charger *chg,
 					int sec_chg)
 {
-	int rc;
+	int rc = 0;
 
 	switch (sec_chg) {
 	case POWER_SUPPLY_CHARGER_SEC_CP:
@@ -191,12 +228,14 @@
 			return rc;
 		}
 		/* Enable Charge Pump, under HW control */
-		rc = smblib_write(chg, MISC_SMB_EN_CMD_REG,  EN_CP_CMD_BIT);
+		rc = smblib_masked_write(chg, MISC_SMB_EN_CMD_REG,
+					EN_CP_CMD_BIT, EN_CP_CMD_BIT);
 		if (rc < 0) {
 			dev_err(chg->dev, "Couldn't enable SMB charger rc=%d\n",
 						rc);
 			return rc;
 		}
+		vote(chg->smb_override_votable, PL_SMB_EN_VOTER, false, 0);
 		break;
 	case POWER_SUPPLY_CHARGER_SEC_PL:
 		/* select slave charger instead of Charge Pump */
@@ -208,12 +247,14 @@
 			return rc;
 		}
 		/* Enable slave charger, under HW control */
-		rc = smblib_write(chg, MISC_SMB_EN_CMD_REG,  EN_STAT_CMD_BIT);
+		rc = smblib_masked_write(chg, MISC_SMB_EN_CMD_REG,
+					EN_STAT_CMD_BIT, EN_STAT_CMD_BIT);
 		if (rc < 0) {
 			dev_err(chg->dev, "Couldn't enable SMB charger rc=%d\n",
 						rc);
 			return rc;
 		}
+		vote(chg->smb_override_votable, PL_SMB_EN_VOTER, false, 0);
 
 		vote(chg->pl_disable_votable, PL_SMB_EN_VOTER, false, 0);
 
@@ -223,13 +264,7 @@
 		vote(chg->pl_disable_votable, PL_SMB_EN_VOTER, true, 0);
 
 		/* SW override, disabling secondary charger(s) */
-		rc = smblib_write(chg, MISC_SMB_EN_CMD_REG,
-						SMB_EN_OVERRIDE_BIT);
-		if (rc < 0) {
-			dev_err(chg->dev, "Couldn't disable charging rc=%d\n",
-						rc);
-			return rc;
-		}
+		vote(chg->smb_override_votable, PL_SMB_EN_VOTER, true, 0);
 		break;
 	}
 
@@ -908,7 +943,7 @@
 			schedule_work(&chg->bms_update_work);
 	}
 
-	if (!chg->jeita_configured)
+	if (chg->jeita_configured == JEITA_CFG_NONE)
 		schedule_work(&chg->jeita_update_work);
 
 	if (chg->sec_pl_present && !chg->pl.psy
@@ -1166,42 +1201,25 @@
 	}
 
 	rc = smblib_masked_write(chg, USBIN_ICL_OPTIONS_REG,
-		CFG_USB3P0_SEL_BIT | USB51_MODE_BIT | USBIN_MODE_CHG_BIT,
-		icl_options);
+			CFG_USB3P0_SEL_BIT | USB51_MODE_BIT, icl_options);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't set ICL options rc=%d\n", rc);
 		return rc;
 	}
 
-	return rc;
-}
-
-static int get_sdp_current(struct smb_charger *chg, int *icl_ua)
-{
-	int rc;
-	u8 icl_options;
-	bool usb3 = false;
-
-	rc = smblib_read(chg, USBIN_ICL_OPTIONS_REG, &icl_options);
+	rc = smblib_icl_override(chg, SW_OVERRIDE_USB51_MODE);
 	if (rc < 0) {
-		smblib_err(chg, "Couldn't get ICL options rc=%d\n", rc);
+		smblib_err(chg, "Couldn't set ICL override rc=%d\n", rc);
 		return rc;
 	}
 
-	usb3 = (icl_options & CFG_USB3P0_SEL_BIT);
-
-	if (icl_options & USB51_MODE_BIT)
-		*icl_ua = usb3 ? USBIN_900MA : USBIN_500MA;
-	else
-		*icl_ua = usb3 ? USBIN_150MA : USBIN_100MA;
-
 	return rc;
 }
 
 int smblib_set_icl_current(struct smb_charger *chg, int icl_ua)
 {
 	int rc = 0;
-	bool hc_mode = false, override = false;
+	enum icl_override_mode icl_override = HW_AUTO_MODE;
 	/* suspend if 25mA or less is requested */
 	bool suspend = (icl_ua <= USBIN_25MA);
 
@@ -1249,25 +1267,11 @@
 			smblib_err(chg, "Couldn't set HC ICL rc=%d\n", rc);
 			goto out;
 		}
-		hc_mode = true;
-
-		/*
-		 * Micro USB mode follows ICL register independent of override
-		 * bit, configure override only for typeC mode.
-		 */
-		if (chg->connector_type == POWER_SUPPLY_CONNECTOR_TYPEC)
-			override = true;
+		icl_override = SW_OVERRIDE_HC_MODE;
 	}
 
 set_mode:
-	rc = smblib_masked_write(chg, USBIN_ICL_OPTIONS_REG,
-		USBIN_MODE_CHG_BIT, hc_mode ? USBIN_MODE_CHG_BIT : 0);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't set USBIN_ICL_OPTIONS rc=%d\n", rc);
-		goto out;
-	}
-
-	rc = smblib_icl_override(chg, override);
+	rc = smblib_icl_override(chg, icl_override);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't set ICL override rc=%d\n", rc);
 		goto out;
@@ -1289,38 +1293,13 @@
 
 int smblib_get_icl_current(struct smb_charger *chg, int *icl_ua)
 {
-	int rc = 0;
-	u8 load_cfg;
-	bool override;
+	int rc;
 
-	if ((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT
-		|| chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
-		&& (chg->usb_psy->desc->type == POWER_SUPPLY_TYPE_USB)) {
-		rc = get_sdp_current(chg, icl_ua);
-		if (rc < 0) {
-			smblib_err(chg, "Couldn't get SDP ICL rc=%d\n", rc);
-			return rc;
-		}
-	} else {
-		rc = smblib_read(chg, USBIN_LOAD_CFG_REG, &load_cfg);
-		if (rc < 0) {
-			smblib_err(chg, "Couldn't get load cfg rc=%d\n", rc);
-			return rc;
-		}
-		override = load_cfg & ICL_OVERRIDE_AFTER_APSD_BIT;
-		if (!override)
-			return INT_MAX;
+	rc = smblib_get_charge_param(chg, &chg->param.icl_max_stat, icl_ua);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't get HC ICL rc=%d\n", rc);
 
-		/* override is set */
-		rc = smblib_get_charge_param(chg, &chg->param.icl_max_stat,
-					icl_ua);
-		if (rc < 0) {
-			smblib_err(chg, "Couldn't get HC ICL rc=%d\n", rc);
-			return rc;
-		}
-	}
-
-	return 0;
+	return rc;
 }
 
 int smblib_toggle_smb_en(struct smb_charger *chg, int toggle)
@@ -1339,6 +1318,21 @@
 /*********************
  * VOTABLE CALLBACKS *
  *********************/
+static int smblib_smb_disable_override_vote_callback(struct votable *votable,
+			void *data, int disable_smb, const char *client)
+{
+	struct smb_charger *chg = data;
+	int rc = 0;
+
+	/* Enable/disable SMB_EN pin */
+	rc = smblib_masked_write(chg, MISC_SMB_EN_CMD_REG,
+			SMB_EN_OVERRIDE_BIT | SMB_EN_OVERRIDE_VALUE_BIT,
+			disable_smb ? SMB_EN_OVERRIDE_BIT : 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't configure SMB_EN, rc=%d\n", rc);
+
+	return rc;
+}
 
 static int smblib_dc_suspend_vote_callback(struct votable *votable, void *data,
 			int suspend, const char *client)
@@ -1873,6 +1867,19 @@
 	return 0;
 }
 
+int smblib_get_batt_current_now(struct smb_charger *chg,
+					union power_supply_propval *val)
+{
+	int rc;
+
+	rc = smblib_get_prop_from_bms(chg,
+			POWER_SUPPLY_PROP_CURRENT_NOW, val);
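+	/* Invert the polarity of the BMS current reading */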
+	if (!rc)
+		val->intval *= (-1);
+
+	return rc;
+}
+
 /***********************
  * BATTERY PSY SETTERS *
  ***********************/
@@ -2263,6 +2270,7 @@
 				rc);
 		return rc;
 	}
+
 	return 0;
 }
 
@@ -2343,19 +2351,28 @@
 	}
 
 	if (chg->sec_chg_selected == POWER_SUPPLY_CHARGER_SEC_CP) {
-		rc = smblib_read_iio_channel(chg, chg->iio.smb_temp_chan,
-					DIV_FACTOR_DECIDEGC, &chg->smb_temp);
-		if (rc < 0) {
-			smblib_err(chg, "Couldn't read SMB TEMP channel, rc=%d\n",
+		if (!chg->cp_psy)
+			chg->cp_psy =
+				power_supply_get_by_name("charge_pump_master");
+		if (chg->cp_psy) {
+			rc = power_supply_get_property(chg->cp_psy,
+				POWER_SUPPLY_PROP_CP_DIE_TEMP, &pval);
+			if (rc < 0) {
+				smblib_err(chg, "Couldn't get smb1390 charger temp, rc=%d\n",
 					rc);
-			return rc;
+				return rc;
+			}
+			chg->smb_temp = pval.intval;
+		} else {
+			smblib_dbg(chg, PR_MISC, "Couldn't find cp_psy\n");
+			chg->smb_temp = -ENODATA;
 		}
 	} else if (chg->pl.psy && chg->sec_chg_selected ==
 					POWER_SUPPLY_CHARGER_SEC_PL) {
 		rc = power_supply_get_property(chg->pl.psy,
 				POWER_SUPPLY_PROP_CHARGER_TEMP, &pval);
 		if (rc < 0) {
-			smblib_err(chg, "Couldn't get smb charger temp, rc=%d\n",
+			smblib_err(chg, "Couldn't get smb1355 charger temp, rc=%d\n",
 					rc);
 			return rc;
 		}
@@ -2467,13 +2484,8 @@
 		if (chg->thermal_status == TEMP_ALERT_LEVEL)
 			goto exit;
 
-		/* Enable/disable SMB_EN pin */
-		rc = smblib_masked_write(chg, MISC_SMB_EN_CMD_REG,
-			SMB_EN_OVERRIDE_BIT | SMB_EN_OVERRIDE_VALUE_BIT,
-			(disable_smb ? SMB_EN_OVERRIDE_BIT :
-			(SMB_EN_OVERRIDE_BIT | SMB_EN_OVERRIDE_VALUE_BIT)));
-		if (rc < 0)
-			smblib_err(chg, "Couldn't set SMB_EN, rc=%d\n", rc);
+		vote(chg->smb_override_votable, SW_THERM_REGULATION_VOTER,
+				disable_smb, 0);
 
 		/*
 		 * Enable/disable secondary charger through votables to ensure
@@ -2714,36 +2726,14 @@
 	return 0;
 }
 
-static int smblib_estimate_hvdcp_voltage(struct smb_charger *chg,
-					 union power_supply_propval *val)
-{
-	int rc;
-	u8 stat;
-
-	rc = smblib_read(chg, QC_CHANGE_STATUS_REG, &stat);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't read QC_CHANGE_STATUS_REG rc=%d\n",
-				rc);
-		return rc;
-	}
-
-	if (stat & QC_5V_BIT)
-		val->intval = MICRO_5V;
-	else if (stat & QC_9V_BIT)
-		val->intval = MICRO_9V;
-	else if (stat & QC_12V_BIT)
-		val->intval = MICRO_12V;
-
-	return 0;
-}
-
 #define HVDCP3_STEP_UV	200000
 static int smblib_estimate_adaptor_voltage(struct smb_charger *chg,
 					  union power_supply_propval *val)
 {
 	switch (chg->real_charger_type) {
 	case POWER_SUPPLY_TYPE_USB_HVDCP:
-		return smblib_estimate_hvdcp_voltage(chg, val);
+		val->intval = MICRO_12V;
+		break;
 	case POWER_SUPPLY_TYPE_USB_HVDCP_3:
 		val->intval = MICRO_5V + (HVDCP3_STEP_UV * chg->pulse_cnt);
 		break;
@@ -3262,6 +3252,17 @@
 	return POWER_SUPPLY_HEALTH_COOL;
 }
 
+int smblib_get_die_health(struct smb_charger *chg,
+			union power_supply_propval *val)
+{
+	if (chg->die_health == -EINVAL)
+		val->intval = smblib_get_prop_die_health(chg);
+	else
+		val->intval = chg->die_health;
+
+	return 0;
+}
+
 int smblib_get_prop_connector_health(struct smb_charger *chg)
 {
 	int rc;
@@ -3579,6 +3580,8 @@
 int smblib_set_prop_pd_active(struct smb_charger *chg,
 				const union power_supply_propval *val)
 {
+	const struct apsd_result *apsd = smblib_get_apsd_result(chg);
+
 	int rc = 0;
 	int sec_charger;
 
@@ -3586,6 +3589,8 @@
 
 	smblib_apsd_enable(chg, !chg->pd_active);
 
+	update_sw_icl_max(chg, apsd->pst);
+
 	if (chg->pd_active) {
 		vote(chg->usb_irq_enable_votable, PD_VOTER, true, 0);
 
@@ -3612,7 +3617,6 @@
 					rc);
 		}
 	} else {
-		vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true, SDP_100_MA);
 		vote(chg->usb_icl_votable, PD_VOTER, false, 0);
 		vote(chg->usb_irq_enable_votable, PD_VOTER, false, 0);
 
@@ -3673,51 +3677,155 @@
 	return rc;
 }
 
-static int smblib_recover_from_soft_jeita(struct smb_charger *chg)
+#define JEITA_SOFT			0
+#define JEITA_HARD			1
+static int smblib_update_jeita(struct smb_charger *chg, u32 *thresholds,
+								int type)
 {
-	u8 stat1, stat7;
 	int rc;
+	u16 temp, base;
 
-	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat1);
+	base = CHGR_JEITA_THRESHOLD_BASE_REG(type);
+
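+	/*
+	 * The 2-byte JEITA threshold registers are written MSB-first, so
+	 * byte-swap each 16-bit threshold before the batch write.
+	 */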
+	temp = thresholds[1] & 0xFFFF;
+	temp = ((temp & 0xFF00) >> 8) | ((temp & 0xFF) << 8);
+	rc = smblib_batch_write(chg, base, (u8 *)&temp, 2);
 	if (rc < 0) {
-		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
-				rc);
+		smblib_err(chg,
+			"Couldn't configure Jeita %s hot threshold rc=%d\n",
+			(type == JEITA_SOFT) ? "Soft" : "Hard", rc);
 		return rc;
 	}
 
-	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_7_REG, &stat7);
+	temp = thresholds[0] & 0xFFFF;
+	temp = ((temp & 0xFF00) >> 8) | ((temp & 0xFF) << 8);
+	rc = smblib_batch_write(chg, base + 2, (u8 *)&temp, 2);
 	if (rc < 0) {
-		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_2 rc=%d\n",
-				rc);
+		smblib_err(chg,
+			"Couldn't configure Jeita %s cold threshold rc=%d\n",
+			(type == JEITA_SOFT) ? "Soft" : "Hard", rc);
 		return rc;
 	}
 
-	if ((chg->jeita_status && !(stat7 & BAT_TEMP_STATUS_SOFT_LIMIT_MASK) &&
-		((stat1 & BATTERY_CHARGER_STATUS_MASK) == TERMINATE_CHARGE))) {
-		/*
-		 * We are moving from JEITA soft -> Normal and charging
-		 * is terminated
-		 */
-		rc = smblib_write(chg, CHARGING_ENABLE_CMD_REG, 0);
-		if (rc < 0) {
-			smblib_err(chg, "Couldn't disable charging rc=%d\n",
-						rc);
-			return rc;
-		}
-		rc = smblib_write(chg, CHARGING_ENABLE_CMD_REG,
-						CHARGING_ENABLE_CMD_BIT);
-		if (rc < 0) {
-			smblib_err(chg, "Couldn't enable charging rc=%d\n",
-						rc);
-			return rc;
-		}
-	}
-
-	chg->jeita_status = stat7 & BAT_TEMP_STATUS_SOFT_LIMIT_MASK;
+	smblib_dbg(chg, PR_MISC, "%s Jeita threshold configured\n",
+				(type == JEITA_SOFT) ? "Soft" : "Hard");
 
 	return 0;
 }
 
+static int smblib_charge_inhibit_en(struct smb_charger *chg, bool enable)
+{
+	int rc;
+
+	rc = smblib_masked_write(chg, CHGR_CFG2_REG,
+					CHARGER_INHIBIT_BIT,
+					enable ? CHARGER_INHIBIT_BIT : 0);
+	return rc;
+}
+
+static int smblib_soft_jeita_arb_wa(struct smb_charger *chg)
+{
+	union power_supply_propval pval;
+	int rc = 0;
+	bool soft_jeita;
+
+	rc = smblib_get_prop_batt_health(chg, &pval);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get battery health rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Do nothing on entering hard JEITA condition */
+	if (pval.intval == POWER_SUPPLY_HEALTH_COLD ||
+		pval.intval == POWER_SUPPLY_HEALTH_HOT)
+		return 0;
+
+	if (chg->jeita_soft_fcc[0] < 0 || chg->jeita_soft_fcc[1] < 0 ||
+		chg->jeita_soft_fv[0] < 0 || chg->jeita_soft_fv[1] < 0)
+		return 0;
+
+	soft_jeita = (pval.intval == POWER_SUPPLY_HEALTH_COOL) ||
+			(pval.intval == POWER_SUPPLY_HEALTH_WARM);
+
+	/* Do nothing on entering soft JEITA from hard JEITA */
+	if (chg->jeita_arb_flag && soft_jeita)
+		return 0;
+
+	/* Do nothing when not in soft JEITA and the arb workaround is inactive */
+	if (!chg->jeita_arb_flag && !soft_jeita)
+		return 0;
+
+	if (!chg->cp_disable_votable)
+		chg->cp_disable_votable = find_votable("CP_DISABLE");
+
+	/* Entering soft JEITA from normal state */
+	if (!chg->jeita_arb_flag && soft_jeita) {
+		vote(chg->chg_disable_votable, JEITA_ARB_VOTER, true, 0);
+		/* Disable parallel charging */
+		if (chg->pl_disable_votable)
+			vote(chg->pl_disable_votable, JEITA_ARB_VOTER, true, 0);
+		if (chg->cp_disable_votable)
+			vote(chg->cp_disable_votable, JEITA_ARB_VOTER, true, 0);
+
+		rc = smblib_charge_inhibit_en(chg, true);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't enable charge inhibit rc=%d\n",
+					rc);
+
+		rc = smblib_update_jeita(chg, chg->jeita_soft_hys_thlds,
+					JEITA_SOFT);
+		if (rc < 0)
+			smblib_err(chg,
+				"Couldn't configure Jeita soft threshold rc=%d\n",
+				rc);
+
+		if (pval.intval == POWER_SUPPLY_HEALTH_COOL) {
+			vote(chg->fcc_votable, JEITA_ARB_VOTER, true,
+						chg->jeita_soft_fcc[0]);
+			vote(chg->fv_votable, JEITA_ARB_VOTER, true,
+						chg->jeita_soft_fv[0]);
+		} else {
+			vote(chg->fcc_votable, JEITA_ARB_VOTER, true,
+						chg->jeita_soft_fcc[1]);
+			vote(chg->fv_votable, JEITA_ARB_VOTER, true,
+						chg->jeita_soft_fv[1]);
+		}
+
+		vote(chg->chg_disable_votable, JEITA_ARB_VOTER, false, 0);
+		chg->jeita_arb_flag = true;
+	} else if (chg->jeita_arb_flag && !soft_jeita) {
+		/* Exit to health state from soft JEITA */
+
+		vote(chg->chg_disable_votable, JEITA_ARB_VOTER, true, 0);
+
+		rc = smblib_charge_inhibit_en(chg, false);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't disable charge inhibit rc=%d\n",
+					rc);
+
+		rc = smblib_update_jeita(chg, chg->jeita_soft_thlds,
+							JEITA_SOFT);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't configure Jeita soft threshold rc=%d\n",
+				rc);
+
+		vote(chg->fcc_votable, JEITA_ARB_VOTER, false, 0);
+		vote(chg->fv_votable, JEITA_ARB_VOTER, false, 0);
+		if (chg->pl_disable_votable)
+			vote(chg->pl_disable_votable, JEITA_ARB_VOTER, false,
+				0);
+		if (chg->cp_disable_votable)
+			vote(chg->cp_disable_votable, JEITA_ARB_VOTER, false,
+				0);
+		vote(chg->chg_disable_votable, JEITA_ARB_VOTER, false, 0);
+		chg->jeita_arb_flag = false;
+	}
+
+	smblib_dbg(chg, PR_MISC, "JEITA ARB status %d, soft JEITA status %d\n",
+			chg->jeita_arb_flag, soft_jeita);
+	return rc;
+}
+
 /************************
  * USB MAIN PSY GETTERS *
  ************************/
@@ -3873,15 +3981,18 @@
 	struct smb_charger *chg = irq_data->parent_data;
 	int rc;
 
-	rc = smblib_recover_from_soft_jeita(chg);
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
+	if (chg->jeita_configured != JEITA_CFG_COMPLETE)
+		return IRQ_HANDLED;
+
+	rc = smblib_soft_jeita_arb_wa(chg);
 	if (rc < 0) {
-		smblib_err(chg, "Couldn't recover chg from soft jeita rc=%d\n",
+		smblib_err(chg, "Couldn't fix soft jeita arb rc=%d\n",
 				rc);
 		return IRQ_HANDLED;
 	}
 
-	rerun_election(chg->fcc_votable);
-	power_supply_changed(chg->batt_psy);
 	return IRQ_HANDLED;
 }
 
@@ -4218,10 +4329,8 @@
 
 static void update_sw_icl_max(struct smb_charger *chg, int pst)
 {
-	union power_supply_propval pval;
 	int typec_mode;
 	int rp_ua;
-	int rc;
 
 	/* while PD is active it should have complete ICL control */
 	if (chg->pd_active)
@@ -4273,15 +4382,8 @@
 		break;
 	case POWER_SUPPLY_TYPE_UNKNOWN:
 	default:
-		rc = smblib_get_prop_usb_present(chg, &pval);
-		if (rc < 0) {
-			smblib_err(chg, "Couldn't get usb present rc = %d\n",
-					rc);
-			return;
-		}
-
 		vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true,
-				pval.intval ? SDP_CURRENT_UA : SDP_100_MA);
+					SDP_100_MA);
 		break;
 	}
 }
@@ -4331,7 +4433,7 @@
 		smblib_err(chg, "Couldn't read APSD_STATUS rc=%d\n", rc);
 		return IRQ_HANDLED;
 	}
-	smblib_dbg(chg, PR_REGISTER, "APSD_STATUS = 0x%02x\n", stat);
+	smblib_dbg(chg, PR_INTERRUPT, "APSD_STATUS = 0x%02x\n", stat);
 
 	if ((chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
 		&& (stat & APSD_DTC_STATUS_DONE_BIT)
@@ -4373,7 +4475,7 @@
 		smblib_err(chg, "Couldn't read APSD_STATUS rc=%d\n", rc);
 		return IRQ_HANDLED;
 	}
-	smblib_dbg(chg, PR_REGISTER, "APSD_STATUS = 0x%02x\n", stat);
+	smblib_dbg(chg, PR_INTERRUPT, "APSD_STATUS = 0x%02x\n", stat);
 
 	return IRQ_HANDLED;
 }
@@ -4458,7 +4560,13 @@
 
 static void typec_sink_insertion(struct smb_charger *chg)
 {
+	int rc;
+
 	vote(chg->usb_icl_votable, OTG_VOTER, true, 0);
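+	/* Raise the switcher to its above-OTG-threshold boost frequency */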
+	rc = smblib_set_charge_param(chg, &chg->param.freq_switcher,
+					chg->chg_freq.freq_above_otg_threshold);
+	if (rc < 0)
+		dev_err(chg->dev, "Error in setting freq_boost rc=%d\n", rc);
 
 	if (chg->use_extcon) {
 		smblib_notify_usb_host(chg, true);
@@ -4496,7 +4604,13 @@
 
 static void typec_sink_removal(struct smb_charger *chg)
 {
+	int rc;
+
 	vote(chg->usb_icl_votable, OTG_VOTER, false, 0);
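+	/* Restore the default switcher frequency used after removal */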
+	rc = smblib_set_charge_param(chg, &chg->param.freq_switcher,
+					chg->chg_freq.freq_removal);
+	if (rc < 0)
+		dev_err(chg->dev, "Error in setting freq_removal rc=%d\n", rc);
 
 	if (chg->use_extcon) {
 		if (chg->otg_present)
@@ -4959,12 +5073,43 @@
 	if (rc < 0)
 		smblib_err(chg, "Couldn't pet the dog rc=%d\n", rc);
 
-	if (chg->step_chg_enabled || chg->sw_jeita_enabled)
+	if (chg->step_chg_enabled)
 		power_supply_changed(chg->batt_psy);
 
 	return IRQ_HANDLED;
 }
 
+static void smblib_die_rst_icl_regulate(struct smb_charger *chg)
+{
+	int rc;
+	u8 temp;
+
+	rc = smblib_read(chg, DIE_TEMP_STATUS_REG, &temp);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read DIE_TEMP_STATUS_REG rc=%d\n",
+				rc);
+		return;
+	}
+
+	/* Regulate ICL on die temp crossing DIE_RST threshold */
+	vote(chg->usb_icl_votable, DIE_TEMP_VOTER,
+				temp & DIE_TEMP_RST_BIT, 500000);
+}
+
+/*
+ * triggered when the DIE, SKIN, or CONNECTOR temperature crosses
+ * any of the _REG_L, _REG_H, _RST, or _SHDN thresholds
+ */
+irqreturn_t temp_change_irq_handler(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	smblib_die_rst_icl_regulate(chg);
+
+	return IRQ_HANDLED;
+}
+
 /**************
  * Additional USB PSY getters/setters
  * that call interrupt functions
@@ -5173,42 +5318,6 @@
 					rc);
 }
 
-#define JEITA_SOFT			0
-#define JEITA_HARD			1
-static int smblib_update_jeita(struct smb_charger *chg, u32 *thresholds,
-								int type)
-{
-	int rc;
-	u16 temp, base;
-
-	base = CHGR_JEITA_THRESHOLD_BASE_REG(type);
-
-	temp = thresholds[1] & 0xFFFF;
-	temp = ((temp & 0xFF00) >> 8) | ((temp & 0xFF) << 8);
-	rc = smblib_batch_write(chg, base, (u8 *)&temp, 2);
-	if (rc < 0) {
-		smblib_err(chg,
-			"Couldn't configure Jeita %s hot threshold rc=%d\n",
-			(type == JEITA_SOFT) ? "Soft" : "Hard", rc);
-		return rc;
-	}
-
-	temp = thresholds[0] & 0xFFFF;
-	temp = ((temp & 0xFF00) >> 8) | ((temp & 0xFF) << 8);
-	rc = smblib_batch_write(chg, base + 2, (u8 *)&temp, 2);
-	if (rc < 0) {
-		smblib_err(chg,
-			"Couldn't configure Jeita %s cold threshold rc=%d\n",
-			(type == JEITA_SOFT) ? "Soft" : "Hard", rc);
-		return rc;
-	}
-
-	smblib_dbg(chg, PR_MISC, "%s Jeita threshold configured\n",
-				(type == JEITA_SOFT) ? "Soft" : "Hard");
-
-	return 0;
-}
-
 static void jeita_update_work(struct work_struct *work)
 {
 	struct smb_charger *chg = container_of(work, struct smb_charger,
@@ -5216,8 +5325,8 @@
 	struct device_node *node = chg->dev->of_node;
 	struct device_node *batt_node, *pnode;
 	union power_supply_propval val;
-	int rc;
-	u32 jeita_thresholds[2];
+	int rc, tmp[2], max_fcc_ma, max_fv_uv;
+	u32 jeita_hard_thresholds[2];
 
 	batt_node = of_find_node_by_name(node, "qcom,battery-data");
 	if (!batt_node) {
@@ -5250,9 +5359,10 @@
 	}
 
 	rc = of_property_read_u32_array(pnode, "qcom,jeita-hard-thresholds",
-				jeita_thresholds, 2);
+				jeita_hard_thresholds, 2);
 	if (!rc) {
-		rc = smblib_update_jeita(chg, jeita_thresholds, JEITA_HARD);
+		rc = smblib_update_jeita(chg, jeita_hard_thresholds,
+					JEITA_HARD);
 		if (rc < 0) {
 			smblib_err(chg, "Couldn't configure Hard Jeita rc=%d\n",
 					rc);
@@ -5261,18 +5371,83 @@
 	}
 
 	rc = of_property_read_u32_array(pnode, "qcom,jeita-soft-thresholds",
-				jeita_thresholds, 2);
+				chg->jeita_soft_thlds, 2);
 	if (!rc) {
-		rc = smblib_update_jeita(chg, jeita_thresholds, JEITA_SOFT);
+		rc = smblib_update_jeita(chg, chg->jeita_soft_thlds,
+					JEITA_SOFT);
 		if (rc < 0) {
 			smblib_err(chg, "Couldn't configure Soft Jeita rc=%d\n",
 					rc);
 			goto out;
 		}
+
+		rc = of_property_read_u32_array(pnode,
+					"qcom,jeita-soft-hys-thresholds",
+					chg->jeita_soft_hys_thlds, 2);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't get Soft Jeita hysteresis thresholds rc=%d\n",
+					rc);
+			goto out;
+		}
 	}
 
+	chg->jeita_soft_fcc[0] = chg->jeita_soft_fcc[1] = -EINVAL;
+	chg->jeita_soft_fv[0] = chg->jeita_soft_fv[1] = -EINVAL;
+	max_fcc_ma = max_fv_uv = -EINVAL;
+
+	of_property_read_u32(pnode, "qcom,fastchg-current-ma", &max_fcc_ma);
+	of_property_read_u32(pnode, "qcom,max-voltage-uv", &max_fv_uv);
+
+	if (max_fcc_ma <= 0 || max_fv_uv <= 0) {
+		smblib_err(chg, "Incorrect fastchg-current-ma or max-voltage-uv\n");
+		goto out;
+	}
+
+	rc = of_property_read_u32_array(pnode, "qcom,jeita-soft-fcc-ua",
+					tmp, 2);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get fcc values for soft JEITA rc=%d\n",
+				rc);
+		goto out;
+	}
+
+	max_fcc_ma *= 1000;
+	if (tmp[0] > max_fcc_ma || tmp[1] > max_fcc_ma) {
+		smblib_err(chg, "Incorrect FCC value [%d %d] max: %d\n", tmp[0],
+			tmp[1], max_fcc_ma);
+		goto out;
+	}
+	chg->jeita_soft_fcc[0] = tmp[0];
+	chg->jeita_soft_fcc[1] = tmp[1];
+
+	rc = of_property_read_u32_array(pnode, "qcom,jeita-soft-fv-uv", tmp,
+					2);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get fv values for soft JEITA rc=%d\n",
+				rc);
+		goto out;
+	}
+
+	if (tmp[0] > max_fv_uv || tmp[1] > max_fv_uv) {
+		smblib_err(chg, "Incorrect FV value [%d %d] max: %d\n", tmp[0],
+			tmp[1], max_fv_uv);
+		goto out;
+	}
+	chg->jeita_soft_fv[0] = tmp[0];
+	chg->jeita_soft_fv[1] = tmp[1];
+
+	rc = smblib_soft_jeita_arb_wa(chg);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't fix soft jeita arb rc=%d\n",
+				rc);
+		goto out;
+	}
+
+	chg->jeita_configured = JEITA_CFG_COMPLETE;
+	return;
+
 out:
-	chg->jeita_configured = true;
+	chg->jeita_configured = JEITA_CFG_FAILURE;
 }
 
 static void smblib_lpd_ra_open_work(struct work_struct *work)
@@ -5418,6 +5593,15 @@
 
 	vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0);
 
+	chg->smb_override_votable = create_votable("SMB_EN_OVERRIDE",
+				VOTE_SET_ANY,
+				smblib_smb_disable_override_vote_callback, chg);
+	if (IS_ERR(chg->smb_override_votable)) {
+		rc = PTR_ERR(chg->smb_override_votable);
+		chg->smb_override_votable = NULL;
+		return rc;
+	}
+
 	chg->dc_suspend_votable = create_votable("DC_SUSPEND", VOTE_SET_ANY,
 					smblib_dc_suspend_vote_callback,
 					chg);
@@ -5492,6 +5676,14 @@
 		iio_channel_release(chg->iio.sbux_chan);
 	if (!IS_ERR_OR_NULL(chg->iio.vph_v_chan))
 		iio_channel_release(chg->iio.vph_v_chan);
+	if (!IS_ERR_OR_NULL(chg->iio.die_temp_chan))
+		iio_channel_release(chg->iio.die_temp_chan);
+	if (!IS_ERR_OR_NULL(chg->iio.connector_temp_chan))
+		iio_channel_release(chg->iio.connector_temp_chan);
+	if (!IS_ERR_OR_NULL(chg->iio.skin_temp_chan))
+		iio_channel_release(chg->iio.skin_temp_chan);
+	if (!IS_ERR_OR_NULL(chg->iio.smb_temp_chan))
+		iio_channel_release(chg->iio.smb_temp_chan);
 }
 
 int smblib_init(struct smb_charger *chg)
@@ -5519,6 +5711,7 @@
 	chg->jeita_configured = false;
 	chg->sec_chg_selected = POWER_SUPPLY_CHARGER_SEC_NONE;
 	chg->cp_reason = POWER_SUPPLY_CP_NONE;
+	chg->thermal_status = TEMP_BELOW_RANGE;
 
 	switch (chg->mode) {
 	case PARALLEL_MASTER:
@@ -5530,7 +5723,7 @@
 		}
 
 		rc = qcom_step_chg_init(chg->dev, chg->step_chg_enabled,
-						chg->sw_jeita_enabled);
+						chg->sw_jeita_enabled, false);
 		if (rc < 0) {
 			smblib_err(chg, "Couldn't init qcom_step_chg_init rc=%d\n",
 				rc);
diff --git a/drivers/power/supply/qcom/smb5-lib.h b/drivers/power/supply/qcom/smb5-lib.h
index 2c28443..60b57f1 100644
--- a/drivers/power/supply/qcom/smb5-lib.h
+++ b/drivers/power/supply/qcom/smb5-lib.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
  */
 
 #ifndef __SMB5_CHARGER_H
@@ -35,6 +35,7 @@
 #define CHG_STATE_VOTER			"CHG_STATE_VOTER"
 #define TAPER_END_VOTER			"TAPER_END_VOTER"
 #define THERMAL_DAEMON_VOTER		"THERMAL_DAEMON_VOTER"
+#define DIE_TEMP_VOTER			"DIE_TEMP_VOTER"
 #define BOOST_BACK_VOTER		"BOOST_BACK_VOTER"
 #define MICRO_USB_VOTER			"MICRO_USB_VOTER"
 #define DEBUG_BOARD_VOTER		"DEBUG_BOARD_VOTER"
@@ -59,6 +60,7 @@
 #define LPD_VOTER			"LPD_VOTER"
 #define FCC_STEPPER_VOTER		"FCC_STEPPER_VOTER"
 #define SW_THERM_REGULATION_VOTER	"SW_THERM_REGULATION_VOTER"
+#define JEITA_ARB_VOTER			"JEITA_ARB_VOTER"
 
 #define BOOST_BACK_STORM_COUNT	3
 #define WEAK_CHG_STORM_COUNT	8
@@ -90,6 +92,12 @@
 	SW_THERM_REGULATION_WA		= BIT(1),
 };
 
+enum jeita_cfg_stat {
+	JEITA_CFG_NONE = 0,
+	JEITA_CFG_FAILURE,
+	JEITA_CFG_COMPLETE,
+};
+
 enum smb_irq_index {
 	/* CHGR */
 	CHGR_ERROR_IRQ = 0,
@@ -221,6 +229,15 @@
 	TEMP_BELOW_RANGE,
 };
 
+enum icl_override_mode {
+	/* APSD/Type-C/QC auto */
+	HW_AUTO_MODE,
+	/* 100/150/500/900mA */
+	SW_OVERRIDE_USB51_MODE,
+	/* ICL other than USB51 */
+	SW_OVERRIDE_HC_MODE,
+};
+
 /* EXTCON_USB and EXTCON_USB_HOST are mutually exclusive */
 static const u32 smblib_extcon_exclusive[] = {0x3, 0};
 
@@ -322,6 +339,7 @@
 	struct power_supply		*usb_main_psy;
 	struct power_supply		*usb_port_psy;
 	struct power_supply		*wls_psy;
+	struct power_supply		*cp_psy;
 	enum power_supply_type		real_charger_type;
 
 	/* notifiers */
@@ -347,6 +365,7 @@
 	struct votable		*usb_irq_enable_votable;
 	struct votable		*cp_disable_votable;
 	struct votable		*wdog_snarl_irq_en_votable;
+	struct votable		*smb_override_votable;
 
 	/* work */
 	struct work_struct	bms_update_work;
@@ -405,13 +424,14 @@
 	int			usb_icl_change_irq_enabled;
 	u32			jeita_status;
 	u8			float_cfg;
+	bool			jeita_arb_flag;
 	bool			use_extcon;
 	bool			otg_present;
 	bool			hvdcp_disable;
 	int			hw_max_icl_ua;
 	int			auto_recharge_soc;
 	enum sink_src_mode	sink_src_mode;
-	bool			jeita_configured;
+	enum jeita_cfg_stat	jeita_configured;
 	int			charger_temp_max;
 	int			smb_temp_max;
 	u8			typec_try_mode;
@@ -424,6 +444,10 @@
 	int			connector_temp;
 	int			thermal_status;
 	int			main_fcc_max;
+	u32			jeita_soft_thlds[2];
+	u32			jeita_soft_hys_thlds[2];
+	int			jeita_soft_fcc[2];
+	int			jeita_soft_fv[2];
 
 	/* workaround flag */
 	u32			wa_flags;
@@ -505,6 +529,7 @@
 irqreturn_t wdog_snarl_irq_handler(int irq, void *data);
 irqreturn_t wdog_bark_irq_handler(int irq, void *data);
 irqreturn_t typec_or_rid_detection_change_irq_handler(int irq, void *data);
+irqreturn_t temp_change_irq_handler(int irq, void *data);
 
 int smblib_get_prop_input_suspend(struct smb_charger *chg,
 				union power_supply_propval *val);
@@ -518,6 +543,8 @@
 				union power_supply_propval *val);
 int smblib_get_prop_batt_charge_done(struct smb_charger *chg,
 				union power_supply_propval *val);
+int smblib_get_batt_current_now(struct smb_charger *chg,
+					union power_supply_propval *val);
 int smblib_get_prop_batt_health(struct smb_charger *chg,
 				union power_supply_propval *val);
 int smblib_get_prop_system_temp_level(struct smb_charger *chg,
@@ -584,6 +611,8 @@
 int smblib_get_prop_charger_temp(struct smb_charger *chg,
 				union power_supply_propval *val);
 int smblib_get_prop_die_health(struct smb_charger *chg);
+int smblib_get_die_health(struct smb_charger *chg,
+				union power_supply_propval *val);
 int smblib_get_prop_connector_health(struct smb_charger *chg);
 int smblib_set_prop_pd_current_max(struct smb_charger *chg,
 				const union power_supply_propval *val);
@@ -628,7 +657,7 @@
 int smblib_read_iio_channel(struct smb_charger *chg, struct iio_channel *chan,
 							int div, int *data);
 int smblib_configure_hvdcp_apsd(struct smb_charger *chg, bool enable);
-int smblib_icl_override(struct smb_charger *chg, bool override);
+int smblib_icl_override(struct smb_charger *chg, enum icl_override_mode mode);
 enum alarmtimer_restart smblib_lpd_recheck_timer(struct alarm *alarm,
 				ktime_t time);
 int smblib_toggle_smb_en(struct smb_charger *chg, int toggle);
diff --git a/drivers/power/supply/qcom/smb5-reg.h b/drivers/power/supply/qcom/smb5-reg.h
index 1cd9c74..c6d8573 100644
--- a/drivers/power/supply/qcom/smb5-reg.h
+++ b/drivers/power/supply/qcom/smb5-reg.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
  */
 
 #ifndef __SMB5_CHARGER_REG_H
@@ -50,6 +50,7 @@
 #define BAT_TEMP_STATUS_SOFT_LIMIT_MASK		GENMASK(5, 4)
 #define BAT_TEMP_STATUS_HOT_SOFT_BIT		BIT(5)
 #define BAT_TEMP_STATUS_COLD_SOFT_BIT		BIT(4)
+#define BAT_TEMP_STATUS_HARD_LIMIT_MASK		GENMASK(3, 2)
 #define BAT_TEMP_STATUS_TOO_HOT_BIT		BIT(3)
 #define BAT_TEMP_STATUS_TOO_COLD_BIT		BIT(2)
 #define BAT_TEMP_STATUS_TOO_HOT_AFP_BIT		BIT(1)
@@ -103,6 +104,7 @@
 #define JEITA_CCCOMP_CFG_COLD_REG		(CHGR_BASE + 0x93)
 
 #define CHGR_JEITA_THRESHOLD_BASE_REG(i)	(CHGR_BASE + 0x94 + (i * 4))
+#define CHGR_JEITA_HOT_THRESHOLD_MSB_REG	CHGR_JEITA_THRESHOLD_BASE_REG(0)
 
 #define CHGR_FAST_CHARGE_SAFETY_TIMER_CFG_REG	(CHGR_BASE + 0xA2)
 #define FAST_CHARGE_SAFETY_TIMER_192_MIN	0x0
@@ -214,6 +216,9 @@
 #define CMD_APSD_REG				(USBIN_BASE + 0x41)
 #define APSD_RERUN_BIT				BIT(0)
 
+#define CMD_ICL_OVERRIDE_REG			(USBIN_BASE + 0x42)
+#define ICL_OVERRIDE_BIT			BIT(0)
+
 #define CMD_HVDCP_2_REG				(USBIN_BASE + 0x43)
 #define FORCE_12V_BIT				BIT(5)
 #define FORCE_9V_BIT				BIT(4)
@@ -457,6 +462,7 @@
 #define AICL_RERUN_TIME_12S_VAL			0x01
 
 #define MISC_THERMREG_SRC_CFG_REG		(MISC_BASE + 0x70)
+#define THERMREG_SW_ICL_ADJUST_BIT		BIT(7)
 #define THERMREG_SMB_ADC_SRC_EN_BIT		BIT(5)
 #define THERMREG_DIE_CMP_SRC_EN_BIT		BIT(0)
 
diff --git a/drivers/power/supply/qcom/step-chg-jeita.c b/drivers/power/supply/qcom/step-chg-jeita.c
index 23f835e..82e7eb8 100644
--- a/drivers/power/supply/qcom/step-chg-jeita.c
+++ b/drivers/power/supply/qcom/step-chg-jeita.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt) "QCOM-STEPCHG: %s: " fmt, __func__
@@ -44,6 +44,7 @@
 	ktime_t			jeita_last_update_time;
 	bool			step_chg_enable;
 	bool			sw_jeita_enable;
+	bool			jeita_arb_en;
 	bool			config_is_read;
 	bool			step_chg_cfg_valid;
 	bool			sw_jeita_cfg_valid;
@@ -595,7 +596,7 @@
 	 * Suspend USB input path if battery voltage is above
 	 * JEITA VFLOAT threshold.
 	 */
-	if (fv_uv > 0) {
+	if (chip->jeita_arb_en && fv_uv > 0) {
 		rc = power_supply_get_property(chip->batt_psy,
 				POWER_SUPPLY_PROP_VOLTAGE_NOW, &pval);
 		if (!rc && (pval.intval > fv_uv))
@@ -747,7 +748,7 @@
 }
 
 int qcom_step_chg_init(struct device *dev,
-		bool step_chg_enable, bool sw_jeita_enable)
+		bool step_chg_enable, bool sw_jeita_enable, bool jeita_arb_en)
 {
 	int rc;
 	struct step_chg_info *chip;
@@ -768,6 +769,7 @@
 	chip->dev = dev;
 	chip->step_chg_enable = step_chg_enable;
 	chip->sw_jeita_enable = sw_jeita_enable;
+	chip->jeita_arb_en = jeita_arb_en;
 	chip->step_index = -EINVAL;
 	chip->jeita_fcc_index = -EINVAL;
 	chip->jeita_fv_index = -EINVAL;
diff --git a/drivers/power/supply/qcom/step-chg-jeita.h b/drivers/power/supply/qcom/step-chg-jeita.h
index f5431b6..9936e31 100644
--- a/drivers/power/supply/qcom/step-chg-jeita.h
+++ b/drivers/power/supply/qcom/step-chg-jeita.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
  */
 
 #ifndef __STEP_CHG_H__
@@ -22,7 +22,7 @@
 };
 
 int qcom_step_chg_init(struct device *dev,
-		bool step_chg_enable, bool sw_jeita_enable);
+		bool step_chg_enable, bool sw_jeita_enable, bool jeita_arb_en);
 void qcom_step_chg_deinit(void);
 int read_range_data_from_node(struct device_node *node,
 		const char *prop_str, struct range_data *ranges,
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index ad03e2f..5808a1e 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -393,7 +393,7 @@
 	alrm->time.tm_min  = bcd2bin(alarmvals[3] & 0x7f);
 	alrm->time.tm_hour = bcd2bin(alarmvals[2] & 0x3f);
 	alrm->time.tm_mday = bcd2bin(alarmvals[1] & 0x3f);
-	alrm->time.tm_mon  = bcd2bin(alarmvals[0] & 0x3f);
+	alrm->time.tm_mon  = bcd2bin(alarmvals[0] & 0x3f) - 1;
 
 	alrm->enabled = !!(alarmvals[0] & M41T80_ALMON_AFE);
 	alrm->pending = (flags & M41T80_FLAGS_AF) && alrm->enabled;
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index 194ffd5..039b207 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -60,7 +60,9 @@
 
 static void __ref sclp_cpu_change_notify(struct work_struct *work)
 {
+	lock_device_hotplug();
 	smp_rescan_cpus();
+	unlock_device_hotplug();
 }
 
 static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 94f4d8f..d1b531f 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -275,16 +275,16 @@
  */
 int zfcp_status_read_refill(struct zfcp_adapter *adapter)
 {
-	while (atomic_read(&adapter->stat_miss) > 0)
+	while (atomic_add_unless(&adapter->stat_miss, -1, 0))
 		if (zfcp_fsf_status_read(adapter->qdio)) {
+			atomic_inc(&adapter->stat_miss); /* undo the -1 from atomic_add_unless */
 			if (atomic_read(&adapter->stat_miss) >=
 			    adapter->stat_read_buf_num) {
 				zfcp_erp_adapter_reopen(adapter, 0, "axsref1");
 				return 1;
 			}
 			break;
-		} else
-			atomic_dec(&adapter->stat_miss);
+		}
 	return 0;
 }
 
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index f000458..3f97ec4 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2371,7 +2371,7 @@
 	if (!interface) {
 		printk(KERN_ERR PFX "bnx2fc_interface_create failed\n");
 		rc = -ENOMEM;
-		goto ifput_err;
+		goto netdev_err;
 	}
 
 	if (is_vlan_dev(netdev)) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index be2bac9..a490e63 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -14221,7 +14221,8 @@
 			hw_page_size))/hw_page_size;
 
 	/* If needed, Adjust page count to match the max the adapter supports */
-	if (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt)
+	if (phba->sli4_hba.pc_sli4_params.wqpcnt &&
+	    (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt))
 		queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt;
 
 	INIT_LIST_HEAD(&queue->list);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 59ecbb3..a336285 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -1266,7 +1266,7 @@
 
 	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
 		ld = MR_TargetIdToLdGet(ldCount, drv_map);
-		if (ld >= MAX_LOGICAL_DRIVES_EXT) {
+		if (ld >= MAX_LOGICAL_DRIVES_EXT - 1) {
 			lbInfo[ldCount].loadBalanceFlag = 0;
 			continue;
 		}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index c7f95ba..f45c54f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -2832,7 +2832,7 @@
 		device_id < instance->fw_supported_vd_count)) {
 
 		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
-		if (ld >= instance->fw_supported_vd_count)
+		if (ld >= instance->fw_supported_vd_count - 1)
 			fp_possible = 0;
 		else {
 			raid = MR_LdRaidGet(ld, local_map_ptr);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 59d7844..b59bba3 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -3344,8 +3344,9 @@
 static inline void
 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
 {
+	wmb();
 	__raw_writeq(b, addr);
-	mmiowb();
+	barrier();
 }
 #else
 static inline void
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index e5bd035..4de740d 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -952,6 +952,9 @@
 		cls_sess = iscsi_conn_to_session(cls_conn);
 		sess = cls_sess->dd_data;
 
+		if (!iscsi_is_session_online(cls_sess))
+			continue;
+
 		if (pri_ctrl_flags) {
 			if (!strcmp(pri_tgt->iscsi_name, sess->targetname) &&
 			    !strcmp(pri_tgt->ip_addr, ep_ip_addr)) {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index ae9fd2d..42b8f0d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -4808,10 +4808,10 @@
 			fcport->d_id = e->u.new_sess.id;
 			fcport->flags |= FCF_FABRIC_DEVICE;
 			fcport->fw_login_state = DSC_LS_PLOGI_PEND;
-			if (e->u.new_sess.fc4_type & FS_FC4TYPE_FCP)
+			if (e->u.new_sess.fc4_type == FS_FC4TYPE_FCP)
 				fcport->fc4_type = FC4_TYPE_FCP_SCSI;
 
-			if (e->u.new_sess.fc4_type & FS_FC4TYPE_NVME) {
+			if (e->u.new_sess.fc4_type == FS_FC4TYPE_NVME) {
 				fcport->fc4_type = FC4_TYPE_OTHER;
 				fcport->fc4f_nvme = FC4_TYPE_NVME;
 			}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index ade9adc..46216e45 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -132,6 +132,7 @@
 
 static struct kmem_cache *sd_cdb_cache;
 static mempool_t *sd_cdb_pool;
+static mempool_t *sd_page_pool;
 
 static const char *sd_cache_types[] = {
 	"write through", "none", "write back",
@@ -204,6 +205,12 @@
 	sp = buffer_data[0] & 0x80 ? 1 : 0;
 	buffer_data[0] &= ~0x80;
 
+	/*
+	 * Ensure WP, DPOFUA, and RESERVED fields are cleared in
+	 * received mode parameter buffer before doing MODE SELECT.
+	 */
+	data.device_specific = 0;
+
 	if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
 			     SD_MAX_RETRIES, &data, &sshdr)) {
 		if (scsi_sense_valid(&sshdr))
@@ -758,9 +765,10 @@
 	unsigned int data_len = 24;
 	char *buf;
 
-	rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
+	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
 	if (!rq->special_vec.bv_page)
 		return BLKPREP_DEFER;
+	clear_highpage(rq->special_vec.bv_page);
 	rq->special_vec.bv_offset = 0;
 	rq->special_vec.bv_len = data_len;
 	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
@@ -791,9 +799,10 @@
 	u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
 	u32 data_len = sdp->sector_size;
 
-	rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
+	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
 	if (!rq->special_vec.bv_page)
 		return BLKPREP_DEFER;
+	clear_highpage(rq->special_vec.bv_page);
 	rq->special_vec.bv_offset = 0;
 	rq->special_vec.bv_len = data_len;
 	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
@@ -821,9 +830,10 @@
 	u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
 	u32 data_len = sdp->sector_size;
 
-	rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
+	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
 	if (!rq->special_vec.bv_page)
 		return BLKPREP_DEFER;
+	clear_highpage(rq->special_vec.bv_page);
 	rq->special_vec.bv_offset = 0;
 	rq->special_vec.bv_len = data_len;
 	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
@@ -1290,7 +1300,7 @@
 	u8 *cmnd;
 
 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
-		__free_page(rq->special_vec.bv_page);
+		mempool_free(rq->special_vec.bv_page, sd_page_pool);
 
 	if (SCpnt->cmnd != scsi_req(rq)->cmd) {
 		cmnd = SCpnt->cmnd;
@@ -3546,6 +3556,13 @@
 		goto err_out_cache;
 	}
 
+	sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0);
+	if (!sd_page_pool) {
+		printk(KERN_ERR "sd: can't init discard page pool\n");
+		err = -ENOMEM;
+		goto err_out_ppool;
+	}
+
 	err = scsi_register_driver(&sd_template.gendrv);
 	if (err)
 		goto err_out_driver;
@@ -3553,6 +3570,9 @@
 	return 0;
 
 err_out_driver:
+	mempool_destroy(sd_page_pool);
+
+err_out_ppool:
 	mempool_destroy(sd_cdb_pool);
 
 err_out_cache:
@@ -3579,6 +3599,7 @@
 
 	scsi_unregister_driver(&sd_template.gendrv);
 	mempool_destroy(sd_cdb_pool);
+	mempool_destroy(sd_page_pool);
 	kmem_cache_destroy(sd_cdb_cache);
 
 	class_unregister(&sd_disk_class);
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 2112ea6..8c1a232 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -2720,6 +2720,9 @@
 		switch (response->header.iu_type) {
 		case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
 		case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
+			if (io_request->scmd)
+				io_request->scmd->result = 0;
+			/* fall through */
 		case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
 			break;
 		case PQI_RESPONSE_IU_TASK_MANAGEMENT:
@@ -6686,6 +6689,7 @@
 	 * storage.
 	 */
 	rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
+	pqi_free_interrupts(ctrl_info);
 	pqi_reset(ctrl_info);
 	if (rc == 0)
 		return;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 9d0828f..dcbcb24 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -4044,6 +4044,7 @@
 	int tag;
 	struct completion wait;
 	unsigned long flags;
+	bool has_read_lock = false;
 
 	/*
 	 * May get invoked from shutdown and IOCTL contexts.
@@ -4051,8 +4052,10 @@
 	 * In error recovery context, it may come with lock acquired.
 	 */
 
-	if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba))
+	if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba)) {
 		down_read(&hba->lock);
+		has_read_lock = true;
+	}
 
 	/*
 	 * Get free slot, sleep if slots are unavailable.
@@ -4090,7 +4093,7 @@
 out_put_tag:
 	ufshcd_put_dev_cmd_tag(hba, tag);
 	wake_up(&hba->dev_cmd.tag_wq);
-	if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba))
+	if (has_read_lock)
 		up_read(&hba->lock);
 	return err;
 }
@@ -8308,6 +8311,9 @@
 	dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
 				     desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
 
+	dev_desc->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
+				  desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
+
 	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
 
 	/* Zero-pad entire buffer for string termination. */
@@ -8329,9 +8335,6 @@
 	/* Null terminate the model string */
 	dev_desc->model[MAX_MODEL_LEN] = '\0';
 
-	dev_desc->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
-				  desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
-
 out:
 	kfree(desc_buf);
 	return err;
@@ -9207,7 +9210,7 @@
 		switch (ioctl_data->idn) {
 		case QUERY_ATTR_IDN_BOOT_LU_EN:
 			index = 0;
-			if (att > QUERY_ATTR_IDN_BOOT_LU_EN_MAX) {
+			if (!att || att > QUERY_ATTR_IDN_BOOT_LU_EN_MAX) {
 				dev_err(hba->dev,
 					"%s: Illegal ufs query ioctl data, opcode 0x%x, idn 0x%x, att 0x%x\n",
 					__func__, ioctl_data->opcode,
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index b18bdde..22bfc19 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -60,6 +60,14 @@
 	  the entities if corruption is suspected.
 	  If unsure, say N
 
+config QCOM_RUN_QUEUE_STATS
+       bool "Enable collection and exporting of QTI Run Queue stats to userspace"
+       help
+        This option enables the driver to periodically collect kernel run
+        queue statistics and calculate the load of the system. This
+        information is exported to userspace via sysfs entries, and
+        userspace algorithms use it to decide when to turn CPU cores on
+        or off.
+
 config QCOM_GSBI
         tristate "QCOM General Serial Bus Interface"
         depends on ARCH_QCOM
@@ -386,6 +394,15 @@
 	  subsystems within the SoC about other subsystems' power-up/down
 	  state-changes.
 
+config MSM_SYSMON_QMI_COMM
+       bool "MSM System Monitor communication support using QMI transport"
+       depends on QCOM_QMI_HELPERS && MSM_SUBSYSTEM_RESTART
+       help
+         This option adds support for MSM System Monitor APIs using the
+         QMI layer. The APIs provided may be used for notifying
+         subsystems within the SoC about other subsystems' power-up/down
+         state-changes.
+
 config MSM_PIL_SSR_GENERIC
 	tristate "MSM Subsystem Boot Support"
 	depends on MSM_PIL && MSM_SUBSYSTEM_RESTART
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 8d0792a..43ca8fa 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -1,4 +1,3 @@
-# SPDX-License-Identifier: GPL-2.0
 CFLAGS_rpmh-rsc.o := -I$(src)
 obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o
 obj-$(CONFIG_QCOM_GLINK_SSR) +=	glink_ssr.o
@@ -41,7 +40,8 @@
 obj-$(CONFIG_QCOM_DCC_V2) += dcc_v2.o
 obj-$(CONFIG_MSM_SERVICE_NOTIFIER) += service-notifier.o
 obj-$(CONFIG_MSM_SERVICE_LOCATOR) += service-locator.o
-obj-$(CONFIG_MSM_SYSMON_GLINK_COMM) += sysmon-glink.o sysmon-qmi.o
+obj-$(CONFIG_MSM_SYSMON_GLINK_COMM) += sysmon-glink.o
+obj-$(CONFIG_MSM_SYSMON_QMI_COMM) += sysmon-qmi.o
 obj-$(CONFIG_MSM_PIL_SSR_GENERIC) += subsys-pil-tz.o
 obj-$(CONFIG_MEM_SHARE_QMI_SERVICE)		+= memshare/
 obj-$(CONFIG_MSM_PIL)   +=      peripheral-loader.o
@@ -49,6 +49,7 @@
 obj-$(CONFIG_MSM_CDSP_LOADER) += qdsp6v2/
 obj-$(CONFIG_MSM_JTAGV8) += jtagv8.o jtagv8-etm.o
 obj-$(CONFIG_QCOM_SMCINVOKE) += smcinvoke.o
+obj-$(CONFIG_QCOM_RUN_QUEUE_STATS) += rq_stats.o
 
 ifdef CONFIG_MSM_SUBSYSTEM_RESTART
 	obj-y += subsystem_notif.o
diff --git a/drivers/soc/qcom/eud.c b/drivers/soc/qcom/eud.c
index dad62a7..83970eb 100644
--- a/drivers/soc/qcom/eud.c
+++ b/drivers/soc/qcom/eud.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/kernel.h>
@@ -15,6 +15,7 @@
 #include <linux/delay.h>
 #include <linux/sysfs.h>
 #include <linux/io.h>
+#include <linux/of.h>
 #include <linux/bitops.h>
 #include <linux/tty.h>
 #include <linux/tty_flip.h>
@@ -22,6 +23,7 @@
 #include <linux/serial.h>
 #include <linux/workqueue.h>
 #include <linux/power_supply.h>
+#include <soc/qcom/scm.h>
 
 #define EUD_ENABLE_CMD 1
 #define EUD_DISABLE_CMD 0
@@ -32,6 +34,7 @@
 #define EUD_REG_COM_RX_ID	0x000C
 #define EUD_REG_COM_RX_LEN	0x0010
 #define EUD_REG_COM_RX_DAT	0x0014
+#define EUD_REG_EUD_EN2		0x0000
 #define EUD_REG_INT1_EN_MASK	0x0024
 #define EUD_REG_INT_STATUS_1	0x0044
 #define EUD_REG_CTL_OUT_1	0x0074
@@ -65,6 +68,9 @@
 	struct extcon_dev		*extcon;
 	struct uart_port		port;
 	struct work_struct		eud_work;
+	struct power_supply		*batt_psy;
+	bool				secure_eud_en;
+	phys_addr_t			eud_mode_mgr2_phys_base;
 };
 
 static const unsigned int eud_extcon_cable[] = {
@@ -119,6 +125,14 @@
 		/* Enable vbus, chgr & safe mode warning interrupts */
 		writel_relaxed(EUD_INT_VBUS | EUD_INT_CHGR | EUD_INT_SAFE_MODE,
 				priv->eud_reg_base + EUD_REG_INT1_EN_MASK);
+		/* Enable secure eud if supported */
+		if (priv->secure_eud_en) {
+			ret = scm_io_write(priv->eud_mode_mgr2_phys_base +
+					   EUD_REG_EUD_EN2, EUD_ENABLE_CMD);
+			if (ret)
+				dev_err(&pdev->dev,
+				"scm_io_write failed with rc:%d\n", ret);
+		}
 
 		/* Ensure Register Writes Complete */
 		wmb();
@@ -142,10 +156,21 @@
 static void disable_eud(struct platform_device *pdev)
 {
 	struct eud_chip *priv = platform_get_drvdata(pdev);
+	int ret;
 
 	/* write into CSR to disable EUD */
 	writel_relaxed(0, priv->eud_reg_base + EUD_REG_CSR_EUD_EN);
-	dev_dbg(&pdev->dev, "%s: EUD is Disabled\n", __func__);
+
+	/* Disable secure eud if supported */
+	if (priv->secure_eud_en) {
+		ret = scm_io_write(priv->eud_mode_mgr2_phys_base +
+				   EUD_REG_EUD_EN2, EUD_DISABLE_CMD);
+		if (ret)
+			dev_err(&pdev->dev,
+			"scm_io_write failed with rc:%d\n", ret);
+	}
+
+	dev_dbg(&pdev->dev, "%s: EUD Disabled!\n", __func__);
 }
 
 static int param_eud_set(const char *val, const struct kernel_param *kp)
@@ -180,17 +205,33 @@
 
 module_param_cb(enable, &eud_param_ops, &enable, 0644);
 
+static bool is_batt_available(struct eud_chip *chip)
+{
+	if (!chip->batt_psy)
+		chip->batt_psy = power_supply_get_by_name("battery");
+
+	if (!chip->batt_psy)
+		return false;
+
+	return true;
+}
+
 static void eud_event_notifier(struct work_struct *eud_work)
 {
 	struct eud_chip *chip = container_of(eud_work, struct eud_chip,
 					eud_work);
+	union power_supply_propval pval;
 
 	if (chip->int_status == EUD_INT_VBUS)
 		extcon_set_state_sync(chip->extcon, chip->extcon_id,
 					chip->usb_attach);
-	else if (chip->int_status == EUD_INT_CHGR)
-		extcon_set_state_sync(chip->extcon, chip->extcon_id,
-					chip->chgr_enable);
+	else if (chip->int_status == EUD_INT_CHGR) {
+		if (is_batt_available(chip)) {
+			pval.intval = !chip->chgr_enable;
+			power_supply_set_property(chip->batt_psy,
+				POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval);
+		}
+	}
 }
 
 static void usb_attach_detach(struct eud_chip *chip)
@@ -508,6 +549,22 @@
 
 	chip->eud_irq = platform_get_irq_byname(pdev, "eud_irq");
 
+	chip->secure_eud_en = of_property_read_bool(pdev->dev.of_node,
+			      "qcom,secure-eud-en");
+	if (chip->secure_eud_en) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						   "eud_mode_mgr2");
+		if (!res) {
+			dev_err(chip->dev,
+			"%s: failed to get resource eud_mode_mgr2\n",
+			__func__);
+			ret = -ENOMEM;
+			return ret;
+		}
+
+		chip->eud_mode_mgr2_phys_base = res->start;
+	}
+
 	ret = devm_request_irq(&pdev->dev, chip->eud_irq, handle_eud_irq,
 				IRQF_TRIGGER_HIGH, "eud_irq", chip);
 	if (ret) {
diff --git a/drivers/soc/qcom/fsa4480-i2c.c b/drivers/soc/qcom/fsa4480-i2c.c
index ca57325..707d249 100644
--- a/drivers/soc/qcom/fsa4480-i2c.c
+++ b/drivers/soc/qcom/fsa4480-i2c.c
@@ -150,20 +150,27 @@
 	dev_dbg(dev, "%s: setting GPIOs active = %d\n",
 		__func__, mode.intval != POWER_SUPPLY_TYPEC_NONE);
 
-	if (mode.intval != POWER_SUPPLY_TYPEC_NONE) {
+	switch (mode.intval) {
+	/* add all modes for which the FSA should notify here */
+	case POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER:
 		/* activate switches */
 		fsa4480_usbc_update_settings(fsa_priv, 0x00, 0x9F);
 
 		/* notify call chain on event */
 		blocking_notifier_call_chain(&fsa_priv->fsa4480_notifier,
-		POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER, NULL);
-	} else {
+		mode.intval, NULL);
+		break;
+	case POWER_SUPPLY_TYPEC_NONE:
 		/* notify call chain on event */
 		blocking_notifier_call_chain(&fsa_priv->fsa4480_notifier,
 				POWER_SUPPLY_TYPEC_NONE, NULL);
 
 		/* deactivate switches */
 		fsa4480_usbc_update_settings(fsa_priv, 0x18, 0x98);
+		break;
+	default:
+		/* ignore other usb connection modes */
+		break;
 	}
 
 done:
diff --git a/drivers/soc/qcom/llcc-kona.c b/drivers/soc/qcom/llcc-kona.c
index e90e4d8..dbbd79f 100644
--- a/drivers/soc/qcom/llcc-kona.c
+++ b/drivers/soc/qcom/llcc-kona.c
@@ -56,7 +56,7 @@
 	SCT_ENTRY(LLCC_AUDIO,    6, 1024, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0),
 	SCT_ENTRY(LLCC_CMPT,    10, 1024, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0),
 	SCT_ENTRY(LLCC_GPUHTW,  11, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
-	SCT_ENTRY(LLCC_GPU,     12, 2560, 1, 0, 0xFFF, 0x0, 0, 0, 0, 1, 1, 0),
+	SCT_ENTRY(LLCC_GPU,     12, 2048, 1, 0, 0xFFF, 0x0, 0, 0, 0, 1, 1, 0),
 	SCT_ENTRY(LLCC_MMUHWT,  13, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 1),
 	SCT_ENTRY(LLCC_CMPTDMA, 15, 1024, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
 	SCT_ENTRY(LLCC_DISP,    16, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0),
diff --git a/drivers/soc/qcom/llcc-slice.c b/drivers/soc/qcom/llcc-slice.c
index 6a82b5d..c59d1d6 100644
--- a/drivers/soc/qcom/llcc-slice.c
+++ b/drivers/soc/qcom/llcc-slice.c
@@ -42,6 +42,10 @@
 #define LLCC_TRP_ATTR0_CFGn(n)        (0x21000 + SZ_8 * n)
 #define LLCC_TRP_ATTR1_CFGn(n)        (0x21004 + SZ_8 * n)
 
+#define LLCC_TRP_C_AS_NC	      0x21F90
+#define LLCC_TRP_NC_AS_C	      0x21F94
+#define LLCC_FEAC_C_AS_NC	      0x35030
+#define LLCC_FEAC_NC_AS_C	      0x35034
 #define LLCC_TRP_WRSC_EN              0x21F20
 #define LLCC_WRSC_SCID_EN(n)          BIT(n)
 
@@ -232,15 +236,44 @@
 	u32 sz;
 	u32 pcb = 0;
 	u32 cad = 0;
+	u32 wren = 0;
 	int ret = 0;
 	const struct llcc_slice_config *llcc_table;
 	struct llcc_slice_desc desc;
 	bool cap_based_alloc_and_pwr_collapse =
 		drv_data->cap_based_alloc_and_pwr_collapse;
+	uint32_t mask = ~0;
+	int v2_ver = of_device_is_compatible(pdev->dev.of_node,
+							 "qcom,llcc-v2");
 
 	sz = drv_data->cfg_size;
 	llcc_table = drv_data->cfg;
 
+	/* Disable the Cache as Non-Cache override and enable
+	 * the Non-Cache as Cache override
+	 */
+	if (v2_ver) {
+		ret  = regmap_write(drv_data->bcast_regmap,
+						 LLCC_TRP_C_AS_NC, 0);
+		if (ret)
+			return ret;
+
+		ret = regmap_write(drv_data->bcast_regmap,
+						 LLCC_TRP_NC_AS_C, mask);
+		if (ret)
+			return ret;
+	} else {
+		ret  = regmap_write(drv_data->bcast_regmap,
+						 LLCC_FEAC_C_AS_NC, 0);
+		if (ret)
+			return ret;
+
+		ret = regmap_write(drv_data->bcast_regmap,
+						 LLCC_FEAC_NC_AS_C, mask);
+		if (ret)
+			return ret;
+	}
+
 	for (i = 0; i < sz; i++) {
 		attr1_cfg = LLCC_TRP_ATTR1_CFGn(llcc_table[i].slice_id);
 		attr0_cfg = LLCC_TRP_ATTR0_CFGn(llcc_table[i].slice_id);
@@ -277,10 +310,11 @@
 		if (ret)
 			return ret;
 
-		if (llcc_table[i].write_scid_en) {
+		if (v2_ver) {
+			wren |= llcc_table[i].write_scid_en <<
+						llcc_table[i].slice_id;
 			ret = regmap_write(drv_data->bcast_regmap,
-				LLCC_TRP_WRSC_EN,
-				LLCC_WRSC_SCID_EN(llcc_table[i].slice_id));
+				LLCC_TRP_WRSC_EN, wren);
 			if (ret)
 				return ret;
 		}
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
index 73f4258..83b1737 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
@@ -272,7 +272,6 @@
 
 	cmd->addr = cur_bcm->bcmdev->addr;
 	cmd->data = BCM_TCS_CMD(commit, valid, vec_a, vec_b);
-	cmd->wait = commit;
 
 	return ret;
 }
@@ -320,6 +319,7 @@
 			commit = false;
 			if (list_is_last(&cur_bcm->link,
 						&cur_bcm_clist[i])) {
+				cmdlist_active[k].wait = true;
 				commit = true;
 				idx++;
 			}
@@ -369,6 +369,11 @@
 				idx++;
 			}
 
+			if (cur_rsc->node_info->id == MSM_BUS_RSC_DISP) {
+				cmdlist_wake[last_tcs].wait = false;
+				cmdlist_sleep[last_tcs].wait = false;
+			}
+
 			tcs_cmd_gen(cur_bcm, &cmdlist_wake[k],
 				cur_bcm->node_vec[ACTIVE_CTX].vec_a,
 				cur_bcm->node_vec[ACTIVE_CTX].vec_b, commit);
diff --git a/drivers/soc/qcom/qtee_shmbridge.c b/drivers/soc/qcom/qtee_shmbridge.c
index 13339b4..f021b7c 100644
--- a/drivers/soc/qcom/qtee_shmbridge.c
+++ b/drivers/soc/qcom/qtee_shmbridge.c
@@ -13,7 +13,6 @@
 #include <soc/qcom/scm.h>
 #include <soc/qcom/qseecomi.h>
 #include <soc/qcom/qtee_shmbridge.h>
-#include <soc/qcom/secure_buffer.h>
 
 #define DEFAULT_BRIDGE_SIZE	SZ_4M	/*4M*/
 /*
@@ -52,19 +51,26 @@
 	TZ_SYSCALL_CREATE_PARAM_ID_1( \
 	TZ_SYSCALL_PARAM_TYPE_VAL)
 
+#define MAXSHMVMS 4
 #define PERM_BITS 3
 #define VM_BITS 16
 #define SELF_OWNER_BIT 1
 #define SHM_NUM_VM_SHIFT 9
+#define SHM_VM_MASK 0xFFFF
+#define SHM_PERM_MASK 0x7
 
 #define VM_PERM_R PERM_READ
 #define VM_PERM_W PERM_WRITE
 
-/* ns_vmids = ns_vmid as destination number is only 1 */
-#define UPDATE_NS_VMIDS(ns_vmid)	((uint64_t)(ns_vmid))
+/* pack each 16-bit VMID into the ns_vmids field */
+#define UPDATE_NS_VMIDS(ns_vmids, id)	\
+				(((uint64_t)(ns_vmids) << VM_BITS) \
+				| ((uint64_t)(id) & SHM_VM_MASK))
 
-/* ns_perms = ns_vm_perm as destination number is only 1 */
-#define UPDATE_NS_PERMS(ns_vm_perm)	((uint64_t)(ns_vm_perm))
+/* pack each 3-bit permission value into the ns_perms field */
+#define UPDATE_NS_PERMS(ns_perms, perm)	\
+				(((uint64_t)(ns_perms) << PERM_BITS) \
+				| ((uint64_t)(perm) & SHM_PERM_MASK))
 
 /* pfn_and_ns_perm_flags = paddr | ns_perms */
 #define UPDATE_PFN_AND_NS_PERM_FLAGS(paddr, ns_perms)	\
@@ -106,7 +112,7 @@
 	desc.arginfo = TZ_SHM_BRIDGE_ENABLE_PARAM_ID;
 	ret = scm_call2(TZ_SHM_BRIDGE_ENABLE, &desc);
 	if (ret) {
-		pr_err("Failed to enable shmbridge, rsp = %d, ret = %d\n",
+		pr_err("Failed to enable shmbridge, rsp = %lld, ret = %d\n",
 			desc.ret[0], ret);
 		return -EINVAL;
 	}
@@ -126,40 +132,44 @@
 int32_t qtee_shmbridge_register(
 		phys_addr_t paddr,
 		size_t size,
-		uint32_t ns_vmid,
-		uint32_t ns_vm_perm,
+		uint32_t *ns_vmid_list,
+		uint32_t *ns_vm_perm_list,
+		uint32_t ns_vmid_num,
 		uint32_t tz_perm,
 		uint64_t *handle)
 
 {
 	int32_t ret = 0;
 	uint64_t ns_perms = 0;
-	uint64_t destnum = 1;
+	uint64_t ns_vmids = 0;
 	struct scm_desc desc = {0};
+	int i = 0;
 
-	if (!handle) {
-		pr_err("shmb handle pointer is NULL\n");
+	if (!handle || !ns_vmid_list || !ns_vm_perm_list ||
+				ns_vmid_num > MAXSHMVMS) {
+		pr_err("invalid input parameters\n");
 		return -EINVAL;
 	}
-	pr_debug("%s: paddr %lx, size %zu, ns_vmid %x, ns_vm_perm %x, ns_perms %s, tz_perm %x\n",
-			__func__, (uint64_t)paddr, size, ns_vmid,
-			ns_vm_perm, ns_perms, tz_perm);
 
-	ns_perms = UPDATE_NS_PERMS(ns_vm_perm);
+	for (i = 0; i < ns_vmid_num; i++) {
+		ns_perms = UPDATE_NS_PERMS(ns_perms, ns_vm_perm_list[i]);
+		ns_vmids = UPDATE_NS_VMIDS(ns_vmids, ns_vmid_list[i]);
+	}
+
 	desc.arginfo = TZ_SHM_BRIDGE_CREATE_PARAM_ID;
 	desc.args[0] = UPDATE_PFN_AND_NS_PERM_FLAGS(paddr, ns_perms);
 	desc.args[1] = UPDATE_IPFN_AND_S_PERM_FLAGS(paddr, tz_perm);
-	desc.args[2] = UPDATE_SIZE_AND_FLAGS(size, destnum);
-	desc.args[3] = UPDATE_NS_VMIDS(ns_vmid);
+	desc.args[2] = UPDATE_SIZE_AND_FLAGS(size, ns_vmid_num);
+	desc.args[3] = ns_vmids;
 
-	pr_debug("%s: arginfo %lx, desc.args[0] %lx, args[1] %lx, args[2] %lx, args[3] %lx\n",
+	pr_debug("%s: arginfo %x, desc.args[0] %llx, args[1] %llx, args[2] %llx, args[3] %llx\n",
 			__func__, desc.arginfo, desc.args[0],
 			desc.args[1], desc.args[2], desc.args[3]);
 	ret = scm_call2(TZ_SHM_BRIDGE_CREATE, &desc);
 	if (ret || desc.ret[0]) {
-		pr_err("create shmbridge failed, ret = %d, status = %x\n",
+		pr_err("create shmbridge failed, ret = %d, status = %llx\n",
 				ret, desc.ret[0]);
-		return ret;
+		return -EINVAL;
 	}
 	*handle = desc.ret[1];
 	return 0;
@@ -191,7 +201,7 @@
 	unsigned long va;
 
 	if (size > DEFAULT_BRIDGE_SIZE) {
-		pr_err("requestd size %zu is larger than bridge size %zu\n",
+		pr_err("requested size %zu is larger than bridge size %d\n",
 			size, DEFAULT_BRIDGE_SIZE);
 		ret = -EINVAL;
 		goto exit;
@@ -216,7 +226,7 @@
 	shm->paddr = gen_pool_virt_to_phys(default_bridge.genpool, va);
 	shm->size = size;
 
-	pr_debug("%s: shm->paddr %lx, size %zu\n",
+	pr_debug("%s: shm->paddr %llx, size %zu\n",
 			__func__, (uint64_t)shm->paddr, shm->size);
 
 exit:
@@ -242,6 +252,8 @@
 static int __init qtee_shmbridge_init(void)
 {
 	int ret = 0;
+	uint32_t ns_vm_ids[] = {VMID_HLOS};
+	uint32_t ns_vm_perms[] = {VM_PERM_R|VM_PERM_W};
 
 	if (default_bridge.vaddr) {
 		pr_warn("qtee shmbridge is already initialized\n");
@@ -264,8 +276,8 @@
 
 	/*register default bridge*/
 	ret = qtee_shmbridge_register(default_bridge.paddr,
-			default_bridge.size, VMID_HLOS,
-			VM_PERM_R|VM_PERM_W, VM_PERM_R|VM_PERM_W,
+			default_bridge.size, ns_vm_ids,
+			ns_vm_perms, 1, VM_PERM_R|VM_PERM_W,
 			&default_bridge.handle);
 	if (ret) {
 		pr_err("Failed to register default bridge, size %zu\n",
diff --git a/drivers/soc/qcom/ramdump.c b/drivers/soc/qcom/ramdump.c
index 5de7d32..a4798fe 100644
--- a/drivers/soc/qcom/ramdump.c
+++ b/drivers/soc/qcom/ramdump.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/kernel.h>
@@ -17,6 +17,7 @@
 #include <linux/elf.h>
 #include <linux/wait.h>
 #include <linux/cdev.h>
+#include <linux/atomic.h>
 #include <soc/qcom/ramdump.h>
 #include <linux/dma-mapping.h>
 #include <linux/of.h>
@@ -34,14 +35,22 @@
 #define MAX_STRTBL_SIZE 512
 #define MAX_NAME_LENGTH 16
 
+struct consumer_entry {
+	bool data_ready;
+	struct ramdump_device *rd_dev;
+	struct list_head list;
+};
+
 struct ramdump_device {
 	char name[256];
 
-	unsigned int data_ready;
-	unsigned int consumer_present;
+	unsigned int consumers;
+	atomic_t readers_left;
 	int ramdump_status;
 
 	struct completion ramdump_complete;
+	struct mutex consumer_lock;
+	struct list_head consumer_list;
 	struct cdev cdev;
 	struct device *dev;
 
@@ -58,20 +67,51 @@
 {
 	struct ramdump_device *rd_dev = container_of(inode->i_cdev,
 					struct ramdump_device, cdev);
-	rd_dev->consumer_present = 1;
+	struct consumer_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+
+	if (!entry)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&entry->list);
+	entry->rd_dev = rd_dev;
+	mutex_lock(&rd_dev->consumer_lock);
+	rd_dev->consumers++;
 	rd_dev->ramdump_status = 0;
-	filep->private_data = rd_dev;
+	list_add_tail(&entry->list, &rd_dev->consumer_list);
+	mutex_unlock(&rd_dev->consumer_lock);
+	filep->private_data = entry;
 	return 0;
 }
 
+static void reset_ramdump_entry(struct consumer_entry *entry)
+{
+	struct ramdump_device *rd_dev = entry->rd_dev;
+
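+	/* the last reader to finish signals that the ramdump session is complete */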
+	entry->data_ready = false;
+	if (atomic_dec_return(&rd_dev->readers_left) == 0)
+		complete(&rd_dev->ramdump_complete);
+}
+
 static int ramdump_release(struct inode *inode, struct file *filep)
 {
 
 	struct ramdump_device *rd_dev = container_of(inode->i_cdev,
 					struct ramdump_device, cdev);
-	rd_dev->consumer_present = 0;
-	rd_dev->data_ready = 0;
-	complete(&rd_dev->ramdump_complete);
+	struct consumer_entry *entry = filep->private_data;
+
+	mutex_lock(&rd_dev->consumer_lock);
+	/*
+	 * Avoid double decrementing in cases where we finish reading the dump
+	 * and then close the file, but there are other readers that have not
+	 * yet finished.
+	 */
+	if (entry->data_ready)
+		reset_ramdump_entry(entry);
+	rd_dev->consumers--;
+	list_del(&entry->list);
+	mutex_unlock(&rd_dev->consumer_lock);
+	entry->rd_dev = NULL;
+	kfree(entry);
 	return 0;
 }
 
@@ -112,7 +152,8 @@
 static ssize_t ramdump_read(struct file *filep, char __user *buf, size_t count,
 			loff_t *pos)
 {
-	struct ramdump_device *rd_dev = filep->private_data;
+	struct consumer_entry *entry = filep->private_data;
+	struct ramdump_device *rd_dev = entry->rd_dev;
 	void *device_mem = NULL, *origdevice_mem = NULL, *vaddr = NULL;
 	unsigned long data_left = 0, bytes_before, bytes_after;
 	unsigned long addr = 0;
@@ -121,10 +162,10 @@
 	int ret = 0;
 	loff_t orig_pos = *pos;
 
-	if ((filep->f_flags & O_NONBLOCK) && !rd_dev->data_ready)
+	if ((filep->f_flags & O_NONBLOCK) && !entry->data_ready)
 		return -EAGAIN;
 
-	ret = wait_event_interruptible(rd_dev->dump_wait_q, rd_dev->data_ready);
+	ret = wait_event_interruptible(rd_dev->dump_wait_q, entry->data_ready);
 	if (ret)
 		return ret;
 
@@ -224,19 +265,19 @@
 		dma_unremap(rd_dev->dev->parent, origdevice_mem, copy_size);
 
 	kfree(finalbuf);
-	rd_dev->data_ready = 0;
 	*pos = 0;
-	complete(&rd_dev->ramdump_complete);
+	reset_ramdump_entry(entry);
 	return ret;
 }
 
 static unsigned int ramdump_poll(struct file *filep,
 					struct poll_table_struct *wait)
 {
-	struct ramdump_device *rd_dev = filep->private_data;
+	struct consumer_entry *entry = filep->private_data;
+	struct ramdump_device *rd_dev = entry->rd_dev;
 	unsigned int mask = 0;
 
-	if (rd_dev->data_ready)
+	if (entry->data_ready)
 		mask |= (POLLIN | POLLRDNORM);
 
 	poll_wait(filep, &rd_dev->dump_wait_q, wait);
@@ -312,6 +353,7 @@
 			"for %s segments only will be dumped.", dev_name);
 	}
 
+	INIT_LIST_HEAD(&rd_dev->consumer_list);
 	init_waitqueue_head(&rd_dev->dump_wait_q);
 
 	rd_dev->dev = device_create(ramdump_class, parent,
@@ -324,6 +366,8 @@
 		goto fail_return_minor;
 	}
 
+	mutex_init(&rd_dev->consumer_lock);
+	atomic_set(&rd_dev->readers_left, 0);
 	cdev_init(&rd_dev->cdev, &ramdump_file_ops);
 
 	ret = cdev_add(&rd_dev->cdev, MKDEV(MAJOR(ramdump_dev), minor), 1);
@@ -336,6 +380,7 @@
 	return (void *)rd_dev;
 
 fail_cdev_add:
+	mutex_destroy(&rd_dev->consumer_lock);
 	device_unregister(rd_dev->dev);
 fail_return_minor:
 	ida_simple_remove(&rd_minor_id, minor);
@@ -365,12 +410,27 @@
 {
 	int ret, i;
 	struct ramdump_device *rd_dev = (struct ramdump_device *)handle;
+	struct consumer_entry *entry;
 	Elf32_Phdr *phdr;
 	Elf32_Ehdr *ehdr;
 	unsigned long offset;
 
-	if (!rd_dev->consumer_present) {
+	/*
+	 * Acquire the consumer lock here, and hold the lock until we are done
+	 * preparing the data structures required for the ramdump session, and
+	 * have woken all readers. This essentially freezes the current readers
+	 * when the lock is taken here, such that the readers at that time are
+	 * the only ones that will participate in the ramdump session. After
+	 * the current list of readers has been awoken, new readers that add
+	 * themselves to the reader list will not participate in the current
+	 * ramdump session. This allows for the lock to be free while the
+	 * ramdump is occurring, which prevents stalling readers who want to
+	 * close the ramdump node or new readers that want to open it.
+	 */
+	mutex_lock(&rd_dev->consumer_lock);
+	if (!rd_dev->consumers) {
 		pr_err("Ramdump(%s): No consumers. Aborting..\n", rd_dev->name);
+		mutex_unlock(&rd_dev->consumer_lock);
 		return -EPIPE;
 	}
 
@@ -388,8 +448,10 @@
 				       sizeof(*phdr) * nsegments;
 		ehdr = kzalloc(rd_dev->elfcore_size, GFP_KERNEL);
 		rd_dev->elfcore_buf = (char *)ehdr;
-		if (!rd_dev->elfcore_buf)
+		if (!rd_dev->elfcore_buf) {
+			mutex_unlock(&rd_dev->consumer_lock);
 			return -ENOMEM;
+		}
 
 		memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
 		ehdr->e_ident[EI_CLASS] = ELFCLASS32;
@@ -415,13 +477,16 @@
 		}
 	}
 
-	rd_dev->data_ready = 1;
+	list_for_each_entry(entry, &rd_dev->consumer_list, list)
+		entry->data_ready = true;
 	rd_dev->ramdump_status = -1;
 
 	reinit_completion(&rd_dev->ramdump_complete);
+	atomic_set(&rd_dev->readers_left, rd_dev->consumers);
 
 	/* Tell userspace that the data is ready */
 	wake_up(&rd_dev->dump_wait_q);
+	mutex_unlock(&rd_dev->consumer_lock);
 
 	/* Wait (with a timeout) to let the ramdump complete */
 	ret = wait_for_completion_timeout(&rd_dev->ramdump_complete,
@@ -434,7 +499,6 @@
 	} else
 		ret = (rd_dev->ramdump_status == 0) ? 0 : -EPIPE;
 
-	rd_dev->data_ready = 0;
 	rd_dev->elfcore_size = 0;
 	kfree(rd_dev->elfcore_buf);
 	rd_dev->elfcore_buf = NULL;
@@ -465,12 +529,27 @@
 {
 	int ret, i;
 	struct ramdump_device *rd_dev = (struct ramdump_device *)handle;
+	struct consumer_entry *entry;
 	struct elfhdr *ehdr;
 	struct elf_shdr *shdr;
 	unsigned long offset, strtbl_off;
 
-	if (!rd_dev->consumer_present) {
+	/*
+	 * Acquire the consumer lock here, and hold the lock until we are done
+	 * preparing the data structures required for the ramdump session, and
+	 * have woken all readers. This essentially freezes the current readers
+	 * when the lock is taken here, such that the readers at that time are
+	 * the only ones that will participate in the ramdump session. After
+	 * the current list of readers has been awoken, new readers that add
+	 * themselves to the reader list will not participate in the current
+	 * ramdump session. This allows for the lock to be free while the
+	 * ramdump is occurring, which prevents stalling readers who want to
+	 * close the ramdump node or new readers that want to open it.
+	 */
+	mutex_lock(&rd_dev->consumer_lock);
+	if (!rd_dev->consumers) {
 		pr_err("Ramdump(%s): No consumers. Aborting..\n", rd_dev->name);
+		mutex_unlock(&rd_dev->consumer_lock);
 		return -EPIPE;
 	}
 
@@ -481,8 +560,10 @@
 			(sizeof(*shdr) * (nsegments + 2)) + MAX_STRTBL_SIZE;
 	ehdr = kzalloc(rd_dev->elfcore_size, GFP_KERNEL);
 	rd_dev->elfcore_buf = (char *)ehdr;
-	if (!rd_dev->elfcore_buf)
+	if (!rd_dev->elfcore_buf) {
+		mutex_unlock(&rd_dev->consumer_lock);
 		return -ENOMEM;
+	}
 
 	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
 	ehdr->e_ident[EI_CLASS] = ELF_CLASS;
@@ -523,13 +604,16 @@
 	}
 	ehdr->e_shnum = nsegments + 2;
 
-	rd_dev->data_ready = 1;
+	list_for_each_entry(entry, &rd_dev->consumer_list, list)
+		entry->data_ready = true;
 	rd_dev->ramdump_status = -1;
 
 	reinit_completion(&rd_dev->ramdump_complete);
+	atomic_set(&rd_dev->readers_left, rd_dev->consumers);
 
 	/* Tell userspace that the data is ready */
 	wake_up(&rd_dev->dump_wait_q);
+	mutex_unlock(&rd_dev->consumer_lock);
 
 	/* Wait (with a timeout) to let the ramdump complete */
 	ret = wait_for_completion_timeout(&rd_dev->ramdump_complete,
@@ -543,7 +627,6 @@
 		ret = (rd_dev->ramdump_status == 0) ? 0 : -EPIPE;
 	}
 
-	rd_dev->data_ready = 0;
 	rd_dev->elfcore_size = 0;
 	kfree(rd_dev->elfcore_buf);
 	rd_dev->elfcore_buf = NULL;
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index ab950d8..a94095e 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -1,6 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME
@@ -66,6 +66,13 @@
 #define RSC_PDC_DRV_DATA		0x38
 #define RSC_PDC_DATA_OFFSET		0x08
 
+#define ACCL_TYPE(addr)			((addr >> 16) & 0xF)
+#define NR_ACCL_TYPES			3
+
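+/* ACCL_TYPE() values 3, 4 and 5 index accl_str as CLK, VREG and BUS */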
+static const char * const accl_str[] = {
+	"", "", "", "CLK", "VREG", "BUS",
+};
+
 bool rpmh_standalone;
 static struct rsc_drv *__rsc_drv[2];
 static int __rsc_count;
@@ -630,11 +637,10 @@
 	return NULL;
 }
 
-static void print_tcs_info(struct rsc_drv *drv, int tcs_id)
+static void print_tcs_info(struct rsc_drv *drv, int tcs_id, unsigned long *accl)
 {
 	struct tcs_group *tcs_grp = get_tcs_from_index(drv, tcs_id);
 	const struct tcs_request *req = get_req_from_tcs(drv, tcs_id);
-	struct tcs_cmd *cmd;
 	unsigned long cmds_enabled;
 	u32 addr, data, msgid, sts, irq_sts;
 	bool in_use = test_bit(tcs_id, drv->tcs_in_use);
@@ -658,29 +664,17 @@
 		tcs_id, sts ? "IDLE" : "BUSY", data,
 		(irq_sts & BIT(tcs_id)) ? "COMPLETED" : "PENDING");
 
-	for (i = 0; i < req->num_cmds; i++) {
-		cmd = &req->cmds[i];
-		pr_warn("\tREQ=%d [addr=0x%x data=0x%x wait=0x%x]\n",
-			i, cmd->addr, cmd->data, cmd->wait);
-
-		if (i < MAX_CMDS_PER_TCS) {
-			addr = read_tcs_reg(drv, RSC_DRV_CMD_ADDR, tcs_id, i);
-			data = read_tcs_reg(drv, RSC_DRV_CMD_DATA, tcs_id, i);
-			msgid = read_tcs_reg(drv, RSC_DRV_CMD_MSGID, tcs_id, i);
-			sts = read_tcs_reg(drv, RSC_DRV_CMD_STATUS, tcs_id, i);
-			pr_warn("\tCMD=%d [addr=0x%x data=0x%x hdr=0x%x sts=0x%x enabled=%ld]\n",
-				i, addr, data, msgid, sts,
-				(cmds_enabled & BIT(i)));
-		}
-	}
-
-	for_each_set_bit_from(i, &cmds_enabled, MAX_CMDS_PER_TCS) {
+	for_each_set_bit(i, &cmds_enabled, MAX_CMDS_PER_TCS) {
 		addr = read_tcs_reg(drv, RSC_DRV_CMD_ADDR, tcs_id, i);
 		data = read_tcs_reg(drv, RSC_DRV_CMD_DATA, tcs_id, i);
 		msgid = read_tcs_reg(drv, RSC_DRV_CMD_MSGID, tcs_id, i);
 		sts = read_tcs_reg(drv, RSC_DRV_CMD_STATUS, tcs_id, i);
 		pr_warn("\tCMD=%d [addr=0x%x data=0x%x hdr=0x%x sts=0x%x enabled=1]\n",
 			i, addr, data, msgid, sts);
+		if (!(sts & CMD_STATUS_ISSUED))
+			continue;
+		if (!(sts & CMD_STATUS_COMPL))
+			*accl |= BIT(ACCL_TYPE(addr));
 	}
 }
 
@@ -690,6 +684,8 @@
 	bool irq_sts;
 	int i;
 	int busy = 0;
+	unsigned long accl = 0;
+	char str[20] = "";
 
 	pr_warn("RSC:%s\n", drv->name);
 
@@ -697,7 +693,7 @@
 		if (!test_bit(i, drv->tcs_in_use))
 			continue;
 		busy++;
-		print_tcs_info(drv, i);
+		print_tcs_info(drv, i, &accl);
 	}
 
 	if (!rsc_irq_data) {
@@ -709,6 +705,17 @@
 	pr_warn("HW IRQ %lu is %s at GIC\n", rsc_irq_data->hwirq,
 		irq_sts ? "PENDING" : "NOT PENDING");
 
+	for_each_set_bit(i, &accl, ARRAY_SIZE(accl_str)) {
+		strlcat(str, accl_str[i], sizeof(str));
+		strlcat(str, " ", sizeof(str));
+	}
+
+	if (busy && !irq_sts)
+		pr_warn("ERROR:Accelerator(s) { %s } at AOSS did not respond\n",
+			str);
+	else if (irq_sts)
+		pr_warn("ERROR:Possible lockup in Linux\n");
+
 	/*
 	 * The TCS(s) are busy waiting, we have no way to recover from this.
 	 * If this debug function is called, we assume it's because timeout
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index 3f1bcf2..4443e277 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -117,6 +117,7 @@
 	struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
 						    msg);
 	struct completion *compl = rpm_msg->completion;
+	bool free = rpm_msg->needs_free;
 
 	rpm_msg->err = r;
 
@@ -131,7 +132,7 @@
 	complete(compl);
 
 exit:
-	if (rpm_msg->needs_free)
+	if (free)
 		kfree(rpm_msg);
 }
 
@@ -400,11 +401,12 @@
 {
 	struct batch_cache_req *req;
 	struct rpmh_request *rpm_msgs;
-	DECLARE_COMPLETION_ONSTACK(compl);
+	struct completion *compls;
 	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
 	unsigned long time_left;
 	int count = 0;
-	int ret, i, j;
+	int ret, i;
+	void *ptr;
 
 	if (!cmd || !n)
 		return -EINVAL;
@@ -421,10 +423,15 @@
 	if (!count)
 		return -EINVAL;
 
-	req = kzalloc(sizeof(*req) + count * sizeof(req->rpm_msgs[0]),
+	ptr = kzalloc(sizeof(*req) +
+		      count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
 		      GFP_ATOMIC);
-	if (!req)
+	if (!ptr)
 		return -ENOMEM;
+
+	req = ptr;
+	compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);
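+	/* completions live on the heap so they outlive this frame if a request times out */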
+
 	req->count = count;
 	rpm_msgs = req->rpm_msgs;
 
@@ -439,25 +446,26 @@
 	}
 
 	for (i = 0; i < count; i++) {
-		rpm_msgs[i].completion = &compl;
+		struct completion *compl = &compls[i];
+
+		init_completion(compl);
+		rpm_msgs[i].completion = compl;
 		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
 		if (ret) {
 			pr_err("Error(%d) sending RPMH message addr=%#x\n",
 			       ret, rpm_msgs[i].msg.cmds[0].addr);
-			for (j = i; j < count; j++)
-				rpmh_tx_done(&rpm_msgs[j].msg, ret);
 			break;
 		}
 	}
 
 	time_left = RPMH_TIMEOUT_MS;
-	for (i = 0; i < count; i++) {
-		time_left = wait_for_completion_timeout(&compl, time_left);
+	while (i--) {
+		time_left = wait_for_completion_timeout(&compls[i], time_left);
 		if (!time_left) {
 			/*
 			 * Better hope they never finish because they'll signal
-			 * the completion on our stack and that's bad once
-			 * we've returned from the function.
+			 * the completion that we're going to free once
+			 * we've returned from this function.
 			 */
 			rpmh_rsc_debug(ctrlr_to_drv(ctrlr));
 			ret = -ETIMEDOUT;
@@ -466,7 +474,7 @@
 	}
 
 exit:
-	kfree(req);
+	kfree(ptr);
 
 	return ret;
 }
diff --git a/drivers/soc/qcom/rq_stats.c b/drivers/soc/qcom/rq_stats.c
new file mode 100644
index 0000000..c5d6f07
--- /dev/null
+++ b/drivers/soc/qcom/rq_stats.c
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2010-2015, 2017, 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/hrtimer.h>
+#include <linux/cpu.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/notifier.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/rq_stats.h>
+#include <linux/cpufreq.h>
+#include <linux/kernel_stat.h>
+#include <linux/tick.h>
+#include <asm/smp_plat.h>
+#include <linux/suspend.h>
+
+#define MAX_LONG_SIZE 24
+#define DEFAULT_RQ_POLL_JIFFIES 1
+#define DEFAULT_DEF_TIMER_JIFFIES 5
+
+struct notifier_block freq_transition;
+
+struct cpu_load_data {
+	u64 prev_cpu_idle;
+	u64 prev_cpu_wall;
+	unsigned int avg_load_maxfreq;
+	unsigned int samples;
+	unsigned int window_size;
+	unsigned int cur_freq;
+	unsigned int policy_max;
+	cpumask_var_t related_cpus;
+	struct mutex cpu_load_mutex;
+};
+
+static DEFINE_PER_CPU(struct cpu_load_data, cpuload);
+
+
+static int update_average_load(unsigned int freq, unsigned int cpu)
+{
+
+	struct cpu_load_data *pcpu = &per_cpu(cpuload, cpu);
+	u64 cur_wall_time, cur_idle_time;
+	unsigned int idle_time, wall_time;
+	unsigned int cur_load, load_at_max_freq;
+
+	cur_idle_time = get_cpu_idle_time(cpu, &cur_wall_time, 0);
+
+	wall_time = (unsigned int) (cur_wall_time - pcpu->prev_cpu_wall);
+	pcpu->prev_cpu_wall = cur_wall_time;
+
+	idle_time = (unsigned int) (cur_idle_time - pcpu->prev_cpu_idle);
+	pcpu->prev_cpu_idle = cur_idle_time;
+
+
+	if (unlikely(wall_time <= 0 || wall_time < idle_time))
+		return 0;
+
+	cur_load = 100 * (wall_time - idle_time) / wall_time;
+
+	/* Calculate the scaled load across CPU */
+	load_at_max_freq = (cur_load * freq) / pcpu->policy_max;
+
+	if (!pcpu->avg_load_maxfreq) {
+		/* This is the first sample in this window */
+		pcpu->avg_load_maxfreq = load_at_max_freq;
+		pcpu->window_size = wall_time;
+	} else {
+		/*
+		 * There is already a sample available in this window.
+		 * Compute weighted average with prev entry, so that we get
+		 * the precise weighted load.
+		 */
+		pcpu->avg_load_maxfreq =
+			((pcpu->avg_load_maxfreq * pcpu->window_size) +
+			(load_at_max_freq * wall_time)) /
+			(wall_time + pcpu->window_size);
+
+		pcpu->window_size += wall_time;
+	}
+
+	return 0;
+}
+
+static unsigned int report_load_at_max_freq(void)
+{
+	int cpu;
+	struct cpu_load_data *pcpu;
+	unsigned int total_load = 0;
+
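+	/* sum per-CPU loads, each normalized to its CPU's maximum frequency */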
+	for_each_online_cpu(cpu) {
+		pcpu = &per_cpu(cpuload, cpu);
+		mutex_lock(&pcpu->cpu_load_mutex);
+		update_average_load(pcpu->cur_freq, cpu);
+		total_load += pcpu->avg_load_maxfreq;
+		pcpu->avg_load_maxfreq = 0;
+		mutex_unlock(&pcpu->cpu_load_mutex);
+	}
+	return total_load;
+}
+
+static int cpufreq_transition_handler(struct notifier_block *nb,
+			unsigned long val, void *data)
+{
+	struct cpufreq_freqs *freqs = data;
+	struct cpu_load_data *this_cpu = &per_cpu(cpuload, freqs->cpu);
+	int j;
+
+	switch (val) {
+	case CPUFREQ_POSTCHANGE:
+		for_each_cpu(j, this_cpu->related_cpus) {
+			struct cpu_load_data *pcpu = &per_cpu(cpuload, j);
+
+			mutex_lock(&pcpu->cpu_load_mutex);
+			update_average_load(freqs->old, j);
+			pcpu->cur_freq = freqs->new;
+			mutex_unlock(&pcpu->cpu_load_mutex);
+		}
+		break;
+	}
+	return 0;
+}
+
+static void update_related_cpus(void)
+{
+	unsigned int cpu;
+
+	for_each_cpu(cpu, cpu_online_mask) {
+		struct cpu_load_data *this_cpu = &per_cpu(cpuload, cpu);
+		struct cpufreq_policy cpu_policy;
+
+		cpufreq_get_policy(&cpu_policy, cpu);
+		cpumask_copy(this_cpu->related_cpus, cpu_policy.cpus);
+	}
+}
+
+static int cpu_online_handler(unsigned int cpu)
+{
+	struct cpu_load_data *this_cpu = &per_cpu(cpuload, cpu);
+
+	if (!this_cpu->cur_freq)
+		this_cpu->cur_freq = cpufreq_quick_get(cpu);
+	update_related_cpus();
+	this_cpu->avg_load_maxfreq = 0;
+	return 0;
+}
+
+static int system_suspend_handler(struct notifier_block *nb,
+				unsigned long val, void *data)
+{
+	switch (val) {
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+	case PM_POST_RESTORE:
+		rq_info.hotplug_disabled = 0;
+		break;
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		rq_info.hotplug_disabled = 1;
+		break;
+	default:
+		return NOTIFY_DONE;
+	}
+	return NOTIFY_OK;
+}
+
+
+static ssize_t hotplug_disable_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	unsigned int val = rq_info.hotplug_disabled;
+
+	return snprintf(buf, MAX_LONG_SIZE, "%d\n", val);
+}
+
+static struct kobj_attribute hotplug_disabled_attr = __ATTR_RO(hotplug_disable);
+
+static void def_work_fn(struct work_struct *work)
+{
+	/* Notify polling threads on change of value */
+	sysfs_notify(rq_info.kobj, NULL, "def_timer_ms");
+}
+
+static ssize_t run_queue_avg_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	unsigned int val = 0;
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&rq_lock, flags);
+	/* rq avg currently available only on one core */
+	val = rq_info.rq_avg;
+	rq_info.rq_avg = 0;
+	spin_unlock_irqrestore(&rq_lock, flags);
+
+	return snprintf(buf, PAGE_SIZE, "%d.%d\n", val/10, val%10);
+}
+
+static struct kobj_attribute run_queue_avg_attr = __ATTR_RO(run_queue_avg);
+
+static ssize_t show_run_queue_poll_ms(struct kobject *kobj,
+				      struct kobj_attribute *attr, char *buf)
+{
+	int ret = 0;
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&rq_lock, flags);
+	ret = snprintf(buf, MAX_LONG_SIZE, "%u\n",
+		       jiffies_to_msecs(rq_info.rq_poll_jiffies));
+	spin_unlock_irqrestore(&rq_lock, flags);
+
+	return ret;
+}
+
+static ssize_t store_run_queue_poll_ms(struct kobject *kobj,
+				       struct kobj_attribute *attr,
+				       const char *buf, size_t count)
+{
+	unsigned int val = 0;
+	unsigned long flags = 0;
+	static DEFINE_MUTEX(lock_poll_ms);
+
+	mutex_lock(&lock_poll_ms);
+
+	spin_lock_irqsave(&rq_lock, flags);
+	if (kstrtouint(buf, 0, &val))
+		count = -EINVAL;
+	else
+		rq_info.rq_poll_jiffies = msecs_to_jiffies(val);
+	spin_unlock_irqrestore(&rq_lock, flags);
+
+	mutex_unlock(&lock_poll_ms);
+
+	return count;
+}
+
+static struct kobj_attribute run_queue_poll_ms_attr =
+	__ATTR(run_queue_poll_ms, 0600, show_run_queue_poll_ms,
+			store_run_queue_poll_ms);
+
+static ssize_t show_def_timer_ms(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	int64_t diff;
+	unsigned int udiff;
+
+	diff = ktime_to_ns(ktime_get()) - rq_info.def_start_time;
+	do_div(diff, 1000 * 1000);
+	udiff = (unsigned int) diff;
+
+	return snprintf(buf, MAX_LONG_SIZE, "%u\n", udiff);
+}
+
+static ssize_t store_def_timer_ms(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int val = 0;
+
+	if (kstrtouint(buf, 0, &val))
+		return -EINVAL;
+
+	rq_info.def_timer_jiffies = msecs_to_jiffies(val);
+
+	rq_info.def_start_time = ktime_to_ns(ktime_get());
+	return count;
+}
+
+static struct kobj_attribute def_timer_ms_attr =
+	__ATTR(def_timer_ms, 0600, show_def_timer_ms,
+			store_def_timer_ms);
+
+static ssize_t show_cpu_normalized_load(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, MAX_LONG_SIZE, "%u\n", report_load_at_max_freq());
+}
+
+static struct kobj_attribute cpu_normalized_load_attr =
+	__ATTR(cpu_normalized_load, 0600, show_cpu_normalized_load,
+			NULL);
+
+static struct attribute *rq_attrs[] = {
+	&cpu_normalized_load_attr.attr,
+	&def_timer_ms_attr.attr,
+	&run_queue_avg_attr.attr,
+	&run_queue_poll_ms_attr.attr,
+	&hotplug_disabled_attr.attr,
+	NULL,
+};
+
+static struct attribute_group rq_attr_group = {
+	.attrs = rq_attrs,
+};
+
+static int init_rq_attribs(void)
+{
+	int err;
+
+	rq_info.rq_avg = 0;
+	rq_info.attr_group = &rq_attr_group;
+
+	/* Create /sys/devices/system/cpu/cpu0/rq-stats/... */
+	rq_info.kobj = kobject_create_and_add("rq-stats",
+			&get_cpu_device(0)->kobj);
+	if (!rq_info.kobj)
+		return -ENOMEM;
+
+	err = sysfs_create_group(rq_info.kobj, rq_info.attr_group);
+	if (err)
+		kobject_put(rq_info.kobj);
+	else
+		kobject_uevent(rq_info.kobj, KOBJ_ADD);
+
+	return err;
+}
+
+static int __init msm_rq_stats_init(void)
+{
+	int ret;
+	int i;
+	struct cpufreq_policy cpu_policy;
+
+#ifndef CONFIG_SMP
+	/* Bail out if this is not an SMP Target */
+	rq_info.init = 0;
+	return -EPERM;
+#endif
+
+	rq_wq = create_singlethread_workqueue("rq_stats");
+	WARN_ON(!rq_wq);
+	INIT_WORK(&rq_info.def_timer_work, def_work_fn);
+	spin_lock_init(&rq_lock);
+	rq_info.rq_poll_jiffies = DEFAULT_RQ_POLL_JIFFIES;
+	rq_info.def_timer_jiffies = DEFAULT_DEF_TIMER_JIFFIES;
+	rq_info.rq_poll_last_jiffy = 0;
+	rq_info.def_timer_last_jiffy = 0;
+	rq_info.hotplug_disabled = 0;
+	ret = init_rq_attribs();
+
+	rq_info.init = 1;
+
+	for_each_possible_cpu(i) {
+		struct cpu_load_data *pcpu = &per_cpu(cpuload, i);
+
+		mutex_init(&pcpu->cpu_load_mutex);
+		cpufreq_get_policy(&cpu_policy, i);
+		pcpu->policy_max = cpu_policy.cpuinfo.max_freq;
+		if (cpu_online(i))
+			pcpu->cur_freq = cpufreq_quick_get(i);
+		cpumask_copy(pcpu->related_cpus, cpu_policy.cpus);
+	}
+	freq_transition.notifier_call = cpufreq_transition_handler;
+	cpufreq_register_notifier(&freq_transition,
+					CPUFREQ_TRANSITION_NOTIFIER);
+	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "rq_stats:online",
+			cpu_online_handler, NULL);
+
+	return ret;
+}
+late_initcall(msm_rq_stats_init);
+
+static int __init msm_rq_stats_early_init(void)
+{
+#ifndef CONFIG_SMP
+	/* Bail out if this is not an SMP Target */
+	rq_info.init = 0;
+	return -EPERM;
+#endif
+
+	pm_notifier(system_suspend_handler, 0);
+	return 0;
+}
+core_initcall(msm_rq_stats_early_init);
diff --git a/drivers/soc/qcom/spss_utils.c b/drivers/soc/qcom/spss_utils.c
index fe5498c..0f33a44 100644
--- a/drivers/soc/qcom/spss_utils.c
+++ b/drivers/soc/qcom/spss_utils.c
@@ -345,6 +345,8 @@
 		firmware_name = none_firmware_name;
 		break;
 	default:
+		pr_err("invalid firmware type %d, sysfs entry not created\n",
+			firmware_type);
 		return -EINVAL;
 	}
 
diff --git a/drivers/soc/qcom/sysmon-qmi.c b/drivers/soc/qcom/sysmon-qmi.c
index 5d350e9..b1ab461 100644
--- a/drivers/soc/qcom/sysmon-qmi.c
+++ b/drivers/soc/qcom/sysmon-qmi.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt) "sysmon-qmi: %s: " fmt, __func__
@@ -48,7 +48,7 @@
 	.data_type = QMI_EOTI,	\
 	.elem_len  = 0,		\
 	.elem_size = 0,		\
-	.is_array  = NO_ARRAY,	\
+	.array_type  = NO_ARRAY,	\
 	.tlv_type  = 0x00,	\
 	.offset    = 0,		\
 	.ei_array  = NULL,	\
@@ -162,7 +162,7 @@
 		.data_type = QMI_DATA_LEN,
 		.elem_len  = 1,
 		.elem_size = sizeof(uint8_t),
-		.is_array  = NO_ARRAY,
+		.array_type  = NO_ARRAY,
 		.tlv_type  = 0x01,
 		.offset    = offsetof(struct qmi_ssctl_subsys_event_req_msg,
 				      subsys_name_len),
@@ -172,7 +172,7 @@
 		.data_type = QMI_UNSIGNED_1_BYTE,
 		.elem_len  = QMI_SSCTL_SUBSYS_NAME_LENGTH,
 		.elem_size = sizeof(char),
-		.is_array  = VAR_LEN_ARRAY,
+		.array_type  = VAR_LEN_ARRAY,
 		.tlv_type  = 0x01,
 		.offset    = offsetof(struct qmi_ssctl_subsys_event_req_msg,
 				      subsys_name),
@@ -182,7 +182,7 @@
 		.data_type = QMI_SIGNED_4_BYTE_ENUM,
 		.elem_len  = 1,
 		.elem_size = sizeof(uint32_t),
-		.is_array  = NO_ARRAY,
+		.array_type  = NO_ARRAY,
 		.tlv_type  = 0x02,
 		.offset    = offsetof(struct qmi_ssctl_subsys_event_req_msg,
 				      event),
@@ -192,7 +192,7 @@
 		.data_type = QMI_OPT_FLAG,
 		.elem_len  = 1,
 		.elem_size = sizeof(uint8_t),
-		.is_array  = NO_ARRAY,
+		.array_type  = NO_ARRAY,
 		.tlv_type  = 0x10,
 		.offset    = offsetof(struct qmi_ssctl_subsys_event_req_msg,
 				      evt_driven_valid),
@@ -202,7 +202,7 @@
 		.data_type = QMI_SIGNED_4_BYTE_ENUM,
 		.elem_len  = 1,
 		.elem_size = sizeof(uint32_t),
-		.is_array  = NO_ARRAY,
+		.array_type  = NO_ARRAY,
 		.tlv_type  = 0x10,
 		.offset    = offsetof(struct qmi_ssctl_subsys_event_req_msg,
 				      evt_driven),
@@ -216,7 +216,7 @@
 		.data_type = QMI_STRUCT,
 		.elem_len  = 1,
 		.elem_size = sizeof(struct qmi_response_type_v01),
-		.is_array  = NO_ARRAY,
+		.array_type  = NO_ARRAY,
 		.tlv_type  = 0x02,
 		.offset    = offsetof(struct qmi_ssctl_subsys_event_resp_msg,
 				      resp),
@@ -343,7 +343,7 @@
 		.data_type = QMI_STRUCT,
 		.elem_len  = 1,
 		.elem_size = sizeof(struct qmi_response_type_v01),
-		.is_array  = NO_ARRAY,
+		.array_type  = NO_ARRAY,
 		.tlv_type  = 0x02,
 		.offset    = offsetof(struct qmi_ssctl_shutdown_resp_msg,
 				      resp),
@@ -471,7 +471,7 @@
 		.data_type = QMI_STRUCT,
 		.elem_len  = 1,
 		.elem_size = sizeof(struct qmi_response_type_v01),
-		.is_array  = NO_ARRAY,
+		.array_type  = NO_ARRAY,
 		.tlv_type  = 0x02,
 		.offset    = offsetof(
 			struct qmi_ssctl_get_failure_reason_resp_msg,
@@ -482,7 +482,7 @@
 		.data_type = QMI_OPT_FLAG,
 		.elem_len  = 1,
 		.elem_size = sizeof(uint8_t),
-		.is_array  = NO_ARRAY,
+		.array_type  = NO_ARRAY,
 		.tlv_type  = 0x10,
 		.offset    = offsetof(
 			struct qmi_ssctl_get_failure_reason_resp_msg,
@@ -493,7 +493,7 @@
 		.data_type = QMI_DATA_LEN,
 		.elem_len  = 1,
 		.elem_size = sizeof(uint8_t),
-		.is_array  = NO_ARRAY,
+		.array_type  = NO_ARRAY,
 		.tlv_type  = 0x10,
 		.offset    = offsetof(
 			struct qmi_ssctl_get_failure_reason_resp_msg,
@@ -504,7 +504,7 @@
 		.data_type = QMI_UNSIGNED_1_BYTE,
 		.elem_len  = QMI_SSCTL_ERROR_MSG_LENGTH,
 		.elem_size = sizeof(char),
-		.is_array  = VAR_LEN_ARRAY,
+		.array_type  = VAR_LEN_ARRAY,
 		.tlv_type  = 0x10,
 		.offset    = offsetof(
 			struct qmi_ssctl_get_failure_reason_resp_msg,
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index f35cc10..25abf2d 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -88,7 +88,7 @@
 	u8 *rx_buf;
 	int tx_len;
 	int rx_len;
-	bool dma_pending;
+	unsigned int dma_pending;
 };
 
 static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg)
@@ -155,8 +155,7 @@
 	/* Write as many bytes as possible to FIFO */
 	bcm2835_wr_fifo(bs);
 
-	/* based on flags decide if we can finish the transfer */
-	if (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE) {
+	if (!bs->rx_len) {
 		/* Transfer complete - reset SPI HW */
 		bcm2835_spi_reset_hw(master);
 		/* wake up the framework */
@@ -233,10 +232,9 @@
 	 * is called the tx-dma must have finished - can't get to this
 	 * situation otherwise...
 	 */
-	dmaengine_terminate_all(master->dma_tx);
-
-	/* mark as no longer pending */
-	bs->dma_pending = 0;
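+	/* cmpxchg atomically claims the pending flag so only one path terminates the DMA */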
+	if (cmpxchg(&bs->dma_pending, true, false)) {
+		dmaengine_terminate_all(master->dma_tx);
+	}
 
 	/* and mark as completed */;
 	complete(&master->xfer_completion);
@@ -342,6 +340,7 @@
 	if (ret) {
 		/* need to reset on errors */
 		dmaengine_terminate_all(master->dma_tx);
+		bs->dma_pending = false;
 		bcm2835_spi_reset_hw(master);
 		return ret;
 	}
@@ -617,10 +616,9 @@
 	struct bcm2835_spi *bs = spi_master_get_devdata(master);
 
 	/* if an error occurred and we have an active dma, then terminate */
-	if (bs->dma_pending) {
+	if (cmpxchg(&bs->dma_pending, true, false)) {
 		dmaengine_terminate_all(master->dma_tx);
 		dmaengine_terminate_all(master->dma_rx);
-		bs->dma_pending = 0;
 	}
 	/* and reset */
 	bcm2835_spi_reset_hw(master);
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
index 14da8cc..0346630 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -724,13 +724,18 @@
 	struct z_erofs_vle_unzip_io *io = tagptr_unfold_ptr(t);
 	bool background = tagptr_unfold_tags(t);
 
-	if (atomic_add_return(bios, &io->pending_bios))
-		return;
+	if (!background) {
+		unsigned long flags;
 
-	if (background)
+		spin_lock_irqsave(&io->u.wait.lock, flags);
+		if (!atomic_add_return(bios, &io->pending_bios))
+			wake_up_locked(&io->u.wait);
+		spin_unlock_irqrestore(&io->u.wait.lock, flags);
+		return;
+	}
+
+	if (!atomic_add_return(bios, &io->pending_bios))
 		queue_work(z_erofs_workqueue, &io->u.work);
-	else
-		wake_up(&io->u.wait);
 }
 
 static inline void z_erofs_vle_read_endio(struct bio *bio)
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index 2a48b09..470ea2c 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -154,7 +154,7 @@
 
 	pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset;
 
-	crypto_ops = try_then_request_module(lib80211_get_crypto_ops("WEP"), "lib80211_crypt_wep");
+	crypto_ops = lib80211_get_crypto_ops("WEP");
 
 	if (!crypto_ops)
 		return;
@@ -210,7 +210,7 @@
 		void *crypto_private = NULL;
 		int status = _SUCCESS;
 		const int keyindex = prxattrib->key_index;
-		struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("WEP"), "lib80211_crypt_wep");
+		struct lib80211_crypto_ops *crypto_ops = lib80211_get_crypto_ops("WEP");
 		char iv[4], icv[4];
 
 		if (!crypto_ops) {
@@ -1292,7 +1292,7 @@
 			struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt;
 			void *crypto_private = NULL;
 			u8 *key, *pframe = skb->data;
-			struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("CCMP"), "lib80211_crypt_ccmp");
+			struct lib80211_crypto_ops *crypto_ops = lib80211_get_crypto_ops("CCMP");
 			struct security_priv *psecuritypriv = &padapter->securitypriv;
 			char iv[8], icv[8];
 
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 28cbd6b..dfee698 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -35,6 +35,7 @@
 	{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
 	{USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
 	{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
+	{USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */
 	{USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
 	{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
 	{USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/wilc_sdio.c
index b2080d8..e52c3bd 100644
--- a/drivers/staging/wilc1000/wilc_sdio.c
+++ b/drivers/staging/wilc1000/wilc_sdio.c
@@ -831,6 +831,7 @@
 	if (!g_sdio.irq_gpio) {
 		int i;
 
+		cmd.read_write = 0;
 		cmd.function = 1;
 		cmd.address = 0x04;
 		cmd.data = 0;
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index 8de1601..b19c960 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -598,9 +598,12 @@
 	mutex_unlock(&cdev_list_lock);
 }
 
+static void __cxgbit_free_conn(struct cxgbit_sock *csk);
+
 void cxgbit_free_np(struct iscsi_np *np)
 {
 	struct cxgbit_np *cnp = np->np_context;
+	struct cxgbit_sock *csk, *tmp;
 
 	cnp->com.state = CSK_STATE_DEAD;
 	if (cnp->com.cdev)
@@ -608,6 +611,13 @@
 	else
 		cxgbit_free_all_np(cnp);
 
+	spin_lock_bh(&cnp->np_accept_lock);
+	list_for_each_entry_safe(csk, tmp, &cnp->np_accept_list, accept_node) {
+		list_del_init(&csk->accept_node);
+		__cxgbit_free_conn(csk);
+	}
+	spin_unlock_bh(&cnp->np_accept_lock);
+
 	np->np_context = NULL;
 	cxgbit_put_cnp(cnp);
 }
@@ -631,8 +641,11 @@
 
 static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb)
 {
+	struct cxgbit_sock *csk = handle;
+
 	pr_debug("%s cxgbit_device %p\n", __func__, handle);
 	kfree_skb(skb);
+	cxgbit_put_csk(csk);
 }
 
 static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
@@ -705,9 +718,9 @@
 			      csk->tid, 600, __func__);
 }
 
-void cxgbit_free_conn(struct iscsi_conn *conn)
+static void __cxgbit_free_conn(struct cxgbit_sock *csk)
 {
-	struct cxgbit_sock *csk = conn->context;
+	struct iscsi_conn *conn = csk->conn;
 	bool release = false;
 
 	pr_debug("%s: state %d\n",
@@ -716,7 +729,7 @@
 	spin_lock_bh(&csk->lock);
 	switch (csk->com.state) {
 	case CSK_STATE_ESTABLISHED:
-		if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
+		if (conn && (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)) {
 			csk->com.state = CSK_STATE_CLOSING;
 			cxgbit_send_halfclose(csk);
 		} else {
@@ -741,6 +754,11 @@
 		cxgbit_put_csk(csk);
 }
 
+void cxgbit_free_conn(struct iscsi_conn *conn)
+{
+	__cxgbit_free_conn(conn->context);
+}
+
 static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
 {
 	csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
@@ -803,6 +821,7 @@
 	spin_unlock_bh(&cdev->cskq.lock);
 
 	cxgbit_free_skb(csk);
+	cxgbit_put_cnp(csk->cnp);
 	cxgbit_put_cdev(cdev);
 
 	kfree(csk);
@@ -1190,7 +1209,7 @@
 	rpl5->opt0 = cpu_to_be64(opt0);
 	rpl5->opt2 = cpu_to_be32(opt2);
 	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx);
-	t4_set_arp_err_handler(skb, NULL, cxgbit_arp_failure_discard);
+	t4_set_arp_err_handler(skb, csk, cxgbit_arp_failure_discard);
 	cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
 }
 
@@ -1351,6 +1370,7 @@
 		goto rel_skb;
 	}
 
+	cxgbit_get_cnp(cnp);
 	cxgbit_get_cdev(cdev);
 
 	spin_lock(&cdev->cskq.lock);
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c
index f3f8856..c011c82 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_main.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c
@@ -58,6 +58,7 @@
 		return ERR_PTR(-ENOMEM);
 
 	kref_init(&cdev->kref);
+	spin_lock_init(&cdev->np_lock);
 
 	cdev->lldi = *lldi;
 
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index cb0461a1..93424db 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -108,12 +108,17 @@
 
 	buf[7] = 0x2; /* CmdQue=1 */
 
-	memcpy(&buf[8], "LIO-ORG ", 8);
-	memset(&buf[16], 0x20, 16);
+	/*
+	 * ASCII data fields described as being left-aligned shall have any
+	 * unused bytes at the end of the field (i.e., highest offset) and the
+	 * unused bytes shall be filled with ASCII space characters (20h).
+	 */
+	memset(&buf[8], 0x20, 8 + 16 + 4);
+	memcpy(&buf[8], "LIO-ORG", sizeof("LIO-ORG") - 1);
 	memcpy(&buf[16], dev->t10_wwn.model,
-	       min_t(size_t, strlen(dev->t10_wwn.model), 16));
+	       strnlen(dev->t10_wwn.model, 16));
 	memcpy(&buf[32], dev->t10_wwn.revision,
-	       min_t(size_t, strlen(dev->t10_wwn.revision), 4));
+	       strnlen(dev->t10_wwn.revision, 4));
 	buf[4] = 31; /* Set additional length to 31 */
 
 	return 0;
@@ -251,7 +256,9 @@
 	buf[off] = 0x2; /* ASCII */
 	buf[off+1] = 0x1; /* T10 Vendor ID */
 	buf[off+2] = 0x0;
-	memcpy(&buf[off+4], "LIO-ORG", 8);
+	/* left align Vendor ID and pad with spaces */
+	memset(&buf[off+4], 0x20, 8);
+	memcpy(&buf[off+4], "LIO-ORG", sizeof("LIO-ORG") - 1);
 	/* Extra Byte for NULL Terminator */
 	id_len++;
 	/* Identifier Length */
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index fc3093d2..3f7aad4 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -224,19 +224,28 @@
 	sub_api_initialized = 1;
 }
 
+static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
+{
+	struct se_session *sess = container_of(ref, typeof(*sess), cmd_count);
+
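+	/* invoked once the last command reference drops after percpu_ref_kill() */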
+	wake_up(&sess->cmd_list_wq);
+}
+
 /**
  * transport_init_session - initialize a session object
  * @se_sess: Session object pointer.
  *
  * The caller must have zero-initialized @se_sess before calling this function.
  */
-void transport_init_session(struct se_session *se_sess)
+int transport_init_session(struct se_session *se_sess)
 {
 	INIT_LIST_HEAD(&se_sess->sess_list);
 	INIT_LIST_HEAD(&se_sess->sess_acl_list);
 	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
 	spin_lock_init(&se_sess->sess_cmd_lock);
 	init_waitqueue_head(&se_sess->cmd_list_wq);
+	return percpu_ref_init(&se_sess->cmd_count,
+			       target_release_sess_cmd_refcnt, 0, GFP_KERNEL);
 }
 EXPORT_SYMBOL(transport_init_session);
 
@@ -247,6 +256,7 @@
 struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
 {
 	struct se_session *se_sess;
+	int ret;
 
 	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
 	if (!se_sess) {
@@ -254,7 +264,11 @@
 				" se_sess_cache\n");
 		return ERR_PTR(-ENOMEM);
 	}
-	transport_init_session(se_sess);
+	ret = transport_init_session(se_sess);
+	if (ret < 0) {
+		kfree(se_sess);
+		return ERR_PTR(ret);
+	}
 	se_sess->sup_prot_ops = sup_prot_ops;
 
 	return se_sess;
@@ -581,6 +595,7 @@
 		sbitmap_queue_free(&se_sess->sess_tag_pool);
 		kvfree(se_sess->sess_cmd_map);
 	}
+	percpu_ref_exit(&se_sess->cmd_count);
 	kmem_cache_free(se_sess_cache, se_sess);
 }
 EXPORT_SYMBOL(transport_free_session);
@@ -2724,6 +2739,7 @@
 	}
 	se_cmd->transport_state |= CMD_T_PRE_EXECUTE;
 	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
+	percpu_ref_get(&se_sess->cmd_count);
 out:
 	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 
@@ -2754,8 +2770,6 @@
 	if (se_sess) {
 		spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
 		list_del_init(&se_cmd->se_cmd_list);
-		if (se_sess->sess_tearing_down && list_empty(&se_sess->sess_cmd_list))
-			wake_up(&se_sess->cmd_list_wq);
 		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 	}
 
@@ -2763,6 +2777,8 @@
 	se_cmd->se_tfo->release_cmd(se_cmd);
 	if (compl)
 		complete(compl);
+
+	percpu_ref_put(&se_sess->cmd_count);
 }
 
 /**
@@ -2891,6 +2907,8 @@
 	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
 	se_sess->sess_tearing_down = 1;
 	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+
+	percpu_ref_kill(&se_sess->cmd_count);
 }
 EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
 
@@ -2905,17 +2923,14 @@
 
 	WARN_ON_ONCE(!se_sess->sess_tearing_down);
 
-	spin_lock_irq(&se_sess->sess_cmd_lock);
 	do {
-		ret = wait_event_lock_irq_timeout(
-				se_sess->cmd_list_wq,
-				list_empty(&se_sess->sess_cmd_list),
-				se_sess->sess_cmd_lock, 180 * HZ);
+		ret = wait_event_timeout(se_sess->cmd_list_wq,
+				percpu_ref_is_zero(&se_sess->cmd_count),
+				180 * HZ);
 		list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list)
 			target_show_cmd("session shutdown: still waiting for ",
 					cmd);
 	} while (ret <= 0);
-	spin_unlock_irq(&se_sess->sess_cmd_lock);
 }
 EXPORT_SYMBOL(target_wait_for_sess_cmds);
 
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 2718a93..7cdb5d7 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -480,6 +480,8 @@
 
 int target_xcopy_setup_pt(void)
 {
+	int ret;
+
 	xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
 	if (!xcopy_wq) {
 		pr_err("Unable to allocate xcopy_wq\n");
@@ -497,7 +499,9 @@
 	INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
 	INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
 	memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
-	transport_init_session(&xcopy_pt_sess);
+	ret = transport_init_session(&xcopy_pt_sess);
+	if (ret < 0)
+		return ret;
 
 	xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
 	xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
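
The target core hunks above replace the old "wake when sess_cmd_list drains under the lock" scheme with a per-session percpu_ref: each queued command takes a reference, completion drops it, and teardown kills the ref and waits for the release callback. A minimal sketch of that lifecycle, using the generic percpu-refcount API with hypothetical names (my_session, my_cmd_*):

	#include <linux/percpu-refcount.h>
	#include <linux/wait.h>

	struct my_session {
		struct percpu_ref	cmd_count;
		wait_queue_head_t	cmd_wq;
	};

	/* Runs once cmd_count drops to zero after percpu_ref_kill(). */
	static void my_session_release(struct percpu_ref *ref)
	{
		struct my_session *s = container_of(ref, struct my_session, cmd_count);

		wake_up(&s->cmd_wq);
	}

	static int my_session_init(struct my_session *s)
	{
		init_waitqueue_head(&s->cmd_wq);
		return percpu_ref_init(&s->cmd_count, my_session_release, 0, GFP_KERNEL);
	}

	static void my_cmd_queue(struct my_session *s)
	{
		percpu_ref_get(&s->cmd_count);	/* one get per in-flight command */
	}

	static void my_cmd_complete(struct my_session *s)
	{
		percpu_ref_put(&s->cmd_count);	/* release callback fires at zero */
	}

	static void my_session_shutdown(struct my_session *s)
	{
		percpu_ref_kill(&s->cmd_count);	/* no new refs; switch to atomic mode */
		wait_event(s->cmd_wq, percpu_ref_is_zero(&s->cmd_count));
		percpu_ref_exit(&s->cmd_count);
	}

This is why the wait loop in target_wait_for_sess_cmds() can drop the spinlock: percpu_ref_is_zero() is the single source of truth, and the 180 s timeout only exists to log still-pending commands.
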
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 3037b9d..5c15a63 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -26,11 +26,12 @@
 #include <linux/thermal.h>
 #include <linux/cpufreq.h>
 #include <linux/err.h>
+#include <linux/idr.h>
 #include <linux/pm_opp.h>
 #include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/cpu_cooling.h>
-#include <linux/of_device.h>
+#include <linux/energy_model.h>
 
 #include <trace/events/thermal.h>
 
@@ -49,19 +50,6 @@
  */
 
 /**
- * struct freq_table - frequency table along with power entries
- * @frequency:	frequency in KHz
- * @power:	power in mW
- *
- * This structure is built when the cooling device registers and helps
- * in translating frequency to power and vice versa.
- */
-struct freq_table {
-	u32 frequency;
-	u32 power;
-};
-
-/**
  * struct time_in_idle - Idle time stats
  * @time: previous reading of the absolute time that this cpu was idle
  * @timestamp: wall time of the last invocation of get_cpu_idle_time_us()
@@ -80,13 +68,9 @@
  *	cooling	devices.
  * @clipped_freq: integer value representing the absolute value of the clipped
  *	frequency.
- * @cpufreq_floor_state: integer value representing the frequency floor state
- *	of cpufreq cooling devices.
- * @floor_freq: integer value representing the absolute value of the floor
- *	frequency.
  * @max_level: maximum cooling level. One less than total number of valid
  *	cpufreq frequencies.
- * @freq_table: Freq table in descending order of frequencies
+ * @em: Reference on the Energy Model of the device
  * @cdev: thermal_cooling_device pointer to keep track of the
  *	registered cooling device.
  * @policy: cpufreq policy.
@@ -101,43 +85,21 @@
 	u32 last_load;
 	unsigned int cpufreq_state;
 	unsigned int clipped_freq;
-	unsigned int cpufreq_floor_state;
-	unsigned int floor_freq;
 	unsigned int max_level;
-	struct freq_table *freq_table;	/* In descending order */
+	struct em_perf_domain *em;
 	struct thermal_cooling_device *cdev;
 	struct cpufreq_policy *policy;
 	struct list_head node;
 	struct time_in_idle *idle_time;
-	struct cpu_cooling_ops *plat_ops;
 };
 
+static DEFINE_IDA(cpufreq_ida);
 static DEFINE_MUTEX(cooling_list_lock);
 static LIST_HEAD(cpufreq_cdev_list);
 
 /* Below code defines functions to be used for cpufreq as cooling device */
 
 /**
- * get_level: Find the level for a particular frequency
- * @cpufreq_cdev: cpufreq_cdev for which the property is required
- * @freq: Frequency
- *
- * Return: level corresponding to the frequency.
- */
-static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_cdev,
-			       unsigned int freq)
-{
-	struct freq_table *freq_table = cpufreq_cdev->freq_table;
-	unsigned long level;
-
-	for (level = 1; level <= cpufreq_cdev->max_level; level++)
-		if (freq > freq_table[level].frequency)
-			break;
-
-	return level - 1;
-}
-
-/**
  * cpufreq_thermal_notifier - notifier callback for cpufreq policy change.
  * @nb:	struct notifier_block * with callback info.
  * @event: value showing cpufreq event for which this function invoked.
@@ -153,7 +115,7 @@
 				    unsigned long event, void *data)
 {
 	struct cpufreq_policy *policy = data;
-	unsigned long clipped_freq = ULONG_MAX, floor_freq = 0;
+	unsigned long clipped_freq;
 	struct cpufreq_cooling_device *cpufreq_cdev;
 
 	if (event != CPUFREQ_ADJUST)
@@ -178,122 +140,64 @@
 		 *
 		 * But, if clipped_freq is greater than policy->max, we don't
 		 * need to do anything.
-		 *
-		 * Similarly, if policy minimum set by the user is less than
-		 * the floor_frequency, then adjust the policy->min.
 		 */
 		clipped_freq = cpufreq_cdev->clipped_freq;
-		floor_freq = cpufreq_cdev->floor_freq;
-		if (policy->max > clipped_freq || policy->min < floor_freq)
-			cpufreq_verify_within_limits(policy, floor_freq,
-							clipped_freq);
+
+		if (policy->max > clipped_freq)
+			cpufreq_verify_within_limits(policy, 0, clipped_freq);
 		break;
 	}
-
 	mutex_unlock(&cooling_list_lock);
 
 	return NOTIFY_OK;
 }
 
+#ifdef CONFIG_ENERGY_MODEL
 /**
- * update_freq_table() - Update the freq table with power numbers
- * @cpufreq_cdev:	the cpufreq cooling device in which to update the table
- * @capacitance: dynamic power coefficient for these cpus
+ * get_level: Find the level for a particular frequency
+ * @cpufreq_cdev: cpufreq_cdev for which the property is required
+ * @freq: Frequency
  *
- * Update the freq table with power numbers.  This table will be used in
- * cpu_power_to_freq() and cpu_freq_to_power() to convert between power and
- * frequency efficiently.  Power is stored in mW, frequency in KHz.  The
- * resulting table is in descending order.
- *
- * Return: 0 on success, -EINVAL if there are no OPPs for any CPUs,
- * or -ENOMEM if we run out of memory.
+ * Return: level corresponding to the frequency.
  */
-static int update_freq_table(struct cpufreq_cooling_device *cpufreq_cdev,
-			     u32 capacitance)
+static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_cdev,
+			       unsigned int freq)
 {
-	struct freq_table *freq_table = cpufreq_cdev->freq_table;
-	struct dev_pm_opp *opp;
-	struct device *dev = NULL;
-	int num_opps = 0, cpu = cpufreq_cdev->policy->cpu, i;
+	int i;
 
-	dev = get_cpu_device(cpu);
-	if (unlikely(!dev)) {
-		dev_warn(&cpufreq_cdev->cdev->device,
-			 "No cpu device for cpu %d\n", cpu);
-		return -ENODEV;
+	for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) {
+		if (freq > cpufreq_cdev->em->table[i].frequency)
+			break;
 	}
 
-	num_opps = dev_pm_opp_get_opp_count(dev);
-	if (num_opps < 0)
-		return num_opps;
-
-	/*
-	 * The cpufreq table is also built from the OPP table and so the count
-	 * should match.
-	 */
-	if (num_opps != cpufreq_cdev->max_level + 1) {
-		dev_warn(dev, "Number of OPPs not matching with max_levels\n");
-		return -EINVAL;
-	}
-
-	for (i = 0; i <= cpufreq_cdev->max_level; i++) {
-		unsigned long freq = freq_table[i].frequency * 1000;
-		u32 freq_mhz = freq_table[i].frequency / 1000;
-		u64 power;
-		u32 voltage_mv;
-
-		/*
-		 * Find ceil frequency as 'freq' may be slightly lower than OPP
-		 * freq due to truncation while converting to kHz.
-		 */
-		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
-		if (IS_ERR(opp)) {
-			dev_err(dev, "failed to get opp for %lu frequency\n",
-				freq);
-			return -EINVAL;
-		}
-
-		voltage_mv = dev_pm_opp_get_voltage(opp) / 1000;
-		dev_pm_opp_put(opp);
-
-		/*
-		 * Do the multiplication with MHz and millivolt so as
-		 * to not overflow.
-		 */
-		power = (u64)capacitance * freq_mhz * voltage_mv * voltage_mv;
-		do_div(power, 1000000000);
-
-		/* power is stored in mW */
-		freq_table[i].power = power;
-	}
-
-	return 0;
+	return cpufreq_cdev->max_level - i - 1;
 }
 
+
 static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_cdev,
 			     u32 freq)
 {
 	int i;
-	struct freq_table *freq_table = cpufreq_cdev->freq_table;
 
-	for (i = 1; i <= cpufreq_cdev->max_level; i++)
-		if (freq > freq_table[i].frequency)
+	for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) {
+		if (freq > cpufreq_cdev->em->table[i].frequency)
 			break;
+	}
 
-	return freq_table[i - 1].power;
+	return cpufreq_cdev->em->table[i + 1].power;
 }
 
 static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
 			     u32 power)
 {
 	int i;
-	struct freq_table *freq_table = cpufreq_cdev->freq_table;
 
-	for (i = 1; i <= cpufreq_cdev->max_level; i++)
-		if (power > freq_table[i].power)
+	for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) {
+		if (power > cpufreq_cdev->em->table[i].power)
 			break;
+	}
 
-	return freq_table[i - 1].frequency;
+	return cpufreq_cdev->em->table[i + 1].frequency;
 }
 
 /**
@@ -343,6 +247,7 @@
 	raw_cpu_power = cpu_freq_to_power(cpufreq_cdev, freq);
 	return (raw_cpu_power * cpufreq_cdev->last_load) / 100;
 }
+#endif
 
 /* cpufreq cooling device callback functions are defined below */
 
@@ -366,67 +271,6 @@
 }
 
 /**
- * cpufreq_get_min_state - callback function to get the device floor state.
- * @cdev: thermal cooling device pointer.
- * @state: fill this variable with the cooling device floor.
- *
- * Callback for the thermal cooling device to return the cpufreq
- * floor state.
- *
- * Return: 0 on success, an error code otherwise.
- */
-static int cpufreq_get_min_state(struct thermal_cooling_device *cdev,
-				 unsigned long *state)
-{
-	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
-
-	*state = cpufreq_cdev->cpufreq_floor_state;
-
-	return 0;
-}
-
-/**
- * cpufreq_set_min_state - callback function to set the device floor state.
- * @cdev: thermal cooling device pointer.
- * @state: set this variable to the current cooling state.
- *
- * Callback for the thermal cooling device to change the cpufreq
- * floor state.
- *
- * Return: 0 on success, an error code otherwise.
- */
-static int cpufreq_set_min_state(struct thermal_cooling_device *cdev,
-				 unsigned long state)
-{
-	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
-	unsigned int cpu = cpufreq_cdev->policy->cpu;
-	unsigned int floor_freq;
-
-	if (state > cpufreq_cdev->max_level)
-		state = cpufreq_cdev->max_level;
-
-	if (cpufreq_cdev->cpufreq_floor_state == state)
-		return 0;
-
-	cpufreq_cdev->cpufreq_floor_state = state;
-	floor_freq = cpufreq_cdev->freq_table[state].frequency;
-	cpufreq_cdev->floor_freq = floor_freq;
-
-	/*
-	 * Check if the device has a platform mitigation function that
-	 * can handle the CPU freq mitigation, if not, notify cpufreq
-	 * framework.
-	 */
-	if (cpufreq_cdev->plat_ops &&
-		cpufreq_cdev->plat_ops->floor_limit)
-		cpufreq_cdev->plat_ops->floor_limit(cpu, floor_freq);
-	else
-		cpufreq_update_policy(cpu);
-
-	return 0;
-}
-
-/**
  * cpufreq_get_cur_state - callback function to get the current cooling state.
  * @cdev: thermal cooling device pointer.
  * @state: fill this variable with the current cooling state.
@@ -446,6 +290,30 @@
 	return 0;
 }
 
+static unsigned int get_state_freq(struct cpufreq_cooling_device *cpufreq_cdev,
+			      unsigned long state)
+{
+	struct cpufreq_policy *policy;
+	unsigned long idx;
+
+#ifdef CONFIG_ENERGY_MODEL
+	/* Use the Energy Model table if available */
+	if (cpufreq_cdev->em) {
+		idx = cpufreq_cdev->max_level - state;
+		return cpufreq_cdev->em->table[idx].frequency;
+	}
+#endif
+
+	/* Otherwise, fallback on the CPUFreq table */
+	policy = cpufreq_cdev->policy;
+	if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
+		idx = cpufreq_cdev->max_level - state;
+	else
+		idx = state;
+
+	return policy->freq_table[idx].frequency;
+}
+
 /**
  * cpufreq_set_cur_state - callback function to set the current cooling state.
  * @cdev: thermal cooling device pointer.
@@ -470,24 +338,16 @@
 	if (cpufreq_cdev->cpufreq_state == state)
 		return 0;
 
-	clip_freq = cpufreq_cdev->freq_table[state].frequency;
+	clip_freq = get_state_freq(cpufreq_cdev, state);
 	cpufreq_cdev->cpufreq_state = state;
 	cpufreq_cdev->clipped_freq = clip_freq;
 
-	/* Check if the device has a platform mitigation function that
-	 * can handle the CPU freq mitigation, if not, notify cpufreq
-	 * framework.
-	 */
-	if (cpufreq_cdev->plat_ops &&
-		cpufreq_cdev->plat_ops->ceil_limit)
-		cpufreq_cdev->plat_ops->ceil_limit(cpufreq_cdev->policy->cpu,
-							clip_freq);
-	else
-		cpufreq_update_policy(cpufreq_cdev->policy->cpu);
+	cpufreq_update_policy(cpufreq_cdev->policy->cpu);
 
 	return 0;
 }
 
+#ifdef CONFIG_ENERGY_MODEL
 /**
  * cpufreq_get_requested_power() - get the current power
  * @cdev:	&thermal_cooling_device pointer
@@ -578,7 +438,7 @@
 			       struct thermal_zone_device *tz,
 			       unsigned long state, u32 *power)
 {
-	unsigned int freq, num_cpus;
+	unsigned int freq, num_cpus, idx;
 	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
 
 	/* Request state should be less than max_level */
@@ -587,7 +447,8 @@
 
 	num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus);
 
-	freq = cpufreq_cdev->freq_table[state].frequency;
+	idx = cpufreq_cdev->max_level - state;
+	freq = cpufreq_cdev->em->table[idx].frequency;
 	*power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus;
 
 	return 0;
@@ -634,16 +495,6 @@
 	return 0;
 }
 
-/* Bind cpufreq callbacks to thermal cooling device ops */
-
-static struct thermal_cooling_device_ops cpufreq_cooling_ops = {
-	.get_max_state = cpufreq_get_max_state,
-	.get_cur_state = cpufreq_get_cur_state,
-	.set_cur_state = cpufreq_set_cur_state,
-	.set_min_state = cpufreq_set_min_state,
-	.get_min_state = cpufreq_get_min_state,
-};
-
 static struct thermal_cooling_device_ops cpufreq_power_cooling_ops = {
 	.get_max_state		= cpufreq_get_max_state,
 	.get_cur_state		= cpufreq_get_cur_state,
@@ -652,35 +503,27 @@
 	.state2power		= cpufreq_state2power,
 	.power2state		= cpufreq_power2state,
 };
+#endif
+
+/* Bind cpufreq callbacks to thermal cooling device ops */
+
+static struct thermal_cooling_device_ops cpufreq_cooling_ops = {
+	.get_max_state = cpufreq_get_max_state,
+	.get_cur_state = cpufreq_get_cur_state,
+	.set_cur_state = cpufreq_set_cur_state,
+};
 
 /* Notifier for cpufreq policy change */
 static struct notifier_block thermal_cpufreq_notifier_block = {
 	.notifier_call = cpufreq_thermal_notifier,
 };
 
-static unsigned int find_next_max(struct cpufreq_frequency_table *table,
-				  unsigned int prev_max)
-{
-	struct cpufreq_frequency_table *pos;
-	unsigned int max = 0;
-
-	cpufreq_for_each_valid_entry(pos, table) {
-		if (pos->frequency > max && pos->frequency < prev_max)
-			max = pos->frequency;
-	}
-
-	return max;
-}
-
 /**
  * __cpufreq_cooling_register - helper function to create cpufreq cooling device
  * @np: a valid struct device_node to the cooling device device tree node
  * @policy: cpufreq policy
  * Normally this should be same as cpufreq policy->related_cpus.
- * @capacitance: dynamic power coefficient for these cpus
- * @plat_ops: function that does the mitigation by changing the
- *                   frequencies (Optional). By default, cpufreq framework will
- *                   be notified of the new limits.
+ * @try_model: true if a power model should be used
  *
  * This interface function registers the cpufreq cooling device with the name
  * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
@@ -692,13 +535,12 @@
  */
 static struct thermal_cooling_device *
 __cpufreq_cooling_register(struct device_node *np,
-			struct cpufreq_policy *policy, u32 capacitance,
-			struct cpu_cooling_ops *plat_ops)
+			struct cpufreq_policy *policy, bool try_model)
 {
 	struct thermal_cooling_device *cdev;
 	struct cpufreq_cooling_device *cpufreq_cdev;
 	char dev_name[THERMAL_NAME_LENGTH];
-	unsigned int freq, i, num_cpus;
+	unsigned int i, num_cpus;
 	int ret;
 	struct thermal_cooling_device_ops *cooling_ops;
 	bool first;
@@ -732,54 +574,37 @@
 	/* max_level is an index, not a counter */
 	cpufreq_cdev->max_level = i - 1;
 
-	cpufreq_cdev->freq_table = kmalloc_array(i,
-					sizeof(*cpufreq_cdev->freq_table),
-					GFP_KERNEL);
-	if (!cpufreq_cdev->freq_table) {
-		cdev = ERR_PTR(-ENOMEM);
+#ifdef CONFIG_ENERGY_MODEL
+	if (try_model) {
+		struct em_perf_domain *em = em_cpu_get(policy->cpu);
+
+		if (!em || !cpumask_equal(policy->related_cpus,
+					  to_cpumask(em->cpus))) {
+			cdev = ERR_PTR(-EINVAL);
+			goto free_idle_time;
+		}
+		cpufreq_cdev->em = em;
+		cooling_ops = &cpufreq_power_cooling_ops;
+	} else
+#endif
+		cooling_ops = &cpufreq_cooling_ops;
+
+	ret = ida_simple_get(&cpufreq_ida, 0, 0, GFP_KERNEL);
+	if (ret < 0) {
+		cdev = ERR_PTR(ret);
 		goto free_idle_time;
 	}
-
-	cpufreq_cdev->id = policy->cpu;
+	cpufreq_cdev->id = ret;
 
 	snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
 		 cpufreq_cdev->id);
 
-	/* Fill freq-table in descending order of frequencies */
-	for (i = 0, freq = -1; i <= cpufreq_cdev->max_level; i++) {
-		freq = find_next_max(policy->freq_table, freq);
-		cpufreq_cdev->freq_table[i].frequency = freq;
-
-		/* Warn for duplicate entries */
-		if (!freq)
-			pr_warn("%s: table has duplicate entries\n", __func__);
-		else
-			pr_debug("%s: freq:%u KHz\n", __func__, freq);
-	}
-
-	if (capacitance) {
-		ret = update_freq_table(cpufreq_cdev, capacitance);
-		if (ret) {
-			cdev = ERR_PTR(ret);
-			goto free_table;
-		}
-
-		cooling_ops = &cpufreq_power_cooling_ops;
-	} else {
-		cooling_ops = &cpufreq_cooling_ops;
-	}
-
-	cpufreq_cdev->plat_ops = plat_ops;
-
 	cdev = thermal_of_cooling_device_register(np, dev_name, cpufreq_cdev,
 						  cooling_ops);
 	if (IS_ERR(cdev))
-		goto free_table;
+		goto remove_ida;
 
-	cpufreq_cdev->clipped_freq = cpufreq_cdev->freq_table[0].frequency;
-	cpufreq_cdev->floor_freq =
-		cpufreq_cdev->freq_table[cpufreq_cdev->max_level].frequency;
-	cpufreq_cdev->cpufreq_floor_state = cpufreq_cdev->max_level;
+	cpufreq_cdev->clipped_freq = get_state_freq(cpufreq_cdev, 0);
 	cpufreq_cdev->cdev = cdev;
 
 	mutex_lock(&cooling_list_lock);
@@ -788,14 +613,14 @@
 	list_add(&cpufreq_cdev->node, &cpufreq_cdev_list);
 	mutex_unlock(&cooling_list_lock);
 
-	if (first && !cpufreq_cdev->plat_ops)
+	if (first)
 		cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
 					  CPUFREQ_POLICY_NOTIFIER);
 
 	return cdev;
 
-free_table:
-	kfree(cpufreq_cdev->freq_table);
+remove_ida:
+	ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
 free_idle_time:
 	kfree(cpufreq_cdev->idle_time);
 free_cdev:
@@ -817,7 +642,7 @@
 struct thermal_cooling_device *
 cpufreq_cooling_register(struct cpufreq_policy *policy)
 {
-	return __cpufreq_cooling_register(NULL, policy, 0, NULL);
+	return __cpufreq_cooling_register(NULL, policy, false);
 }
 EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
 
@@ -845,7 +670,6 @@
 {
 	struct device_node *np = of_get_cpu_node(policy->cpu, NULL);
 	struct thermal_cooling_device *cdev = NULL;
-	u32 capacitance = 0;
 
 	if (!np) {
 		pr_err("cpu_cooling: OF node not available for cpu%d\n",
@@ -854,11 +678,7 @@
 	}
 
 	if (of_find_property(np, "#cooling-cells", NULL)) {
-		of_property_read_u32(np, "dynamic-power-coefficient",
-				     &capacitance);
-
-		cdev = __cpufreq_cooling_register(np, policy, capacitance,
-						NULL);
+		cdev = __cpufreq_cooling_register(np, policy, true);
 		if (IS_ERR(cdev)) {
 			pr_err("cpu_cooling: cpu%d is not running as cooling device: %ld\n",
 			       policy->cpu, PTR_ERR(cdev));
@@ -872,47 +692,6 @@
 EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
 
 /**
- * cpufreq_platform_cooling_register() - create cpufreq cooling device with
- * additional platform specific mitigation function.
- *
- * @policy: cpufreq policy
- * @plat_ops: the platform mitigation functions that will be called insted of
- * cpufreq, if provided.
- *
- * Return: a valid struct thermal_cooling_device pointer on success,
- * on failure, it returns a corresponding ERR_PTR().
- */
-struct thermal_cooling_device *
-cpufreq_platform_cooling_register(struct cpufreq_policy *policy,
-				struct cpu_cooling_ops *plat_ops)
-{
-	struct device_node *cpu_node = NULL;
-	u32 capacitance = 0;
-	struct thermal_cooling_device *cdev = NULL;
-
-	cpu_node = of_cpu_device_node_get(policy->cpu);
-	if (!cpu_node) {
-		pr_err("No cpu node\n");
-		return ERR_PTR(-EINVAL);
-	}
-	if (of_find_property(cpu_node, "#cooling-cells", NULL)) {
-		of_property_read_u32(cpu_node, "dynamic-power-coefficient",
-				     &capacitance);
-
-		cdev = __cpufreq_cooling_register(cpu_node, policy, capacitance,
-							plat_ops);
-		if (IS_ERR(cdev))
-			pr_err("cpu_cooling: cpu%d cooling device err: %ld\n",
-			       policy->cpu, PTR_ERR(cdev));
-	}
-
-	of_node_put(cpu_node);
-	return cdev;
-}
-EXPORT_SYMBOL(cpufreq_platform_cooling_register);
-
-
-/**
  * cpufreq_cooling_unregister - function to remove cpufreq cooling device.
  * @cdev: thermal cooling device pointer.
  *
@@ -934,16 +713,13 @@
 	last = list_empty(&cpufreq_cdev_list);
 	mutex_unlock(&cooling_list_lock);
 
-	if (last) {
-		if (!cpufreq_cdev->plat_ops)
-			cpufreq_unregister_notifier(
-					&thermal_cpufreq_notifier_block,
-					CPUFREQ_POLICY_NOTIFIER);
-	}
+	if (last)
+		cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
+					    CPUFREQ_POLICY_NOTIFIER);
 
 	thermal_cooling_device_unregister(cpufreq_cdev->cdev);
+	ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
 	kfree(cpufreq_cdev->idle_time);
-	kfree(cpufreq_cdev->freq_table);
 	kfree(cpufreq_cdev);
 }
 EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);
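
Throughout this cpu_cooling rework the same index inversion appears: the Energy Model table is sorted in ascending frequency order, while cooling states count up as frequency goes down (state 0 means no throttling). Hence idx = max_level - state in get_state_freq() and cpufreq_state2power(), and max_level - i - 1 in get_level(). A small sketch with made-up data (not the driver's table):

	#include <linux/kernel.h>

	/* EM-style table, ascending frequency order (kHz) -- illustrative only */
	static const unsigned int em_freq[] = { 300000, 576000, 1094400, 1516800 };
	#define MAX_LEVEL (ARRAY_SIZE(em_freq) - 1)	/* 3: an index, not a count */

	/* Cooling state 0 -> fastest entry, state MAX_LEVEL -> slowest */
	static unsigned int state_to_freq(unsigned long state)
	{
		return em_freq[MAX_LEVEL - state];
	}

state_to_freq(0) yields 1516800 and state_to_freq(3) yields 300000, matching what the old descending freq_table provided by construction.
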
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index dabb391..bb63519 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -597,6 +597,7 @@
 				/* too large for caller's buffer */
 				ret = -EOVERFLOW;
 			} else {
+				__set_current_state(TASK_RUNNING);
 				if (copy_to_user(buf, rbuf->buf, rbuf->count))
 					ret = -EFAULT;
 				else
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index ebd33c0..89ade21 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2780,6 +2780,7 @@
 		.name	= "sbsa-uart",
 		.of_match_table = of_match_ptr(sbsa_uart_of_match),
 		.acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
+		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
 	},
 };
 
@@ -2808,6 +2809,7 @@
 	.drv = {
 		.name	= "uart-pl011",
 		.pm	= &pl011_dev_pm_ops,
+		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
 	},
 	.id_table	= pl011_ids,
 	.probe		= pl011_probe,
diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c
index fd80d99..0bdf168 100644
--- a/drivers/tty/serial/pic32_uart.c
+++ b/drivers/tty/serial/pic32_uart.c
@@ -919,6 +919,7 @@
 	.driver		= {
 		.name	= PIC32_DEV_NAME,
 		.of_match_table	= of_match_ptr(pic32_serial_dt_ids),
+		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_PIC32),
 	},
 };
 
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 80bb56f..7fe6794 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -205,10 +205,15 @@
 	if (!state->xmit.buf) {
 		state->xmit.buf = (unsigned char *) page;
 		uart_circ_clear(&state->xmit);
+		uart_port_unlock(uport, flags);
 	} else {
+		uart_port_unlock(uport, flags);
+		/*
+		 * Do not free() the page under the port lock, see
+		 * uart_shutdown().
+		 */
 		free_page(page);
 	}
-	uart_port_unlock(uport, flags);
 
 	retval = uport->ops->startup(uport);
 	if (retval == 0) {
@@ -268,6 +273,7 @@
 	struct uart_port *uport = uart_port_check(state);
 	struct tty_port *port = &state->port;
 	unsigned long flags = 0;
+	char *xmit_buf = NULL;
 
 	/*
 	 * Set the TTY IO error marker
@@ -298,14 +304,18 @@
 	tty_port_set_suspended(port, 0);
 
 	/*
-	 * Free the transmit buffer page.
+	 * Do not free() the transmit buffer page under the port lock since
+	 * this can create various circular locking scenarios. For instance,
+	 * the console driver may need to allocate/free a debug object, which
+	 * can end up in printk() recursion.
 	 */
 	uart_port_lock(state, flags);
-	if (state->xmit.buf) {
-		free_page((unsigned long)state->xmit.buf);
-		state->xmit.buf = NULL;
-	}
+	xmit_buf = state->xmit.buf;
+	state->xmit.buf = NULL;
 	uart_port_unlock(uport, flags);
+
+	if (xmit_buf)
+		free_page((unsigned long)xmit_buf);
 }
 
 /**
@@ -540,10 +550,12 @@
 	int ret = 0;
 
 	circ = &state->xmit;
-	if (!circ->buf)
-		return 0;
-
 	port = uart_port_lock(state, flags);
+	if (!circ->buf) {
+		uart_port_unlock(port, flags);
+		return 0;
+	}
+
 	if (port && uart_circ_chars_free(circ) != 0) {
 		circ->buf[circ->head] = c;
 		circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1);
@@ -576,11 +588,13 @@
 		return -EL3HLT;
 	}
 
-	circ = &state->xmit;
-	if (!circ->buf)
-		return 0;
-
 	port = uart_port_lock(state, flags);
+	circ = &state->xmit;
+	if (!circ->buf) {
+		uart_port_unlock(port, flags);
+		return 0;
+	}
+
 	while (port) {
 		c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
 		if (count < c)
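
Both serial_core hunks above apply the same discipline: never call into the page allocator while holding the port lock. uart_startup() now unlocks before free_page(), and uart_shutdown() snapshots and clears the pointer under the lock, freeing only afterwards. The generic shape of that pattern, with hypothetical names:

	#include <linux/gfp.h>
	#include <linux/spinlock.h>

	struct my_port {
		spinlock_t	lock;
		unsigned char	*xmit_buf;
	};

	static void detach_and_free(struct my_port *p)
	{
		unsigned long flags;
		unsigned char *buf;

		spin_lock_irqsave(&p->lock, flags);
		buf = p->xmit_buf;		/* snapshot under the lock */
		p->xmit_buf = NULL;		/* no other path can reach it now */
		spin_unlock_irqrestore(&p->lock, flags);

		if (buf)
			free_page((unsigned long)buf);	/* safe: lock dropped */
	}

The write paths compensate by re-checking circ->buf under the lock, since the buffer can now disappear between an unlocked check and the lock acquisition.
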
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 6cf3e9b..3e774756 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -1394,22 +1394,43 @@
 static enum su_type su_get_type(struct device_node *dp)
 {
 	struct device_node *ap = of_find_node_by_path("/aliases");
+	enum su_type rc = SU_PORT_PORT;
 
 	if (ap) {
 		const char *keyb = of_get_property(ap, "keyboard", NULL);
 		const char *ms = of_get_property(ap, "mouse", NULL);
+		struct device_node *match;
 
 		if (keyb) {
-			if (dp == of_find_node_by_path(keyb))
-				return SU_PORT_KBD;
+			match = of_find_node_by_path(keyb);
+
+			/*
+			 * The pointer is used as an identifier, not
+			 * dereferenced, so we can drop the refcount on
+			 * the of_node immediately after getting it.
+			 */
+			of_node_put(match);
+
+			if (dp == match) {
+				rc = SU_PORT_KBD;
+				goto out;
+			}
 		}
 		if (ms) {
-			if (dp == of_find_node_by_path(ms))
-				return SU_PORT_MS;
+			match = of_find_node_by_path(ms);
+
+			of_node_put(match);
+
+			if (dp == match) {
+				rc = SU_PORT_MS;
+				goto out;
+			}
 		}
 	}
 
-	return SU_PORT_PORT;
+out:
+	of_node_put(ap);
+	return rc;
 }
 
 static int su_probe(struct platform_device *op)
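
The sunsu fix shows the "pointer as identity" idiom for OF nodes: of_find_node_by_path() returns a node with an elevated refcount, but when the result is only compared against another pointer and never dereferenced, the reference can be dropped immediately. A hedged sketch (node_matches_alias is a hypothetical helper):

	#include <linux/of.h>

	/* True if @dp is the node that the alias @path resolves to. */
	static bool node_matches_alias(struct device_node *dp, const char *path)
	{
		struct device_node *match = of_find_node_by_path(path);

		/* Identity check only: drop the reference right away. */
		of_node_put(match);	/* of_node_put(NULL) is a no-op */

		return dp == match;
	}

The function also gains the missing of_node_put(ap) on the /aliases node, which the old early returns leaked.
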
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index a48f19b..87d8dd9 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -125,7 +125,7 @@
 #define CDNS_UART_IXR_RXTRIG	0x00000001 /* RX FIFO trigger interrupt */
 #define CDNS_UART_IXR_RXFULL	0x00000004 /* RX FIFO full interrupt. */
 #define CDNS_UART_IXR_RXEMPTY	0x00000002 /* RX FIFO empty interrupt. */
-#define CDNS_UART_IXR_MASK	0x00001FFF /* Valid bit mask */
+#define CDNS_UART_IXR_RXMASK	0x000021e7 /* Valid RX bit mask */
 
 	/*
 	 * Do not enable parity error interrupt for the following
@@ -362,7 +362,7 @@
 		cdns_uart_handle_tx(dev_id);
 		isrstatus &= ~CDNS_UART_IXR_TXEMPTY;
 	}
-	if (isrstatus & CDNS_UART_IXR_MASK)
+	if (isrstatus & CDNS_UART_IXR_RXMASK)
 		cdns_uart_handle_rx(dev_id, isrstatus);
 
 	spin_unlock(&port->lock);
@@ -1608,6 +1608,7 @@
 		.name = CDNS_UART_NAME,
 		.of_match_table = cdns_uart_of_match,
 		.pm = &cdns_uart_dev_pm_ops,
+		.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_XILINX_PS_UART),
 		},
 };
 
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index d6f42b5..e7d192e 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1255,7 +1255,8 @@
 static int tty_reopen(struct tty_struct *tty)
 {
 	struct tty_driver *driver = tty->driver;
-	int retval;
+	struct tty_ldisc *ld;
+	int retval = 0;
 
 	if (driver->type == TTY_DRIVER_TYPE_PTY &&
 	    driver->subtype == PTY_TYPE_MASTER)
@@ -1267,14 +1268,21 @@
 	if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
 		return -EBUSY;
 
-	tty->count++;
+	ld = tty_ldisc_ref_wait(tty);
+	if (ld) {
+		tty_ldisc_deref(ld);
+	} else {
+		retval = tty_ldisc_lock(tty, 5 * HZ);
+		if (retval)
+			return retval;
 
-	if (tty->ldisc)
-		return 0;
+		if (!tty->ldisc)
+			retval = tty_ldisc_reinit(tty, tty->termios.c_line);
+		tty_ldisc_unlock(tty);
+	}
 
-	retval = tty_ldisc_reinit(tty, tty->termios.c_line);
-	if (retval)
-		tty->count--;
+	if (retval == 0)
+		tty->count++;
 
 	return retval;
 }
@@ -2180,7 +2188,8 @@
 	ld = tty_ldisc_ref_wait(tty);
 	if (!ld)
 		return -EIO;
-	ld->ops->receive_buf(tty, &ch, &mbz, 1);
+	if (ld->ops->receive_buf)
+		ld->ops->receive_buf(tty, &ch, &mbz, 1);
 	tty_ldisc_deref(ld);
 	return 0;
 }
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index 0c98d88..b989ca2 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -293,6 +293,16 @@
 	if (!locked)
 		atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
 	list_del(&waiter.list);
+
+	/*
+	 * In case of timeout, wake up every reader that gave the right of
+	 * way to a writer. This prevents the readers from splitting into
+	 * two groups: one that holds the semaphore and another that sleeps
+	 * (in case of no contention with a writer).
+	 */
+	if (!locked && list_empty(&sem->write_wait))
+		__ldsem_wake_readers(sem);
+
 	raw_spin_unlock_irq(&sem->wait_lock);
 
 	__set_current_state(TASK_RUNNING);
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 476ec4b1..da33589 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1275,6 +1275,7 @@
 	if (con_is_visible(vc))
 		update_screen(vc);
 	vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num);
+	notify_update(vc);
 	return err;
 }
 
@@ -2767,8 +2768,8 @@
 	con_flush(vc, draw_from, draw_to, &draw_x);
 	vc_uniscr_debug_check(vc);
 	console_conditional_schedule();
-	console_unlock();
 	notify_update(vc);
+	console_unlock();
 	return n;
 }
 
@@ -2887,8 +2888,7 @@
 	unsigned char c;
 	static DEFINE_SPINLOCK(printing_lock);
 	const ushort *start;
-	ushort cnt = 0;
-	ushort myx;
+	ushort start_x, cnt;
 	int kmsg_console;
 
 	/* console busy or not yet initialized */
@@ -2901,10 +2901,6 @@
 	if (kmsg_console && vc_cons_allocated(kmsg_console - 1))
 		vc = vc_cons[kmsg_console - 1].d;
 
-	/* read `x' only after setting currcons properly (otherwise
-	   the `x' macro will read the x of the foreground console). */
-	myx = vc->vc_x;
-
 	if (!vc_cons_allocated(fg_console)) {
 		/* impossible */
 		/* printk("vt_console_print: tty %d not allocated ??\n", currcons+1); */
@@ -2919,53 +2915,41 @@
 		hide_cursor(vc);
 
 	start = (ushort *)vc->vc_pos;
-
-	/* Contrived structure to try to emulate original need_wrap behaviour
-	 * Problems caused when we have need_wrap set on '\n' character */
+	start_x = vc->vc_x;
+	cnt = 0;
 	while (count--) {
 		c = *b++;
 		if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) {
-			if (cnt > 0) {
-				if (con_is_visible(vc))
-					vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x);
-				vc->vc_x += cnt;
-				if (vc->vc_need_wrap)
-					vc->vc_x--;
-				cnt = 0;
-			}
+			if (cnt && con_is_visible(vc))
+				vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x);
+			cnt = 0;
 			if (c == 8) {		/* backspace */
 				bs(vc);
 				start = (ushort *)vc->vc_pos;
-				myx = vc->vc_x;
+				start_x = vc->vc_x;
 				continue;
 			}
 			if (c != 13)
 				lf(vc);
 			cr(vc);
 			start = (ushort *)vc->vc_pos;
-			myx = vc->vc_x;
+			start_x = vc->vc_x;
 			if (c == 10 || c == 13)
 				continue;
 		}
+		vc_uniscr_putc(vc, c);
 		scr_writew((vc->vc_attr << 8) + c, (unsigned short *)vc->vc_pos);
 		notify_write(vc, c);
 		cnt++;
-		if (myx == vc->vc_cols - 1) {
+		if (vc->vc_x == vc->vc_cols - 1) {
 			vc->vc_need_wrap = 1;
-			continue;
-		}
-		vc->vc_pos += 2;
-		myx++;
-	}
-	if (cnt > 0) {
-		if (con_is_visible(vc))
-			vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x);
-		vc->vc_x += cnt;
-		if (vc->vc_x == vc->vc_cols) {
-			vc->vc_x--;
-			vc->vc_need_wrap = 1;
+		} else {
+			vc->vc_pos += 2;
+			vc->vc_x++;
 		}
 	}
+	if (cnt && con_is_visible(vc))
+		vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x);
 	set_cursor(vc);
 	notify_update(vc);
 
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 953dff4..3f2aa75 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -207,8 +207,4 @@
 	  To compile this driver as a module, choose M here: the module will
 	  be called ulpi.
 
-config USB_ROLE_SWITCH
-	tristate
-	select USB_COMMON
-
 endif # USB_SUPPORT
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 40c64c7..08b8aa5 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -581,6 +581,13 @@
 	if (retval)
 		goto error_init_termios;
 
+	/*
+	 * Suppress initial echoing for some devices which might send data
+	 * immediately after acm driver has been installed.
+	 */
+	if (acm->quirks & DISABLE_ECHO)
+		tty->termios.c_lflag &= ~ECHO;
+
 	tty->driver_data = acm;
 
 	return 0;
@@ -1672,6 +1679,9 @@
 	{ USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */
 	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
 	},
+	{ USB_DEVICE(0x0e8d, 0x2000), /* MediaTek Inc Preloader */
+	.driver_info = DISABLE_ECHO, /* disable ECHO in termios */
+	},
 	{ USB_DEVICE(0x0e8d, 0x3329), /* MediaTek Inc GPS */
 	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
 	},
@@ -1870,6 +1880,13 @@
 	.driver_info = IGNORE_DEVICE,
 	},
 
+	{ USB_DEVICE(0x1bc7, 0x0021), /* Telit 3G ACM only composition */
+	.driver_info = SEND_ZERO_PACKET,
+	},
+	{ USB_DEVICE(0x1bc7, 0x0023), /* Telit 3G ACM + ECM composition */
+	.driver_info = SEND_ZERO_PACKET,
+	},
+
 	/* control interfaces without any protocol set */
 	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
 		USB_CDC_PROTO_NONE) },
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index ca06b20..515aad0 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -140,3 +140,4 @@
 #define QUIRK_CONTROL_LINE_STATE	BIT(6)
 #define CLEAR_HALT_CONDITIONS		BIT(7)
 #define SEND_ZERO_PACKET		BIT(8)
+#define DISABLE_ECHO			BIT(9)
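
DISABLE_ECHO follows the driver's existing quirk convention: one BIT(n) per quirk, carried in the usb_device_id table's driver_info and tested wherever the behaviour matters. A minimal sketch of how such a flag is consumed, mirroring (not reproducing) the acm_tty_install() hunk above:

	#include <linux/bits.h>
	#include <linux/tty.h>

	#define DISABLE_ECHO	BIT(9)

	/* Hypothetical helper: clear ECHO before the first byte can bounce back. */
	static void maybe_disable_echo(unsigned long quirks, struct tty_struct *tty)
	{
		if (quirks & DISABLE_ECHO)
			tty->termios.c_lflag &= ~ECHO;
	}
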
diff --git a/drivers/usb/common/Makefile b/drivers/usb/common/Makefile
index fb4d5ef..0a7c45e 100644
--- a/drivers/usb/common/Makefile
+++ b/drivers/usb/common/Makefile
@@ -9,4 +9,3 @@
 
 obj-$(CONFIG_USB_OTG_FSM) += usb-otg-fsm.o
 obj-$(CONFIG_USB_ULPI_BUS)	+= ulpi.o
-obj-$(CONFIG_USB_ROLE_SWITCH)	+= roles.o
diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c
index dc7f7fd..c12ac56 100644
--- a/drivers/usb/core/ledtrig-usbport.c
+++ b/drivers/usb/core/ledtrig-usbport.c
@@ -119,11 +119,6 @@
 	.attrs = ports_attrs,
 };
 
-static const struct attribute_group *ports_groups[] = {
-	&ports_group,
-	NULL
-};
-
 /***************************************
  * Adding & removing ports
  ***************************************/
@@ -307,6 +302,7 @@
 static int usbport_trig_activate(struct led_classdev *led_cdev)
 {
 	struct usbport_trig_data *usbport_data;
+	int err;
 
 	usbport_data = kzalloc(sizeof(*usbport_data), GFP_KERNEL);
 	if (!usbport_data)
@@ -315,6 +311,9 @@
 
 	/* List of ports */
 	INIT_LIST_HEAD(&usbport_data->ports);
+	err = sysfs_create_group(&led_cdev->dev->kobj, &ports_group);
+	if (err)
+		goto err_free;
 	usb_for_each_dev(usbport_data, usbport_trig_add_usb_dev_ports);
 	usbport_trig_update_count(usbport_data);
 
@@ -322,8 +321,11 @@
 	usbport_data->nb.notifier_call = usbport_trig_notify;
 	led_set_trigger_data(led_cdev, usbport_data);
 	usb_register_notify(&usbport_data->nb);
-
 	return 0;
+
+err_free:
+	kfree(usbport_data);
+	return err;
 }
 
 static void usbport_trig_deactivate(struct led_classdev *led_cdev)
@@ -335,6 +337,8 @@
 		usbport_trig_remove_port(usbport_data, port);
 	}
 
+	sysfs_remove_group(&led_cdev->dev->kobj, &ports_group);
+
 	usb_unregister_notify(&usbport_data->nb);
 
 	kfree(usbport_data);
@@ -344,7 +348,6 @@
 	.name     = "usbport",
 	.activate = usbport_trig_activate,
 	.deactivate = usbport_trig_deactivate,
-	.groups = ports_groups,
 };
 
 static int __init usbport_trig_init(void)
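
The usbport change moves the sysfs group out of the static .groups array and into activate/deactivate, so the "ports" attributes exist only while usbport_data does, and activation can fail cleanly. The resulting lifecycle, sketched with placeholder names:

	#include <linux/leds.h>
	#include <linux/slab.h>
	#include <linux/sysfs.h>

	struct my_trig_data {
		struct list_head ports;
	};

	/* Attribute group definition elided; it holds the trigger's sysfs files. */
	extern const struct attribute_group my_group;

	static int my_trig_activate(struct led_classdev *led_cdev)
	{
		struct my_trig_data *data;
		int err;

		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		/* Expose attributes only once their backing data exists. */
		err = sysfs_create_group(&led_cdev->dev->kobj, &my_group);
		if (err) {
			kfree(data);
			return err;
		}

		led_set_trigger_data(led_cdev, data);
		return 0;
	}

	static void my_trig_deactivate(struct led_classdev *led_cdev)
	{
		struct my_trig_data *data = led_get_trigger_data(led_cdev);

		sysfs_remove_group(&led_cdev->dev->kobj, &my_group);	/* before free */
		kfree(data);
	}
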
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 514c521..8bc35d5 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -394,7 +394,8 @@
 	{ USB_DEVICE(0x1a40, 0x0101), .driver_info = USB_QUIRK_HUB_SLOW_RESET },
 
 	/* Corsair K70 RGB */
-	{ USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
+	{ USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT |
+	  USB_QUIRK_DELAY_CTRL_MSG },
 
 	/* Corsair Strafe */
 	{ USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT |
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index 3f9bccc..c089ffa 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -366,7 +366,7 @@
 	u32 desc_list_sz;
 	u32 *n_bytes;
 	struct timer_list unreserve_timer;
-	struct timer_list wait_timer;
+	struct hrtimer wait_timer;
 	struct dwc2_tt *dwc_tt;
 	int ttport;
 	unsigned tt_buffer_dirty:1;
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index 40839591..ea3aa64 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -59,7 +59,7 @@
 #define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
 
 /* If we get a NAK, wait this long before retrying */
-#define DWC2_RETRY_WAIT_DELAY (msecs_to_jiffies(1))
+#define DWC2_RETRY_WAIT_DELAY (1 * 1E6L) /* 1 ms, in ns for the hrtimer */
 
 /**
  * dwc2_periodic_channel_available() - Checks that a channel is available for a
@@ -1464,10 +1464,12 @@
  * qh back to the "inactive" list, then queues transactions.
  *
  * @t: Pointer to wait_timer in a qh.
+ *
+ * Return: HRTIMER_NORESTART to not automatically restart this timer.
  */
-static void dwc2_wait_timer_fn(struct timer_list *t)
+static enum hrtimer_restart dwc2_wait_timer_fn(struct hrtimer *t)
 {
-	struct dwc2_qh *qh = from_timer(qh, t, wait_timer);
+	struct dwc2_qh *qh = container_of(t, struct dwc2_qh, wait_timer);
 	struct dwc2_hsotg *hsotg = qh->hsotg;
 	unsigned long flags;
 
@@ -1491,6 +1493,7 @@
 	}
 
 	spin_unlock_irqrestore(&hsotg->lock, flags);
+	return HRTIMER_NORESTART;
 }
 
 /**
@@ -1521,7 +1524,8 @@
 	/* Initialize QH */
 	qh->hsotg = hsotg;
 	timer_setup(&qh->unreserve_timer, dwc2_unreserve_timer_fn, 0);
-	timer_setup(&qh->wait_timer, dwc2_wait_timer_fn, 0);
+	hrtimer_init(&qh->wait_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	qh->wait_timer.function = &dwc2_wait_timer_fn;
 	qh->ep_type = ep_type;
 	qh->ep_is_in = ep_is_in;
 
@@ -1690,7 +1694,7 @@
 	 * won't do anything anyway, but we want it to finish before we free
 	 * memory.
 	 */
-	del_timer_sync(&qh->wait_timer);
+	hrtimer_cancel(&qh->wait_timer);
 
 	dwc2_host_put_tt_info(hsotg, qh->dwc_tt);
 
@@ -1716,6 +1720,7 @@
 {
 	int status;
 	u32 intr_mask;
+	ktime_t delay;
 
 	if (dbg_qh(qh))
 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
@@ -1734,8 +1739,8 @@
 			list_add_tail(&qh->qh_list_entry,
 				      &hsotg->non_periodic_sched_waiting);
 			qh->wait_timer_cancel = false;
-			mod_timer(&qh->wait_timer,
-				  jiffies + DWC2_RETRY_WAIT_DELAY + 1);
+			delay = ktime_set(0, DWC2_RETRY_WAIT_DELAY);
+			hrtimer_start(&qh->wait_timer, delay, HRTIMER_MODE_REL);
 		} else {
 			list_add_tail(&qh->qh_list_entry,
 				      &hsotg->non_periodic_sched_inactive);
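
The hcd_queue change exists because msecs_to_jiffies(1) rounds up to a full tick -- 4 ms at HZ=250, 10 ms at HZ=100 -- far longer than the intended 1 ms NAK back-off, so the timer_list is converted to an hrtimer with a nanosecond-resolution delay. The conversion pattern in isolation (hypothetical my_* names):

	#include <linux/hrtimer.h>
	#include <linux/ktime.h>

	struct my_qh {
		struct hrtimer wait_timer;
	};

	static enum hrtimer_restart my_wait_timer_fn(struct hrtimer *t)
	{
		struct my_qh *qh = container_of(t, struct my_qh, wait_timer);

		/* requeue qh's transactions here, then stop the timer */
		return HRTIMER_NORESTART;	/* one-shot, like the old timer */
	}

	static void my_qh_init(struct my_qh *qh)
	{
		hrtimer_init(&qh->wait_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		qh->wait_timer.function = my_wait_timer_fn;
	}

	static void my_qh_arm(struct my_qh *qh)
	{
		/* exactly 1 ms, independent of HZ */
		hrtimer_start(&qh->wait_timer, ktime_set(0, NSEC_PER_MSEC),
			      HRTIMER_MODE_REL);
	}

	static void my_qh_free(struct my_qh *qh)
	{
		hrtimer_cancel(&qh->wait_timer);	/* replaces del_timer_sync() */
	}
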
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index bf7052e..ef2c199 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -110,6 +110,7 @@
 	p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
 	p->ahbcfg = GAHBCFG_HBSTLEN_INCR8 <<
 		GAHBCFG_HBSTLEN_SHIFT;
+	p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
 }
 
 static void dwc2_set_amcc_params(struct dwc2_hsotg *hsotg)
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 1f9f05f..181a0498 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -188,18 +188,41 @@
 	BUS_VOTE_MAX
 };
 
-struct usb_irq {
-	char *name;
-	int irq;
-	bool enable;
+struct usb_irq_info {
+	const char	*name;
+	unsigned long	irq_type;
+	bool		required;
 };
 
-static const struct usb_irq usb_irq_info[USB_MAX_IRQ] = {
-	{"hs_phy_irq", 0},
-	{"pwr_event_irq", 0},
-	{"dp_hs_phy_irq", 0},
-	{"dm_hs_phy_irq", 0},
-	{"ss_phy_irq", 0},
+static const struct usb_irq_info usb_irq_info[USB_MAX_IRQ] = {
+	{ "hs_phy_irq",
+	  IRQF_TRIGGER_HIGH | IRQF_ONESHOT | IRQ_TYPE_LEVEL_HIGH |
+		 IRQF_EARLY_RESUME,
+	  false,
+	},
+	{ "pwr_event_irq",
+	  IRQF_TRIGGER_HIGH | IRQF_ONESHOT | IRQ_TYPE_LEVEL_HIGH |
+		 IRQF_EARLY_RESUME,
+	  true,
+	},
+	{ "dp_hs_phy_irq",
+	  IRQF_TRIGGER_RISING | IRQF_ONESHOT | IRQF_EARLY_RESUME,
+	  false,
+	},
+	{ "dm_hs_phy_irq",
+	  IRQF_TRIGGER_RISING | IRQF_ONESHOT | IRQF_EARLY_RESUME,
+	  false,
+	},
+	{ "ss_phy_irq",
+	  IRQF_TRIGGER_HIGH | IRQF_ONESHOT | IRQ_TYPE_LEVEL_HIGH |
+		 IRQF_EARLY_RESUME,
+	  false,
+	},
+};
+
+struct usb_irq {
+	int irq;
+	bool enable;
 };
 
 static const char * const gsi_op_strings[] = {
@@ -2722,9 +2745,7 @@
 
 	/* Check speed and Type-C polarity values in order to configure PHY */
 	if (edev && extcon_get_state(edev, extcon_id)) {
-		/* Use default dwc->maximum_speed if speed isn't reported */
-		if (dwc->maximum_speed > dwc->max_hw_supp_speed)
-			dwc->maximum_speed = dwc->max_hw_supp_speed;
+		dwc->maximum_speed = dwc->max_hw_supp_speed;
 
 		ret = extcon_get_property(edev, extcon_id,
 				EXTCON_PROP_USB_SS, &val);
@@ -2732,12 +2753,12 @@
 		if (!ret && val.intval == 0)
 			dwc->maximum_speed = USB_SPEED_HIGH;
 
-		if (mdwc->override_usb_speed) {
+		if (mdwc->override_usb_speed &&
+				mdwc->override_usb_speed < dwc->maximum_speed) {
 			dwc->maximum_speed = mdwc->override_usb_speed;
 			dwc->gadget.max_speed = dwc->maximum_speed;
 			dbg_event(0xFF, "override_speed",
 					mdwc->override_usb_speed);
-			mdwc->override_usb_speed = 0;
 		}
 
 		dbg_event(0xFF, "speed", dwc->maximum_speed);
@@ -3294,6 +3315,8 @@
 			req_speed <= dwc->max_hw_supp_speed) {
 		mdwc->override_usb_speed = req_speed;
 		schedule_work(&mdwc->restart_usb_work);
+	} else if (req_speed >= dwc->max_hw_supp_speed) {
+		mdwc->override_usb_speed = 0;
 	}
 
 	return count;
@@ -3416,7 +3439,6 @@
 	struct resource *res;
 	int ret = 0, size = 0, i;
 	u32 val;
-	unsigned long irq_type;
 
 	mdwc = devm_kzalloc(&pdev->dev, sizeof(*mdwc), GFP_KERNEL);
 	if (!mdwc)
@@ -3471,18 +3493,14 @@
 		mdwc->lpm_to_suspend_delay = 0;
 	}
 
-	memcpy(mdwc->wakeup_irq, usb_irq_info, sizeof(usb_irq_info));
 	for (i = 0; i < USB_MAX_IRQ; i++) {
-		irq_type = IRQF_TRIGGER_HIGH | IRQF_ONESHOT |
-			IRQ_TYPE_LEVEL_HIGH | IRQF_EARLY_RESUME;
 		mdwc->wakeup_irq[i].irq = platform_get_irq_byname(pdev,
-					mdwc->wakeup_irq[i].name);
+					usb_irq_info[i].name);
 		if (mdwc->wakeup_irq[i].irq < 0) {
 			/* pwr_evnt_irq is only mandatory irq */
-			if (!strcmp(mdwc->wakeup_irq[i].name,
-						"pwr_event_irq")) {
+			if (usb_irq_info[i].required) {
+				dev_err(&pdev->dev, "get_irq for %s failed\n",
-						mdwc->wakeup_irq[i].name);
+						usb_irq_info[i].name);
 				ret = -EINVAL;
 				goto err;
 			}
@@ -3490,15 +3508,16 @@
 		} else {
 			irq_set_status_flags(mdwc->wakeup_irq[i].irq,
 						IRQ_NOAUTOEN);
+
 			ret = devm_request_threaded_irq(&pdev->dev,
 					mdwc->wakeup_irq[i].irq,
 					msm_dwc3_pwr_irq,
 					msm_dwc3_pwr_irq_thread,
-					irq_type,
-					mdwc->wakeup_irq[i].name, mdwc);
+					usb_irq_info[i].irq_type,
+					usb_irq_info[i].name, mdwc);
 			if (ret) {
+				dev_err(&pdev->dev, "irq req %s failed: %d\n",
-						mdwc->wakeup_irq[i].name, ret);
+						usb_irq_info[i].name, ret);
 				goto err;
 			}
 		}
@@ -3756,6 +3775,7 @@
 	return 0;
 
 put_dwc3:
+	platform_device_put(mdwc->dwc3);
 	if (mdwc->bus_perf_client)
 		msm_bus_scale_unregister_client(mdwc->bus_perf_client);
 
@@ -3812,6 +3832,7 @@
 
 	if (mdwc->hs_phy)
 		mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
+	platform_device_put(mdwc->dwc3);
 	of_platform_depopulate(&pdev->dev);
 
 	dbg_event(0xFF, "Remov put", 0);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 8427958..fdc6e4e 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -170,20 +170,20 @@
 			 * put the gpio descriptors again here because the phy driver
 			 * might want to grab them, too.
 			 */
-			gpio = devm_gpiod_get_optional(&pdev->dev, "cs",
-						       GPIOD_OUT_LOW);
+			gpio = gpiod_get_optional(&pdev->dev, "cs", GPIOD_OUT_LOW);
 			if (IS_ERR(gpio))
 				return PTR_ERR(gpio);
 
 			gpiod_set_value_cansleep(gpio, 1);
+			gpiod_put(gpio);
 
-			gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
-						       GPIOD_OUT_LOW);
+			gpio = gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
 			if (IS_ERR(gpio))
 				return PTR_ERR(gpio);
 
 			if (gpio) {
 				gpiod_set_value_cansleep(gpio, 1);
+				gpiod_put(gpio);
 				usleep_range(10000, 11000);
 			}
 		}
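
The dwc3-pci hunk deliberately trades devm_gpiod_get_optional() for the unmanaged variant: the quirk only needs to pulse "cs" and "reset" once during probe, and keeping devm-owned descriptors alive would stop the PHY driver from requesting the same lines later, as the context comment notes. The momentary-use shape, sketched with a hypothetical helper:

	#include <linux/gpio/consumer.h>

	/* Drive an optional GPIO high once, then release it for other owners. */
	static int pulse_optional_gpio(struct device *dev, const char *con_id)
	{
		struct gpio_desc *gpio;

		gpio = gpiod_get_optional(dev, con_id, GPIOD_OUT_LOW);
		if (IS_ERR(gpio))
			return PTR_ERR(gpio);
		if (!gpio)
			return 0;		/* line not described on this board */

		gpiod_set_value_cansleep(gpio, 1);
		gpiod_put(gpio);		/* unmanaged: release explicitly */
		return 0;
	}
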
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index d7a1401..abe7f64 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -284,6 +284,8 @@
 	req->started = false;
 	list_del(&req->list);
 	req->remaining = 0;
+	req->unaligned = false;
+	req->zero = false;
 
 	if (req->request.status == -EINPROGRESS)
 		req->request.status = status;
@@ -1027,6 +1029,9 @@
 {
 	u8 tmp = index;
 
+	if (!dep->trb_pool)
+		return NULL;
+
 	if (!tmp)
 		tmp = DWC3_TRB_NUM - 1;
 
@@ -1694,7 +1699,11 @@
 		else
 			trb = &dwc->ep0_trb[dep->trb_enqueue];
 
-		transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO;
+		if (trb)
+			transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO;
+		else
+			transfer_in_flight = false;
+
 		started = !list_empty(&dep->started_list);
 
 		if (!protocol && ((dep->direction && transfer_in_flight) ||
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 382dd21..7c2b88d 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -954,6 +954,9 @@
 
 	/* when we return, be sure our power usage is valid */
 	power = c->MaxPower ? c->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW;
+	if (gadget->speed < USB_SPEED_SUPER)
+		power = min(power, 500U);
+
 done:
 	usb_gadget_vbus_draw(gadget, power);
 	if (result >= 0 && cdev->delayed_status)
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 67d8a50..fea02c7 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -358,6 +358,7 @@
 	bool extcon_host;		/* check id and set EXTCON_USB_HOST */
 	bool extcon_usb;		/* check vbus and set EXTCON_USB */
 	bool forced_b_device;
+	bool start_to_connect;
 };
 
 #define gadget_to_renesas_usb3(_gadget)	\
@@ -476,7 +477,8 @@
 static void usb3_init_epc_registers(struct renesas_usb3 *usb3)
 {
 	usb3_write(usb3, ~0, USB3_USB_INT_STA_1);
-	usb3_enable_irq_1(usb3, USB_INT_1_VBUS_CNG);
+	if (!usb3->workaround_for_vbus)
+		usb3_enable_irq_1(usb3, USB_INT_1_VBUS_CNG);
 }
 
 static bool usb3_wakeup_usb2_phy(struct renesas_usb3 *usb3)
@@ -700,8 +702,7 @@
 	usb3_set_mode_by_role_sw(usb3, host);
 	usb3_vbus_out(usb3, a_dev);
 	/* for A-Peripheral or forced B-device mode */
-	if ((!host && a_dev) ||
-	    (usb3->workaround_for_vbus && usb3->forced_b_device))
+	if ((!host && a_dev) || usb3->start_to_connect)
 		usb3_connect(usb3);
 	spin_unlock_irqrestore(&usb3->lock, flags);
 }
@@ -2432,7 +2433,11 @@
 	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
 		return -EFAULT;
 
-	if (!strncmp(buf, "1", 1))
+	usb3->start_to_connect = false;
+	if (usb3->workaround_for_vbus && usb3->forced_b_device &&
+	    !strncmp(buf, "2", 1))
+		usb3->start_to_connect = true;
+	else if (!strncmp(buf, "1", 1))
 		usb3->forced_b_device = true;
 	else
 		usb3->forced_b_device = false;
@@ -2440,7 +2445,7 @@
 	if (usb3->workaround_for_vbus)
 		usb3_disconnect(usb3);
 
-	/* Let this driver call usb3_connect() anyway */
+	/* Let this driver call usb3_connect() if needed */
 	usb3_check_id(usb3);
 
 	return count;
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index 984892d..42668ae 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -1979,6 +1979,8 @@
 
 static void r8a66597_endpoint_disable(struct usb_hcd *hcd,
 				      struct usb_host_endpoint *hep)
+__acquires(r8a66597->lock)
+__releases(r8a66597->lock)
 {
 	struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
 	struct r8a66597_pipe *pipe = (struct r8a66597_pipe *)hep->hcpriv;
@@ -1991,13 +1993,14 @@
 		return;
 	pipenum = pipe->info.pipenum;
 
+	spin_lock_irqsave(&r8a66597->lock, flags);
 	if (pipenum == 0) {
 		kfree(hep->hcpriv);
 		hep->hcpriv = NULL;
+		spin_unlock_irqrestore(&r8a66597->lock, flags);
 		return;
 	}
 
-	spin_lock_irqsave(&r8a66597->lock, flags);
 	pipe_stop(r8a66597, pipe);
 	pipe_irq_disable(r8a66597, pipenum);
 	disable_irq_empty(r8a66597, pipenum);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 61b5e41..53184b6 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1659,7 +1659,8 @@
 		portsc_buf[port_index] = 0;
 
 		/* Bail out if a USB3 port has a new device in link training */
-		if ((t1 & PORT_PLS_MASK) == XDEV_POLLING) {
+		if ((hcd->speed >= HCD_USB3) &&
+		    (t1 & PORT_PLS_MASK) == XDEV_POLLING) {
 			bus_state->bus_suspended = 0;
 			spin_unlock_irqrestore(&xhci->lock, flags);
 			xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 3c556ae..3a0a47f 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1869,6 +1869,8 @@
 	unsigned		sw_lpm_support:1;
 	/* support xHCI 1.0 spec USB2 hardware LPM */
 	unsigned		hw_lpm_support:1;
+	/* Broken Suspend flag for SNPS Suspend resume issue */
+	unsigned		broken_suspend:1;
 	/* cached usb2 extened protocol capabilites */
 	u32                     *ext_caps;
 	unsigned int            num_ext_caps;
@@ -1886,8 +1888,6 @@
 	void			*dbc;
 	/* platform-specific data -- must come last */
 	unsigned long		priv[0] __aligned(sizeof(s64));
-	/* Broken Suspend flag for SNPS Suspend resume issue */
-	u8			broken_suspend;
 };
 
 /* Platform specific overrides to generic XHCI hc_driver ops */
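
The xhci.h move is a layout correctness fix: priv[0] is a zero-length, alignment-sensitive array for platform-private data and must remain the last member, so the u8 broken_suspend previously placed after it landed inside that private area. Folding the flag into the existing bitfield costs no space and restores the invariant. Schematically (hypothetical struct):

	struct hcd_like {
		unsigned	sw_lpm_support:1;
		unsigned	hw_lpm_support:1;
		unsigned	broken_suspend:1;	/* new flags join the bitfield */

		/* platform-specific data -- must come last */
		unsigned long	priv[0] __aligned(sizeof(s64));
		/* anything declared here would overlap the priv[] storage */
	};
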
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index bbc48d7..a9253d9 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -457,6 +457,7 @@
 	struct mutex		svid_handler_lock;
 	struct list_head	svid_handlers;
 	ktime_t			svdm_start_time;
+	bool			vdm_in_suspend;
 
 	struct list_head	instance;
 
@@ -659,15 +660,21 @@
 {
 	struct usbpd_svid_handler *handler;
 
-	mutex_lock(&pd->svid_handler_lock);
+	/* in_interrupt() == true when handling VDM RX during suspend */
+	if (!in_interrupt())
+		mutex_lock(&pd->svid_handler_lock);
+
 	list_for_each_entry(handler, &pd->svid_handlers, entry) {
 		if (svid == handler->svid) {
-			mutex_unlock(&pd->svid_handler_lock);
+			if (!in_interrupt())
+				mutex_unlock(&pd->svid_handler_lock);
 			return handler;
 		}
 	}
 
-	mutex_unlock(&pd->svid_handler_lock);
+	if (!in_interrupt())
+		mutex_unlock(&pd->svid_handler_lock);
+
 	return NULL;
 }
 
@@ -900,10 +907,12 @@
 	pm_stay_awake(&pd->dev);
 	pd->sm_queued = true;
 
-	if (ms)
+	if (ms) {
+		usbpd_dbg(&pd->dev, "delay %d ms", ms);
 		hrtimer_start(&pd->timer, ms_to_ktime(ms), HRTIMER_MODE_REL);
-	else
+	} else {
 		queue_work(pd->wq, &pd->sm_work);
+	}
 }
 
 static void phy_sig_received(struct usbpd *pd, enum pd_sig_type sig)
@@ -1072,6 +1081,8 @@
 	return rx_msg;	/* queue it for usbpd_sm */
 }
 
+static void handle_vdm_rx(struct usbpd *pd, struct rx_msg *rx_msg);
+
 static void phy_msg_received(struct usbpd *pd, enum pd_sop_type sop,
 		u8 *buf, size_t len)
 {
@@ -1144,6 +1155,13 @@
 			return;
 	}
 
+	if (pd->vdm_in_suspend && msg_type == MSG_VDM) {
+		usbpd_dbg(&pd->dev, "Skip wq and handle VDM directly\n");
+		handle_vdm_rx(pd, rx_msg);
+		kfree(rx_msg);
+		return;
+	}
+
 	spin_lock_irqsave(&pd->rx_lock, flags);
 	list_add_tail(&rx_msg->entry, &pd->rx_q);
 	spin_unlock_irqrestore(&pd->rx_lock, flags);
@@ -1170,7 +1188,6 @@
 {
 	struct usbpd *pd = container_of(timer, struct usbpd, timer);
 
-	usbpd_dbg(&pd->dev, "timeout");
 	queue_work(pd->wq, &pd->sm_work);
 
 	return HRTIMER_NORESTART;
@@ -1304,8 +1321,13 @@
 {
 	struct vdm_tx *vdm_tx;
 
-	if (pd->vdm_tx)
-		return -EBUSY;
+	if (pd->vdm_tx) {
+		usbpd_warn(&pd->dev, "Discarding previously queued VDM tx (SVID:0x%04x)\n",
+				VDM_HDR_SVID(pd->vdm_tx->data[0]));
+
+		kfree(pd->vdm_tx);
+		pd->vdm_tx = NULL;
+	}
 
 	vdm_tx = kzalloc(sizeof(*vdm_tx), GFP_KERNEL);
 	if (!vdm_tx)
@@ -1318,6 +1340,7 @@
 
 	/* VDM will get sent in PE_SRC/SNK_READY state handling */
 	pd->vdm_tx = vdm_tx;
+	pd->vdm_in_suspend = false;
 
 	/* slight delay before queuing to prioritize handling of incoming VDM */
 	if (pd->in_explicit_contract)
@@ -1340,6 +1363,14 @@
 }
 EXPORT_SYMBOL(usbpd_send_svdm);
 
+void usbpd_vdm_in_suspend(struct usbpd *pd, bool in_suspend)
+{
+	usbpd_dbg(&pd->dev, "VDM in_suspend:%d\n", in_suspend);
+
+	pd->vdm_in_suspend = in_suspend;
+}
+EXPORT_SYMBOL(usbpd_vdm_in_suspend);
+
 static void handle_vdm_resp_ack(struct usbpd *pd, u32 *vdos, u8 num_vdos,
 	u16 vdm_hdr)
 {
@@ -1523,17 +1554,17 @@
 		return;
 	}
 
-	/* if this interrupts a previous exchange, abort queued response */
-	if (cmd_type == SVDM_CMD_TYPE_INITIATOR && pd->vdm_tx) {
-		usbpd_dbg(&pd->dev, "Discarding previously queued SVDM tx (SVID:0x%04x)\n",
-				VDM_HDR_SVID(pd->vdm_tx->data[0]));
-
-		kfree(pd->vdm_tx);
-		pd->vdm_tx = NULL;
-	}
+	if (cmd_type != SVDM_CMD_TYPE_INITIATOR &&
+			pd->current_state != PE_SRC_STARTUP_WAIT_FOR_VDM_RESP)
+		start_src_ams(pd, false);
 
 	if (handler && handler->svdm_received) {
 		handler->svdm_received(handler, cmd, cmd_type, vdos, num_vdos);
+
+		/* handle any previously queued TX */
+		if (pd->vdm_tx && !pd->sm_queued)
+			kick_sm(pd, 0);
+
 		return;
 	}
 
@@ -1683,6 +1714,7 @@
 	kfree(pd->vdm_tx);
 	pd->vdm_tx = NULL;
 	pd->ss_lane_svid = 0x0;
+	pd->vdm_in_suspend = false;
 }
 
 static void handle_get_src_cap_extended(struct usbpd *pd)
@@ -2298,7 +2330,7 @@
 
 	pd->in_explicit_contract = true;
 
-	if (pd->vdm_tx)
+	if (pd->vdm_tx && !pd->sm_queued)
 		kick_sm(pd, 0);
 	else if (pd->current_dr == DR_DFP && pd->vdm_state == VDM_NONE)
 		usbpd_send_svdm(pd, USBPD_SID,
@@ -2359,8 +2391,6 @@
 		}
 
 		vconn_swap(pd);
-		if (!pd->vdm_tx)
-			start_src_ams(pd, false);
 	} else if (IS_DATA(rx_msg, MSG_VDM)) {
 		handle_vdm_rx(pd, rx_msg);
 	} else if (IS_CTRL(rx_msg, MSG_GET_SOURCE_CAP_EXTENDED)) {
diff --git a/drivers/usb/phy/phy-msm-snps-hs.c b/drivers/usb/phy/phy-msm-snps-hs.c
index 2db22943..994a884 100644
--- a/drivers/usb/phy/phy-msm-snps-hs.c
+++ b/drivers/usb/phy/phy-msm-snps-hs.c
@@ -26,11 +26,9 @@
 #define SLEEPM					BIT(0)
 
 #define USB2_PHY_USB_PHY_UTMI_CTRL5		(0x50)
-#define ATERESET				BIT(0)
 #define POR					BIT(1)
 
 #define USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON0	(0x54)
-#define VATESTENB_MASK				(0x3 << 0)
 #define RETENABLEN				BIT(3)
 #define FSEL_MASK				(0x7 << 4)
 #define FSEL_DEFAULT				(0x3 << 4)
@@ -49,13 +47,6 @@
 #define USB2_SUSPEND_N				BIT(2)
 #define USB2_SUSPEND_N_SEL			BIT(3)
 
-#define USB2_PHY_USB_PHY_HS_PHY_TEST0		(0x80)
-#define TESTDATAIN_MASK				(0xff << 0)
-
-#define USB2_PHY_USB_PHY_HS_PHY_TEST1		(0x84)
-#define TESTDATAOUTSEL				BIT(4)
-#define TOGGLE_2WR				BIT(6)
-
 #define USB2_PHY_USB_PHY_CFG0			(0x94)
 #define UTMI_PHY_DATAPATH_CTRL_OVERRIDE_EN	BIT(0)
 #define UTMI_PHY_CMN_CTRL_OVERRIDE_EN		BIT(1)
@@ -372,7 +363,8 @@
 	msm_hsphy_reset(phy);
 
 	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_CFG0,
-	UTMI_PHY_CMN_CTRL_OVERRIDE_EN, UTMI_PHY_CMN_CTRL_OVERRIDE_EN);
+				UTMI_PHY_CMN_CTRL_OVERRIDE_EN,
+				UTMI_PHY_CMN_CTRL_OVERRIDE_EN);
 
 	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_UTMI_CTRL5,
 				POR, POR);
@@ -412,26 +404,9 @@
 	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON2,
 				VREGBYPASS, VREGBYPASS);
 
-	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_UTMI_CTRL5,
-				ATERESET, ATERESET);
-
-	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_TEST1,
-				TESTDATAOUTSEL, TESTDATAOUTSEL);
-
-	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_TEST1,
-				TOGGLE_2WR, TOGGLE_2WR);
-
-	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON0,
-				VATESTENB_MASK, 0);
-
-	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_TEST0,
-				TESTDATAIN_MASK, 0);
-
 	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL2,
-				USB2_SUSPEND_N_SEL, USB2_SUSPEND_N_SEL);
-
-	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL2,
-				USB2_SUSPEND_N, USB2_SUSPEND_N);
+				USB2_SUSPEND_N_SEL | USB2_SUSPEND_N,
+				USB2_SUSPEND_N_SEL | USB2_SUSPEND_N);
 
 	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_UTMI_CTRL0,
 				SLEEPM, SLEEPM);
@@ -443,7 +418,7 @@
 				USB2_SUSPEND_N_SEL, 0);
 
 	msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_CFG0,
-	UTMI_PHY_CMN_CTRL_OVERRIDE_EN, 0);
+				UTMI_PHY_CMN_CTRL_OVERRIDE_EN, 0);
 
 	return 0;
 }
diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c
index 2dfbbd9..cfe9914 100644
--- a/drivers/usb/phy/phy-msm-ssusb-qmp.c
+++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c
@@ -29,7 +29,7 @@
 /* default CORE votlage and load values */
 #define USB_SSPHY_1P2_VOL_MIN		1200000 /* uV */
 #define USB_SSPHY_1P2_VOL_MAX		1200000 /* uV */
-#define USB_SSPHY_HPM_LOAD		23000	/* uA */
+#define USB_SSPHY_HPM_LOAD		30000	/* uA */
 
 /* USB3PHY_PCIE_USB3_PCS_PCS_STATUS bit */
 #define PHYSTATUS				BIT(6)
@@ -118,8 +118,10 @@
 
 	struct regulator	*vdd;
 	int			vdd_levels[3]; /* none, low, high */
+	int			vdd_max_uA;
 	struct regulator	*core_ldo;
 	int			core_voltage_levels[3];
+	int			core_max_uA;
 	struct clk		*ref_clk_src;
 	struct clk		*ref_clk;
 	struct clk		*aux_clk;
@@ -248,11 +250,17 @@
 	if (!on)
 		goto disable_regulators;
 
+	rc = regulator_set_load(phy->vdd, phy->vdd_max_uA);
+	if (rc < 0) {
+		dev_err(phy->phy.dev, "Unable to set HPM of %s\n", "vdd");
+		return rc;
+	}
+
 	rc = regulator_set_voltage(phy->vdd, phy->vdd_levels[min],
 				    phy->vdd_levels[2]);
 	if (rc) {
-		dev_err(phy->phy.dev, "unable to set voltage for ssusb vdd\n");
-		return rc;
+		dev_err(phy->phy.dev, "Unable to set voltage for %s\n", "vdd");
+		goto put_vdd_lpm;
 	}
 
 	dev_dbg(phy->phy.dev, "min_vol:%d max_vol:%d\n",
@@ -260,15 +268,13 @@
 
 	rc = regulator_enable(phy->vdd);
 	if (rc) {
-		dev_err(phy->phy.dev,
-			"regulator_enable(phy->vdd) failed, ret=%d",
-			rc);
+		dev_err(phy->phy.dev, "Unable to enable %s\n", "vdd");
 		goto unconfig_vdd;
 	}
 
-	rc = regulator_set_load(phy->core_ldo, USB_SSPHY_HPM_LOAD);
+	rc = regulator_set_load(phy->core_ldo, phy->core_max_uA);
 	if (rc < 0) {
-		dev_err(phy->phy.dev, "Unable to set HPM of core_ldo\n");
+		dev_err(phy->phy.dev, "Unable to set HPM of %s\n", "core_ldo");
 		goto disable_vdd;
 	}
 
@@ -276,13 +282,14 @@
 			phy->core_voltage_levels[CORE_LEVEL_MIN],
 			phy->core_voltage_levels[CORE_LEVEL_MAX]);
 	if (rc) {
-		dev_err(phy->phy.dev, "unable to set voltage for core_ldo\n");
+		dev_err(phy->phy.dev, "Unable to set voltage for %s\n",
+				"core_ldo");
 		goto put_core_ldo_lpm;
 	}
 
 	rc = regulator_enable(phy->core_ldo);
 	if (rc) {
-		dev_err(phy->phy.dev, "Unable to enable core_ldo\n");
+		dev_err(phy->phy.dev, "Unable to enable %s\n", "core_ldo");
 		goto unset_core_ldo;
 	}
 
@@ -291,31 +298,36 @@
 disable_regulators:
 	rc = regulator_disable(phy->core_ldo);
 	if (rc)
-		dev_err(phy->phy.dev, "Unable to disable core_ldo\n");
+		dev_err(phy->phy.dev, "Unable to disable %s\n", "core_ldo");
 
 unset_core_ldo:
 	rc = regulator_set_voltage(phy->core_ldo,
 			phy->core_voltage_levels[CORE_LEVEL_NONE],
 			phy->core_voltage_levels[CORE_LEVEL_MAX]);
 	if (rc)
-		dev_err(phy->phy.dev, "unable to set voltage for core_ldo\n");
+		dev_err(phy->phy.dev, "Unable to set voltage for %s\n",
+				"core_ldo");
 
 put_core_ldo_lpm:
 	rc = regulator_set_load(phy->core_ldo, 0);
 	if (rc < 0)
-		dev_err(phy->phy.dev, "Unable to set LPM of core_ldo\n");
+		dev_err(phy->phy.dev, "Unable to set LPM of %s\n", "core_ldo");
 
 disable_vdd:
 	rc = regulator_disable(phy->vdd);
 	if (rc)
-		dev_err(phy->phy.dev, "regulator_disable(phy->vdd) failed, ret=%d",
-			rc);
+		dev_err(phy->phy.dev, "Unable to disable %s\n", "vdd");
 
 unconfig_vdd:
 	rc = regulator_set_voltage(phy->vdd, phy->vdd_levels[min],
 				    phy->vdd_levels[2]);
 	if (rc)
-		dev_err(phy->phy.dev, "unable to set voltage for ssusb vdd\n");
+		dev_err(phy->phy.dev, "Unable to set voltage for %s\n", "vdd");
+
+put_vdd_lpm:
+	rc = regulator_set_load(phy->vdd, 0);
+	if (rc < 0)
+		dev_err(phy->phy.dev, "Unable to set LPM of %s\n", "vdd");
 
 	return rc < 0 ? rc : 0;
 }
@@ -1109,6 +1121,10 @@
 		}
 	}
 
+	if (of_property_read_s32(dev->of_node, "qcom,core-max-load-uA",
+				&phy->core_max_uA) || !phy->core_max_uA)
+		phy->core_max_uA = USB_SSPHY_HPM_LOAD;
+
 	if (of_get_property(dev->of_node, "qcom,vdd-voltage-level", &len) &&
 		len == sizeof(phy->vdd_levels)) {
 		ret = of_property_read_u32_array(dev->of_node,
@@ -1125,6 +1141,10 @@
 		goto err;
 	}
 
+	if (of_property_read_s32(dev->of_node, "qcom,vdd-max-load-uA",
+				&phy->vdd_max_uA) || !phy->vdd_max_uA)
+		phy->vdd_max_uA = USB_SSPHY_HPM_LOAD;
+
 	phy->vdd = devm_regulator_get(dev, "vdd");
 	if (IS_ERR(phy->vdd)) {
 		dev_err(dev, "unable to get vdd supply\n");
diff --git a/drivers/usb/roles/Kconfig b/drivers/usb/roles/Kconfig
index f5a5e6f..e4194ac 100644
--- a/drivers/usb/roles/Kconfig
+++ b/drivers/usb/roles/Kconfig
@@ -1,3 +1,16 @@
+config USB_ROLE_SWITCH
+	tristate "USB Role Switch Support"
+	help
+	  USB Role Switch is a device that can select the USB role - host or
+	  device - for a USB port (connector). In most cases a dual-role
+	  capable USB controller will also represent the switch, but on some
+	  platforms a multiplexer/demultiplexer switch is used to route the
+	  data lines on the USB connector between separate USB host and
+	  device controllers.
+
+	  Say Y here if your USB connectors support both device and host roles.
+	  To compile the driver as a module, choose M here: the module will be
+	  called roles.ko.
+
 if USB_ROLE_SWITCH
 
 config USB_ROLES_INTEL_XHCI
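
The help text above describes the role-switch abstraction that the relocated
class code (drivers/usb/roles/class.c, see below) implements. As a rough
consumer-side illustration, the sketch that follows shows how a driver might
force a port into device mode; the wrapper function and its device argument
are illustrative, while usb_role_switch_get(), usb_role_switch_set_role() and
usb_role_switch_put() are the class entry points declared in
include/linux/usb/role.h.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/usb/role.h>

/* Minimal sketch: look up the role switch tied to a device and force
 * the connector into device mode. */
static int example_force_device_role(struct device *dev)
{
	struct usb_role_switch *sw;
	int ret;

	sw = usb_role_switch_get(dev);	/* takes a reference */
	if (IS_ERR_OR_NULL(sw))
		return sw ? PTR_ERR(sw) : -ENODEV;

	ret = usb_role_switch_set_role(sw, USB_ROLE_DEVICE);
	usb_role_switch_put(sw);	/* drop the reference when done */
	return ret;
}
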
diff --git a/drivers/usb/roles/Makefile b/drivers/usb/roles/Makefile
index e44b179..c028732 100644
--- a/drivers/usb/roles/Makefile
+++ b/drivers/usb/roles/Makefile
@@ -1 +1,3 @@
-obj-$(CONFIG_USB_ROLES_INTEL_XHCI) += intel-xhci-usb-role-switch.o
+obj-$(CONFIG_USB_ROLE_SWITCH)		+= roles.o
+roles-y					:= class.o
+obj-$(CONFIG_USB_ROLES_INTEL_XHCI)	+= intel-xhci-usb-role-switch.o
diff --git a/drivers/usb/common/roles.c b/drivers/usb/roles/class.c
similarity index 100%
rename from drivers/usb/common/roles.c
rename to drivers/usb/roles/class.c
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e72ad9f..fb54434 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1164,6 +1164,10 @@
 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
+	{ USB_DEVICE(TELIT_VENDOR_ID, 0x1900),				/* Telit LN940 (QMI) */
+	  .driver_info = NCTRL(0) | RSVD(1) },
+	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff),	/* Telit LN940 (MBIM) */
+	  .driver_info = NCTRL(0) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
 	  .driver_info = RSVD(1) },
@@ -1328,6 +1332,7 @@
 	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x0602, 0xff) },	/* GosunCn ZTE WeLink ME3630 (MBIM mode) */
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
 	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
@@ -1531,6 +1536,7 @@
 	  .driver_info = RSVD(2) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff),  /* Telewell TW-LTE 4G v2 */
 	  .driver_info = RSVD(2) },
+	{ USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) },	/* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1758,6 +1764,7 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
 	  .driver_info = RSVD(5) | RSVD(6) },
+	{ USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) },	/* Simcom SIM7500/SIM7600 MBIM mode */
 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
 	  .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
@@ -1940,7 +1947,18 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) },	/* HP lt2523 (Novatel E371) */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x10) },	/* HP lt4132 (Huawei ME906s-158) */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x12) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x13) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x14) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) },
+	{ USB_DEVICE(0x1508, 0x1001),						/* Fibocom NL668 */
+	  .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
+	{ USB_DEVICE(0x2cb7, 0x0104),						/* Fibocom NL678 series */
+	  .driver_info = RSVD(4) | RSVD(5) },
+	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff),			/* Fibocom NL678 series */
+	  .driver_info = RSVD(6) },
 	{ } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index e41f725..5a6df6e 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -46,6 +46,7 @@
 	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
 	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
 	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) },
+	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_TB) },
 	{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
 	{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
 	{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID),
@@ -91,9 +92,14 @@
 	{ USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) },
 	{ USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
 	{ USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
+	{ USB_DEVICE(HP_VENDOR_ID, HP_LD220TA_PRODUCT_ID) },
 	{ USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) },
+	{ USB_DEVICE(HP_VENDOR_ID, HP_LD960TA_PRODUCT_ID) },
 	{ USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },
 	{ USB_DEVICE(HP_VENDOR_ID, HP_LCM960_PRODUCT_ID) },
+	{ USB_DEVICE(HP_VENDOR_ID, HP_LM920_PRODUCT_ID) },
+	{ USB_DEVICE(HP_VENDOR_ID, HP_LM940_PRODUCT_ID) },
+	{ USB_DEVICE(HP_VENDOR_ID, HP_TD620_PRODUCT_ID) },
 	{ USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
 	{ USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) },
 	{ USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 26965cc..559941c 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -8,6 +8,7 @@
 
 #define PL2303_VENDOR_ID	0x067b
 #define PL2303_PRODUCT_ID	0x2303
+#define PL2303_PRODUCT_ID_TB		0x2304
 #define PL2303_PRODUCT_ID_RSAQ2		0x04bb
 #define PL2303_PRODUCT_ID_DCU11		0x1234
 #define PL2303_PRODUCT_ID_PHAROS	0xaaa0
@@ -20,6 +21,7 @@
 #define PL2303_PRODUCT_ID_MOTOROLA	0x0307
 #define PL2303_PRODUCT_ID_ZTEK		0xe1f1
 
+
 #define ATEN_VENDOR_ID		0x0557
 #define ATEN_VENDOR_ID2		0x0547
 #define ATEN_PRODUCT_ID		0x2008
@@ -119,10 +121,15 @@
 
 /* Hewlett-Packard POS Pole Displays */
 #define HP_VENDOR_ID		0x03f0
+#define HP_LM920_PRODUCT_ID	0x026b
+#define HP_TD620_PRODUCT_ID	0x0956
 #define HP_LD960_PRODUCT_ID	0x0b39
 #define HP_LCM220_PRODUCT_ID	0x3139
 #define HP_LCM960_PRODUCT_ID	0x3239
 #define HP_LD220_PRODUCT_ID	0x3524
+#define HP_LD220TA_PRODUCT_ID	0x4349
+#define HP_LD960TA_PRODUCT_ID	0x4439
+#define HP_LM940_PRODUCT_ID	0x5039
 
 /* Cressi Edy (diving computer) PC interface */
 #define CRESSI_VENDOR_ID	0x04b8
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 4d02735..edbbb13 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -85,7 +85,8 @@
 /* Motorola Tetra driver */
 #define MOTOROLA_TETRA_IDS()			\
 	{ USB_DEVICE(0x0cad, 0x9011) },	/* Motorola Solutions TETRA PEI */ \
-	{ USB_DEVICE(0x0cad, 0x9012) }	/* MTP6550 */
+	{ USB_DEVICE(0x0cad, 0x9012) },	/* MTP6550 */ \
+	{ USB_DEVICE(0x0cad, 0x9016) }	/* TPG2200 */
 DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
 
 /* Novatel Wireless GPS driver */
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index e227bb5..101ebac 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -235,8 +235,12 @@
 		if (!(us->fflags & US_FL_NEEDS_CAP16))
 			sdev->try_rc_10_first = 1;
 
-		/* assume SPC3 or latter devices support sense size > 18 */
-		if (sdev->scsi_level > SCSI_SPC_2)
+		/*
+		 * assume SPC3 or later devices support sense size > 18
+		 * unless US_FL_BAD_SENSE quirk is specified.
+		 */
+		if (sdev->scsi_level > SCSI_SPC_2 &&
+		    !(us->fflags & US_FL_BAD_SENSE))
 			us->fflags |= US_FL_SANE_SENSE;
 
 		/*
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index f7f83b21..ea0d27a 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1266,6 +1266,18 @@
 		US_FL_FIX_CAPACITY ),
 
 /*
+ * Reported by Icenowy Zheng <icenowy@aosc.io>
+ * The SMI SM3350 USB-UFS bridge controller will enter a wrong state,
+ * in which it does not process read/write commands, if a long sense is
+ * requested, so force it to use an 18-byte sense.
+ */
+UNUSUAL_DEV(  0x090c, 0x3350, 0x0000, 0xffff,
+		"SMI",
+		"SM3350 UFS-to-USB-Mass-Storage bridge",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_BAD_SENSE ),
+
+/*
  * Reported by Paul Hartman <paul.hartman+linux@gmail.com>
  * This card reader returns "Illegal Request, Logical Block Address
  * Out of Range" for the first READ(10) after a new card is inserted.
diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
index c74cc9c..3457c1f 100644
--- a/drivers/usb/typec/tcpm.c
+++ b/drivers/usb/typec/tcpm.c
@@ -317,6 +317,9 @@
 	/* Deadline in jiffies to exit src_try_wait state */
 	unsigned long max_wait;
 
+	/* port belongs to a self powered device */
+	bool self_powered;
+
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *dentry;
 	struct mutex logbuffer_lock;	/* log buffer access lock */
@@ -3257,7 +3260,8 @@
 	case SRC_HARD_RESET_VBUS_OFF:
 		tcpm_set_vconn(port, true);
 		tcpm_set_vbus(port, false);
-		tcpm_set_roles(port, false, TYPEC_SOURCE, TYPEC_HOST);
+		tcpm_set_roles(port, port->self_powered, TYPEC_SOURCE,
+			       TYPEC_HOST);
 		tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
 		break;
 	case SRC_HARD_RESET_VBUS_ON:
@@ -3270,7 +3274,8 @@
 		memset(&port->pps_data, 0, sizeof(port->pps_data));
 		tcpm_set_vconn(port, false);
 		tcpm_set_charge(port, false);
-		tcpm_set_roles(port, false, TYPEC_SINK, TYPEC_DEVICE);
+		tcpm_set_roles(port, port->self_powered, TYPEC_SINK,
+			       TYPEC_DEVICE);
 		/*
 		 * VBUS may or may not toggle, depending on the adapter.
 		 * If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON
@@ -4415,6 +4420,8 @@
 		return -EINVAL;
 	port->operating_snk_mw = mw / 1000;
 
+	port->self_powered = fwnode_property_read_bool(fwnode, "self-powered");
+
 	return 0;
 }
 
@@ -4723,6 +4730,7 @@
 	port->typec_caps.prefer_role = tcfg->default_role;
 	port->typec_caps.type = tcfg->type;
 	port->typec_caps.data = tcfg->data;
+	port->self_powered = port->tcpc->config->self_powered;
 
 	return 0;
 }
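
For context on the self_powered plumbing above: a port can declare itself
self-powered either via the "self-powered" fwnode/device-tree property read
in the hunk above, or via the static tcpc_config, which the dereference
port->tcpc->config->self_powered implies now carries a self_powered field.
A hedged sketch of the static-config variant follows; the surrounding values
are illustrative, not taken from this patch.

#include <linux/usb/tcpm.h>

/* Sketch only: a low-level TCPC driver marking its port self-powered so
 * that tcpm preserves the attached data role across a hard reset. */
static const struct tcpc_config example_tcpc_config = {
	.type		= TYPEC_PORT_DRP,
	.data		= TYPEC_PORT_DRD,
	.default_role	= TYPEC_SINK,
	.self_powered	= true,	/* read as port->tcpc->config->self_powered */
};
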
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index d9fd318..64cbc2d 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -878,7 +878,7 @@
 		return -EINVAL;
 	if (!unmap->size || unmap->size & mask)
 		return -EINVAL;
-	if (unmap->iova + unmap->size < unmap->iova ||
+	if (unmap->iova + unmap->size - 1 < unmap->iova ||
 	    unmap->size > SIZE_MAX)
 		return -EINVAL;
 
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 4e656f8..39155d7 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1024,7 +1024,8 @@
 		if (nvq->done_idx > VHOST_NET_BATCH)
 			vhost_net_signal_used(nvq);
 		if (unlikely(vq_log))
-			vhost_log_write(vq, vq_log, log, vhost_len);
+			vhost_log_write(vq, vq_log, log, vhost_len,
+					vq->iov, in);
 		total_len += vhost_len;
 		if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) {
 			vhost_poll_queue(&vq->poll);
@@ -1113,7 +1114,8 @@
 		n->vqs[i].rx_ring = NULL;
 		vhost_net_buf_init(&n->vqs[i].rxq);
 	}
-	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
+	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
+		       UIO_MAXIOV + VHOST_NET_BATCH);
 
 	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
 	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index e7e3ae1..0cfa925 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1398,7 +1398,7 @@
 		vqs[i] = &vs->vqs[i].vq;
 		vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
 	}
-	vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
+	vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV);
 
 	vhost_scsi_init_inflight(vs, NULL);
 
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index eb95daa..cf82e72 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -390,9 +390,9 @@
 		vq->indirect = kmalloc_array(UIO_MAXIOV,
 					     sizeof(*vq->indirect),
 					     GFP_KERNEL);
-		vq->log = kmalloc_array(UIO_MAXIOV, sizeof(*vq->log),
+		vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
 					GFP_KERNEL);
-		vq->heads = kmalloc_array(UIO_MAXIOV, sizeof(*vq->heads),
+		vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
 					  GFP_KERNEL);
 		if (!vq->indirect || !vq->log || !vq->heads)
 			goto err_nomem;
@@ -414,7 +414,7 @@
 }
 
 void vhost_dev_init(struct vhost_dev *dev,
-		    struct vhost_virtqueue **vqs, int nvqs)
+		    struct vhost_virtqueue **vqs, int nvqs, int iov_limit)
 {
 	struct vhost_virtqueue *vq;
 	int i;
@@ -427,6 +427,7 @@
 	dev->iotlb = NULL;
 	dev->mm = NULL;
 	dev->worker = NULL;
+	dev->iov_limit = iov_limit;
 	init_llist_head(&dev->work_list);
 	init_waitqueue_head(&dev->wait);
 	INIT_LIST_HEAD(&dev->read_list);
@@ -1733,13 +1734,87 @@
 	return r;
 }
 
+static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
+{
+	struct vhost_umem *umem = vq->umem;
+	struct vhost_umem_node *u;
+	u64 start, end, l, min;
+	int r;
+	bool hit = false;
+
+	while (len) {
+		min = len;
+		/* More than one GPA can be mapped into a single HVA, so
+		 * iterate over all possible umems here to be safe.
+		 */
+		list_for_each_entry(u, &umem->umem_list, link) {
+			if (u->userspace_addr > hva - 1 + len ||
+			    u->userspace_addr - 1 + u->size < hva)
+				continue;
+			start = max(u->userspace_addr, hva);
+			end = min(u->userspace_addr - 1 + u->size,
+				  hva - 1 + len);
+			l = end - start + 1;
+			r = log_write(vq->log_base,
+				      u->start + start - u->userspace_addr,
+				      l);
+			if (r < 0)
+				return r;
+			hit = true;
+			min = min(l, min);
+		}
+
+		if (!hit)
+			return -EFAULT;
+
+		len -= min;
+		hva += min;
+	}
+
+	return 0;
+}
+
+static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
+{
+	struct iovec iov[64];
+	int i, ret;
+
+	if (!vq->iotlb)
+		return log_write(vq->log_base, vq->log_addr + used_offset, len);
+
+	ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
+			     len, iov, 64, VHOST_ACCESS_WO);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < ret; i++) {
+		ret = log_write_hva(vq,	(uintptr_t)iov[i].iov_base,
+				    iov[i].iov_len);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
-		    unsigned int log_num, u64 len)
+		    unsigned int log_num, u64 len, struct iovec *iov, int count)
 {
 	int i, r;
 
 	/* Make sure data written is seen before log. */
 	smp_wmb();
+
+	if (vq->iotlb) {
+		for (i = 0; i < count; i++) {
+			r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
+					  iov[i].iov_len);
+			if (r < 0)
+				return r;
+		}
+		return 0;
+	}
+
 	for (i = 0; i < log_num; ++i) {
 		u64 l = min(log[i].len, len);
 		r = log_write(vq->log_base, log[i].addr, l);
@@ -1769,9 +1844,8 @@
 		smp_wmb();
 		/* Log used flag write. */
 		used = &vq->used->flags;
-		log_write(vq->log_base, vq->log_addr +
-			  (used - (void __user *)vq->used),
-			  sizeof vq->used->flags);
+		log_used(vq, (used - (void __user *)vq->used),
+			 sizeof vq->used->flags);
 		if (vq->log_ctx)
 			eventfd_signal(vq->log_ctx, 1);
 	}
@@ -1789,9 +1863,8 @@
 		smp_wmb();
 		/* Log avail event write */
 		used = vhost_avail_event(vq);
-		log_write(vq->log_base, vq->log_addr +
-			  (used - (void __user *)vq->used),
-			  sizeof *vhost_avail_event(vq));
+		log_used(vq, (used - (void __user *)vq->used),
+			 sizeof *vhost_avail_event(vq));
 		if (vq->log_ctx)
 			eventfd_signal(vq->log_ctx, 1);
 	}
@@ -2191,10 +2264,8 @@
 		/* Make sure data is seen before log. */
 		smp_wmb();
 		/* Log used ring entry write. */
-		log_write(vq->log_base,
-			  vq->log_addr +
-			   ((void __user *)used - (void __user *)vq->used),
-			  count * sizeof *used);
+		log_used(vq, ((void __user *)used - (void __user *)vq->used),
+			 count * sizeof *used);
 	}
 	old = vq->last_used_idx;
 	new = (vq->last_used_idx += count);
@@ -2233,10 +2304,11 @@
 		return -EFAULT;
 	}
 	if (unlikely(vq->log_used)) {
+		/* Make sure used idx is seen before log. */
+		smp_wmb();
 		/* Log used index update. */
-		log_write(vq->log_base,
-			  vq->log_addr + offsetof(struct vring_used, idx),
-			  sizeof vq->used->idx);
+		log_used(vq, offsetof(struct vring_used, idx),
+			 sizeof vq->used->idx);
 		if (vq->log_ctx)
 			eventfd_signal(vq->log_ctx, 1);
 	}
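
A note on the interval arithmetic in log_write_hva() above: each umem region
is tested against [hva, hva + len) using inclusive end points computed as
base - 1 + length, which cannot wrap even for a region ending at the top of
the address space, and the overlap is then [max of starts, min of ends]. A
self-contained sketch of that intersection step, with illustrative names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* All bounds inclusive; "start - 1 + len" avoids overflow at the top of
 * the address space, mirroring the checks in log_write_hva(). */
static bool range_overlap(uint64_t a_start, uint64_t a_len,
			  uint64_t b_start, uint64_t b_len,
			  uint64_t *out_len)
{
	uint64_t a_end = a_start - 1 + a_len;
	uint64_t b_end = b_start - 1 + b_len;
	uint64_t start, end;

	if (b_start > a_end || b_end < a_start)
		return false;			/* disjoint */

	start = a_start > b_start ? a_start : b_start;
	end = a_end < b_end ? a_end : b_end;
	*out_len = end - start + 1;
	return true;
}

int main(void)
{
	uint64_t l;

	/* region [0x1000, 0x2FFF] vs request [0x2000, 0x3FFF]: 4 KiB overlap */
	if (range_overlap(0x1000, 0x2000, 0x2000, 0x2000, &l))
		printf("overlap = %llu bytes\n", (unsigned long long)l);
	return 0;
}
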
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 466ef75..9490e7d 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -170,9 +170,11 @@
 	struct list_head read_list;
 	struct list_head pending_list;
 	wait_queue_head_t wait;
+	int iov_limit;
 };
 
-void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
+void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
+		    int nvqs, int iov_limit);
 long vhost_dev_set_owner(struct vhost_dev *dev);
 bool vhost_dev_has_owner(struct vhost_dev *dev);
 long vhost_dev_check_owner(struct vhost_dev *);
@@ -205,7 +207,8 @@
 bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
 
 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
-		    unsigned int log_num, u64 len);
+		    unsigned int log_num, u64 len,
+		    struct iovec *iov, int count);
 int vq_iotlb_prefetch(struct vhost_virtqueue *vq);
 
 struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 98ed5be..fa93f67 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -531,7 +531,7 @@
 	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
 	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
 
-	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));
+	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs), UIO_MAXIOV);
 
 	file->private_data = vsock;
 	spin_lock_init(&vsock->send_pkt_list_lock);
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 09731b2..c6b3bdb 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -271,6 +271,7 @@
 
 static void vgacon_restore_screen(struct vc_data *c)
 {
+	c->vc_origin = c->vc_visible_origin;
 	vgacon_scrollback_cur->save = 0;
 
 	if (!vga_is_gfx && !vgacon_scrollback_cur->restore) {
@@ -287,8 +288,7 @@
 	int start, end, count, soff;
 
 	if (!lines) {
-		c->vc_visible_origin = c->vc_origin;
-		vga_set_mem_top(c);
+		vgacon_restore_screen(c);
 		return;
 	}
 
@@ -298,6 +298,7 @@
 	if (!vgacon_scrollback_cur->save) {
 		vgacon_cursor(c, CM_ERASE);
 		vgacon_save_screen(c);
+		c->vc_origin = (unsigned long)c->vc_screenbuf;
 		vgacon_scrollback_cur->save = 1;
 	}
 
@@ -335,7 +336,7 @@
 		int copysize;
 
 		int diff = c->vc_rows - count;
-		void *d = (void *) c->vc_origin;
+		void *d = (void *) c->vc_visible_origin;
 		void *s = (void *) c->vc_screenbuf;
 
 		count *= c->vc_size_row;
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
index a3edb20..a846d32 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
@@ -609,6 +609,8 @@
 
 	int r = 0;
 
+	memset(&p, 0, sizeof(p));
+
 	switch (cmd) {
 	case OMAPFB_SYNC_GFX:
 		DBG("ioctl SYNC_GFX\n");
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index bbed039..d59c8a5 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -2234,10 +2234,8 @@
 	if (!info)
 		return ERR_PTR(-ENOMEM);
 	ret = of_get_pxafb_mode_info(dev, info);
-	if (ret) {
-		kfree(info->modes);
+	if (ret)
 		return ERR_PTR(ret);
-	}
 
 	/*
 	 * On purpose, neither lccrX registers nor video memory size can be
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index e6c1934..fe1f163 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1650,7 +1650,7 @@
 			xen_have_vector_callback = 0;
 			return;
 		}
-		pr_info("Xen HVM callback vector for event delivery is enabled\n");
+		pr_info_once("Xen HVM callback vector for event delivery is enabled\n");
 		alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
 				xen_hvm_callback_vector);
 	}
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 38b8ce0..cdbb888 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -104,6 +104,20 @@
 }
 EXPORT_SYMBOL(invalidate_bdev);
 
+static void set_init_blocksize(struct block_device *bdev)
+{
+	unsigned bsize = bdev_logical_block_size(bdev);
+	loff_t size = i_size_read(bdev->bd_inode);
+
+	while (bsize < PAGE_SIZE) {
+		if (size & bsize)
+			break;
+		bsize <<= 1;
+	}
+	bdev->bd_block_size = bsize;
+	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
+}
+
 int set_blocksize(struct block_device *bdev, int size)
 {
 	/* Size must be a power of two, and between 512 and PAGE_SIZE */
@@ -1408,18 +1422,9 @@
 
 void bd_set_size(struct block_device *bdev, loff_t size)
 {
-	unsigned bsize = bdev_logical_block_size(bdev);
-
 	inode_lock(bdev->bd_inode);
 	i_size_write(bdev->bd_inode, size);
 	inode_unlock(bdev->bd_inode);
-	while (bsize < PAGE_SIZE) {
-		if (size & bsize)
-			break;
-		bsize <<= 1;
-	}
-	bdev->bd_block_size = bsize;
-	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
 }
 EXPORT_SYMBOL(bd_set_size);
 
@@ -1496,8 +1501,10 @@
 				}
 			}
 
-			if (!ret)
+			if (!ret) {
 				bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
+				set_init_blocksize(bdev);
+			}
 
 			/*
 			 * If the device is invalidated, rescan partition
@@ -1532,6 +1539,7 @@
 				goto out_clear;
 			}
 			bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
+			set_init_blocksize(bdev);
 		}
 
 		if (bdev->bd_bdi == &noop_backing_dev_info)
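
The new set_init_blocksize() helper above selects the largest power-of-two
block size, up to PAGE_SIZE, to which the device capacity is aligned: bsize
keeps doubling while the size has no bit set at the candidate position. A
hedged user-space rendition with worked values (a 4096-byte page is assumed):

#include <stdio.h>

#define PAGE_SIZE 4096	/* assumption for the example */

/* Mirrors the loop in set_init_blocksize(): grow bsize while it still
 * divides the size evenly and stays below PAGE_SIZE. */
static unsigned int pick_blocksize(unsigned int logical, long long size)
{
	unsigned int bsize = logical;

	while (bsize < PAGE_SIZE) {
		if (size & bsize)
			break;
		bsize <<= 1;
	}
	return bsize;
}

int main(void)
{
	printf("%u\n", pick_blocksize(512, 7168));	/* 7168 = 0x1C00 -> 1024 */
	printf("%u\n", pick_blocksize(512, 1 << 20));	/* 1 MiB -> 4096 */
	return 0;
}
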
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 1343ac5..7177d1d 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -147,6 +147,12 @@
 	u64 last_unlink_trans;
 
 	/*
+	 * Track the transaction id of the last transaction used to create a
+	 * hard link for the inode. This is used by the log tree (fsync).
+	 */
+	u64 last_link_trans;
+
+	/*
 	 * Number of bytes outstanding that are going to need csums.  This is
 	 * used in ENOSPC accounting.
 	 */
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 089b46c..48ac8b7 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1003,6 +1003,48 @@
 	return 0;
 }
 
+static struct extent_buffer *alloc_tree_block_no_bg_flush(
+					  struct btrfs_trans_handle *trans,
+					  struct btrfs_root *root,
+					  u64 parent_start,
+					  const struct btrfs_disk_key *disk_key,
+					  int level,
+					  u64 hint,
+					  u64 empty_size)
+{
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct extent_buffer *ret;
+
+	/*
+	 * If we are COWing a node/leaf from the extent, chunk, device or free
+	 * space trees, make sure that we do not finish block group creation of
+	 * pending block groups. We do this to avoid a deadlock.
+	 * COWing can result in allocation of a new chunk, and flushing pending
+	 * block groups (btrfs_create_pending_block_groups()) can be triggered
+	 * when finishing allocation of a new chunk. Creation of a pending block
+	 * group modifies the extent, chunk, device and free space trees,
+	 * therefore we could deadlock with ourselves since we are holding a
+	 * lock on an extent buffer that btrfs_create_pending_block_groups() may
+	 * try to COW later.
+	 * For similar reasons, we also need to delay flushing pending block
+	 * groups when splitting a leaf or node, from one of those trees, since
+	 * we are holding a write lock on it and its parent or when inserting a
+	 * new root node for one of those trees.
+	 */
+	if (root == fs_info->extent_root ||
+	    root == fs_info->chunk_root ||
+	    root == fs_info->dev_root ||
+	    root == fs_info->free_space_root)
+		trans->can_flush_pending_bgs = false;
+
+	ret = btrfs_alloc_tree_block(trans, root, parent_start,
+				     root->root_key.objectid, disk_key, level,
+				     hint, empty_size);
+	trans->can_flush_pending_bgs = true;
+
+	return ret;
+}
+
 /*
  * does the dirty work in cow of a single block.  The parent block (if
  * supplied) is updated to point to the new cow copy.  The new buffer is marked
@@ -1050,26 +1092,8 @@
 	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
 		parent_start = parent->start;
 
-	/*
-	 * If we are COWing a node/leaf from the extent, chunk or device trees,
-	 * make sure that we do not finish block group creation of pending block
-	 * groups. We do this to avoid a deadlock.
-	 * COWing can result in allocation of a new chunk, and flushing pending
-	 * block groups (btrfs_create_pending_block_groups()) can be triggered
-	 * when finishing allocation of a new chunk. Creation of a pending block
-	 * group modifies the extent, chunk and device trees, therefore we could
-	 * deadlock with ourselves since we are holding a lock on an extent
-	 * buffer that btrfs_create_pending_block_groups() may try to COW later.
-	 */
-	if (root == fs_info->extent_root ||
-	    root == fs_info->chunk_root ||
-	    root == fs_info->dev_root)
-		trans->can_flush_pending_bgs = false;
-
-	cow = btrfs_alloc_tree_block(trans, root, parent_start,
-			root->root_key.objectid, &disk_key, level,
-			search_start, empty_size);
-	trans->can_flush_pending_bgs = true;
+	cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key,
+					   level, search_start, empty_size);
 	if (IS_ERR(cow))
 		return PTR_ERR(cow);
 
@@ -2624,14 +2648,27 @@
 	root_lock = BTRFS_READ_LOCK;
 
 	if (p->search_commit_root) {
-		/* The commit roots are read only so we always do read locks */
-		if (p->need_commit_sem)
+		/*
+		 * The commit roots are read only, so we always take read
+		 * locks, and we must hold the commit_root_sem when searching
+		 * them. The only exception is send, where we don't want to
+		 * block transaction commits for a long time, so we clone the
+		 * commit root in order to avoid races with transaction
+		 * commits that create a snapshot of one of the roots used by
+		 * a send operation.
+		 */
+		if (p->need_commit_sem) {
 			down_read(&fs_info->commit_root_sem);
-		b = root->commit_root;
-		extent_buffer_get(b);
-		level = btrfs_header_level(b);
-		if (p->need_commit_sem)
+			b = btrfs_clone_extent_buffer(root->commit_root);
 			up_read(&fs_info->commit_root_sem);
+			if (!b)
+				return ERR_PTR(-ENOMEM);
+
+		} else {
+			b = root->commit_root;
+			extent_buffer_get(b);
+		}
+		level = btrfs_header_level(b);
 		/*
 		 * Ensure that all callers have set skip_locking when
 		 * p->search_commit_root = 1.
@@ -2757,6 +2794,10 @@
 again:
 	prev_cmp = -1;
 	b = btrfs_search_slot_get_root(root, p, write_lock_level);
+	if (IS_ERR(b)) {
+		ret = PTR_ERR(b);
+		goto done;
+	}
 
 	while (b) {
 		level = btrfs_header_level(b);
@@ -3364,8 +3405,8 @@
 	else
 		btrfs_node_key(lower, &lower_key, 0);
 
-	c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
-				   &lower_key, level, root->node->start, 0);
+	c = alloc_tree_block_no_bg_flush(trans, root, 0, &lower_key, level,
+					 root->node->start, 0);
 	if (IS_ERR(c))
 		return PTR_ERR(c);
 
@@ -3494,8 +3535,8 @@
 	mid = (c_nritems + 1) / 2;
 	btrfs_node_key(c, &disk_key, mid);
 
-	split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
-			&disk_key, level, c->start, 0);
+	split = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, level,
+					     c->start, 0);
 	if (IS_ERR(split))
 		return PTR_ERR(split);
 
@@ -4279,8 +4320,8 @@
 	else
 		btrfs_item_key(l, &disk_key, mid);
 
-	right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
-			&disk_key, 0, l->start, 0);
+	right = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, 0,
+					     l->start, 0);
 	if (IS_ERR(right))
 		return PTR_ERR(right);
 
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 9814347..8fed470 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -800,39 +800,58 @@
 	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
 		result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED;
 		btrfs_dev_replace_write_unlock(dev_replace);
-		goto leave;
+		break;
 	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
+		result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
+		tgt_device = dev_replace->tgtdev;
+		src_device = dev_replace->srcdev;
+		btrfs_dev_replace_write_unlock(dev_replace);
+		btrfs_scrub_cancel(fs_info);
+		/* btrfs_dev_replace_finishing() will handle the cleanup part */
+		btrfs_info_in_rcu(fs_info,
+			"dev_replace from %s (devid %llu) to %s canceled",
+			btrfs_dev_name(src_device), src_device->devid,
+			btrfs_dev_name(tgt_device));
+		break;
 	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
+		/*
+		 * The scrub doing the replace isn't running, so we need to do
+		 * the cleanup step of btrfs_dev_replace_finishing() here.
+		 */
 		result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
 		tgt_device = dev_replace->tgtdev;
 		src_device = dev_replace->srcdev;
 		dev_replace->tgtdev = NULL;
 		dev_replace->srcdev = NULL;
+		dev_replace->replace_state =
+				BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED;
+		dev_replace->time_stopped = ktime_get_real_seconds();
+		dev_replace->item_needs_writeback = 1;
+
+		btrfs_dev_replace_write_unlock(dev_replace);
+
+		btrfs_scrub_cancel(fs_info);
+
+		trans = btrfs_start_transaction(root, 0);
+		if (IS_ERR(trans)) {
+			mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
+			return PTR_ERR(trans);
+		}
+		ret = btrfs_commit_transaction(trans);
+		WARN_ON(ret);
+
+		btrfs_info_in_rcu(fs_info,
+		"suspended dev_replace from %s (devid %llu) to %s canceled",
+			btrfs_dev_name(src_device), src_device->devid,
+			btrfs_dev_name(tgt_device));
+
+		if (tgt_device)
+			btrfs_destroy_dev_replace_tgtdev(tgt_device);
 		break;
+	default:
+		result = -EINVAL;
 	}
-	dev_replace->replace_state = BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED;
-	dev_replace->time_stopped = ktime_get_real_seconds();
-	dev_replace->item_needs_writeback = 1;
-	btrfs_dev_replace_write_unlock(dev_replace);
-	btrfs_scrub_cancel(fs_info);
 
-	trans = btrfs_start_transaction(root, 0);
-	if (IS_ERR(trans)) {
-		mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
-		return PTR_ERR(trans);
-	}
-	ret = btrfs_commit_transaction(trans);
-	WARN_ON(ret);
-
-	btrfs_info_in_rcu(fs_info,
-		"dev_replace from %s (devid %llu) to %s canceled",
-		btrfs_dev_name(src_device), src_device->devid,
-		btrfs_dev_name(tgt_device));
-
-	if (tgt_device)
-		btrfs_destroy_dev_replace_tgtdev(tgt_device);
-
-leave:
 	mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
 	return result;
 }
@@ -887,6 +906,8 @@
 			   "cannot continue dev_replace, tgtdev is missing");
 		btrfs_info(fs_info,
 			   "you may cancel the operation after 'mount -o degraded'");
+		dev_replace->replace_state =
+					BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
 		btrfs_dev_replace_write_unlock(dev_replace);
 		return 0;
 	}
@@ -898,6 +919,10 @@
 	 * dev-replace to start anyway.
 	 */
 	if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
+		btrfs_dev_replace_write_lock(dev_replace);
+		dev_replace->replace_state =
+					BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
+		btrfs_dev_replace_write_unlock(dev_replace);
 		btrfs_info(fs_info,
 		"cannot resume dev-replace, other exclusive operation running");
 		return 0;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index d4a7f7c..d96d139 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -4155,6 +4155,14 @@
 		spin_lock(&fs_info->ordered_root_lock);
 	}
 	spin_unlock(&fs_info->ordered_root_lock);
+
+	/*
+	 * We need this here because if we've been flipped read-only we won't
+	 * get sync() from the umount, so we need to make sure any ordered
+	 * extents whose dirty pages haven't started writeout yet
+	 * actually get run and error out properly.
+	 */
+	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
 }
 
 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 51e41e5..a16760b 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -8911,6 +8911,10 @@
 		goto out_free;
 	}
 
+	err = btrfs_run_delayed_items(trans);
+	if (err)
+		goto out_end_trans;
+
 	if (block_rsv)
 		trans->block_rsv = block_rsv;
 
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 7158b5b..83b3a62 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1373,7 +1373,8 @@
 			 * Do the same check as in btrfs_cross_ref_exist but
 			 * without the unnecessary search.
 			 */
-			if (btrfs_file_extent_generation(leaf, fi) <=
+			if (!nolock &&
+			    btrfs_file_extent_generation(leaf, fi) <=
 			    btrfs_root_last_snapshot(&root->root_item))
 				goto out_check;
 			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
@@ -3150,9 +3151,6 @@
 	/* once for the tree */
 	btrfs_put_ordered_extent(ordered_extent);
 
-	/* Try to release some metadata so we don't get an OOM but don't wait */
-	btrfs_btree_balance_dirty_nodelay(fs_info);
-
 	return ret;
 }
 
@@ -3688,6 +3686,21 @@
 	 * inode is not a directory, logging its parent unnecessarily.
 	 */
 	BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
+	/*
+	 * Similar reasoning applies to last_link_trans; it needs to be set,
+	 * since otherwise a sequence like the following:
+	 *
+	 * mkdir A
+	 * touch foo
+	 * ln foo A/bar
+	 * echo 2 > /proc/sys/vm/drop_caches
+	 * fsync foo
+	 * <power failure>
+	 *
+	 * would result in the link bar and directory A not existing after the
+	 * power failure.
+	 */
+	BTRFS_I(inode)->last_link_trans = BTRFS_I(inode)->last_trans;
 
 	path->slots[0]++;
 	if (inode->i_nlink != 1 ||
@@ -6427,14 +6440,19 @@
 		err = btrfs_del_root_ref(trans, key.objectid,
 					 root->root_key.objectid, parent_ino,
 					 &local_index, name, name_len);
-
+		if (err)
+			btrfs_abort_transaction(trans, err);
 	} else if (add_backref) {
 		u64 local_index;
 		int err;
 
 		err = btrfs_del_inode_ref(trans, root, name, name_len,
 					  ino, parent_ino, &local_index);
+		if (err)
+			btrfs_abort_transaction(trans, err);
 	}
+
+	/* Return the original error code */
 	return ret;
 }
 
@@ -6646,6 +6664,7 @@
 			if (err)
 				goto fail;
 		}
+		BTRFS_I(inode)->last_link_trans = trans->transid;
 		d_instantiate(dentry, inode);
 		ret = btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent,
 					 true, NULL);
@@ -9174,6 +9193,7 @@
 	ei->index_cnt = (u64)-1;
 	ei->dir_index = 0;
 	ei->last_unlink_trans = 0;
+	ei->last_link_trans = 0;
 	ei->last_log_commit = 0;
 
 	spin_lock_init(&ei->lock);
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index ff43466..e1fcb28 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1013,16 +1013,22 @@
 		btrfs_abort_transaction(trans, ret);
 		goto out_free_path;
 	}
-	spin_lock(&fs_info->qgroup_lock);
-	fs_info->quota_root = quota_root;
-	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
-	spin_unlock(&fs_info->qgroup_lock);
 
 	ret = btrfs_commit_transaction(trans);
 	trans = NULL;
 	if (ret)
 		goto out_free_path;
 
+	/*
+	 * Set quota enabled flag after committing the transaction, to avoid
+	 * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
+	 * creation.
+	 */
+	spin_lock(&fs_info->qgroup_lock);
+	fs_info->quota_root = quota_root;
+	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
+	spin_unlock(&fs_info->qgroup_lock);
+
 	ret = qgroup_rescan_init(fs_info, 0, 1);
 	if (!ret) {
 	        qgroup_rescan_zero_tracking(fs_info);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 8ad1458..8888337 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1677,6 +1677,7 @@
 				flags | SB_RDONLY, device_name, data);
 			if (IS_ERR(mnt_root)) {
 				root = ERR_CAST(mnt_root);
+				kfree(subvol_name);
 				goto out;
 			}
 
@@ -1686,12 +1687,14 @@
 			if (error < 0) {
 				root = ERR_PTR(error);
 				mntput(mnt_root);
+				kfree(subvol_name);
 				goto out;
 			}
 		}
 	}
 	if (IS_ERR(mnt_root)) {
 		root = ERR_CAST(mnt_root);
+		kfree(subvol_name);
 		goto out;
 	}
 
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 16ecb76..0805f8c 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -5781,6 +5781,22 @@
 			goto end_trans;
 	}
 
+	/*
+	 * If a new hard link was added to the inode in the current transaction
+	 * and its link count is now greater than 1, we need to fall back to a
+	 * transaction commit, otherwise we can end up not logging all the new
+	 * parents of all the hard links. Here, just from the dentry used to
+	 * fsync, we cannot visit the ancestor inodes for all the other hard
+	 * links to figure out if any is new, so we fall back to a transaction
+	 * commit (instead of adding a lot of complexity by scanning a btree,
+	 * since this scenario is not a common use case).
+	 */
+	if (inode->vfs_inode.i_nlink > 1 &&
+	    inode->last_link_trans > last_committed) {
+		ret = -EMLINK;
+		goto end_trans;
+	}
+
 	while (1) {
 		if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
 			break;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index f4405e4..0ee1cd4 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3712,6 +3712,7 @@
 	int ret;
 	u64 num_devices;
 	unsigned seq;
+	bool reducing_integrity;
 
 	if (btrfs_fs_closing(fs_info) ||
 	    atomic_read(&fs_info->balance_pause_req) ||
@@ -3796,24 +3797,30 @@
 		     !(bctl->sys.target & allowed)) ||
 		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
 		     (fs_info->avail_metadata_alloc_bits & allowed) &&
-		     !(bctl->meta.target & allowed))) {
-			if (bctl->flags & BTRFS_BALANCE_FORCE) {
-				btrfs_info(fs_info,
-				"balance: force reducing metadata integrity");
-			} else {
-				btrfs_err(fs_info,
-	"balance: reduces metadata integrity, use --force if you want this");
-				ret = -EINVAL;
-				goto out;
-			}
-		}
+		     !(bctl->meta.target & allowed)))
+			reducing_integrity = true;
+		else
+			reducing_integrity = false;
+
+		/* if we're not converting, the target field is uninitialized */
+		meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
+			bctl->meta.target : fs_info->avail_metadata_alloc_bits;
+		data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
+			bctl->data.target : fs_info->avail_data_alloc_bits;
 	} while (read_seqretry(&fs_info->profiles_lock, seq));
 
-	/* if we're not converting, the target field is uninitialized */
-	meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
-		bctl->meta.target : fs_info->avail_metadata_alloc_bits;
-	data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
-		bctl->data.target : fs_info->avail_data_alloc_bits;
+	if (reducing_integrity) {
+		if (bctl->flags & BTRFS_BALANCE_FORCE) {
+			btrfs_info(fs_info,
+				   "balance: force reducing metadata integrity");
+		} else {
+			btrfs_err(fs_info,
+	  "balance: reduces metadata integrity, use --force if you want this");
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
 	if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
 		btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
 		int meta_index = btrfs_bg_flags_to_raid_index(meta_target);
@@ -4761,19 +4768,17 @@
 	/*
 	 * Use the number of data stripes to figure out how big this chunk
 	 * is really going to be in terms of logical address space,
-	 * and compare that answer with the max chunk size
+	 * and compare that answer with the max chunk size. If it's higher,
+	 * we try to reduce stripe_size.
 	 */
 	if (stripe_size * data_stripes > max_chunk_size) {
-		stripe_size = div_u64(max_chunk_size, data_stripes);
-
-		/* bump the answer up to a 16MB boundary */
-		stripe_size = round_up(stripe_size, SZ_16M);
-
 		/*
-		 * But don't go higher than the limits we found while searching
-		 * for free extents
+		 * Reduce stripe_size, round it up to a 16MB boundary again and
+		 * then use it, unless it ends up being even bigger than the
+		 * previous value we had already.
 		 */
-		stripe_size = min(devices_info[ndevs - 1].max_avail,
+		stripe_size = min(round_up(div_u64(max_chunk_size,
+						   data_stripes), SZ_16M),
 				  stripe_size);
 	}
 
@@ -7467,6 +7472,8 @@
 	struct btrfs_path *path;
 	struct btrfs_root *root = fs_info->dev_root;
 	struct btrfs_key key;
+	u64 prev_devid = 0;
+	u64 prev_dev_ext_end = 0;
 	int ret = 0;
 
 	key.objectid = 1;
@@ -7511,10 +7518,22 @@
 		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
 		physical_len = btrfs_dev_extent_length(leaf, dext);
 
+		/* Check if this dev extent overlaps with the previous one */
+		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
+			btrfs_err(fs_info,
+"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
+				  devid, physical_offset, prev_dev_ext_end);
+			ret = -EUCLEAN;
+			goto out;
+		}
+
 		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
 					    physical_offset, physical_len);
 		if (ret < 0)
 			goto out;
+		prev_devid = devid;
+		prev_dev_ext_end = physical_offset + physical_len;
+
 		ret = btrfs_next_item(root, path);
 		if (ret < 0)
 			goto out;
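
On the reworked stripe_size clamp earlier in this volumes.c diff: the old
three-step sequence (divide, round up to 16 MiB, min against max_avail)
collapses into one expression that divides max_chunk_size by the number of
data stripes, rounds up to a 16 MiB boundary, and keeps the result only if it
does not exceed the stripe size already found from the free-extent search. A
worked example with illustrative numbers:

#include <stdint.h>
#include <stdio.h>

#define SZ_16M (16 * 1024 * 1024ULL)

/* Power-of-two round_up, as the kernel macro expands for these types. */
#define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
	uint64_t max_chunk_size = 10ULL << 30;	/* 10 GiB, illustrative */
	uint64_t data_stripes = 3;
	uint64_t stripe_size = 4ULL << 30;	/* from the free-extent search */
	uint64_t capped;

	capped = round_up(max_chunk_size / data_stripes, SZ_16M);
	/* keep the reduced value unless it exceeds what we already had */
	stripe_size = capped < stripe_size ? capped : stripe_size;
	printf("stripe_size = %llu\n", (unsigned long long)stripe_size);
	return 0;
}
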
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index ea78c3d..f141b45 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -11,6 +11,7 @@
 #include <linux/security.h>
 #include <linux/posix_acl_xattr.h>
 #include <linux/iversion.h>
+#include <linux/sched/mm.h>
 #include "ctree.h"
 #include "btrfs_inode.h"
 #include "transaction.h"
@@ -422,9 +423,15 @@
 {
 	const struct xattr *xattr;
 	struct btrfs_trans_handle *trans = fs_info;
+	unsigned int nofs_flag;
 	char *name;
 	int err = 0;
 
+	/*
+	 * We're holding a transaction handle, so use a NOFS memory allocation
+	 * context to avoid deadlock if reclaim happens.
+	 */
+	nofs_flag = memalloc_nofs_save();
 	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
 		name = kmalloc(XATTR_SECURITY_PREFIX_LEN +
 			       strlen(xattr->name) + 1, GFP_KERNEL);
@@ -440,6 +447,7 @@
 		if (err < 0)
 			break;
 	}
+	memalloc_nofs_restore(nofs_flag);
 	return err;
 }
 
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index dd7dfdd..c7542e8 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1030,6 +1030,8 @@
 	list_del_init(&ci->i_snap_realm_item);
 	ci->i_snap_realm_counter++;
 	ci->i_snap_realm = NULL;
+	if (realm->ino == ci->i_vino.ino)
+		realm->inode = NULL;
 	spin_unlock(&realm->inodes_with_caps_lock);
 	ceph_put_snap_realm(ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc,
 			    realm);
@@ -3566,7 +3568,6 @@
 			tcap->cap_id = t_cap_id;
 			tcap->seq = t_seq - 1;
 			tcap->issue_seq = t_seq - 1;
-			tcap->mseq = t_mseq;
 			tcap->issued |= issued;
 			tcap->implemented |= issued;
 			if (cap == ci->i_auth_cap)
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 5657b79..269471c 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1458,18 +1458,26 @@
 }
 
 static int
-cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
+		     bool malformed)
 {
 	int length;
-	struct cifs_readdata *rdata = mid->callback_data;
 
 	length = cifs_discard_remaining_data(server);
-	dequeue_mid(mid, rdata->result);
+	dequeue_mid(mid, malformed);
 	mid->resp_buf = server->smallbuf;
 	server->smallbuf = NULL;
 	return length;
 }
 
+static int
+cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+{
+	struct cifs_readdata *rdata = mid->callback_data;
+
+	return  __cifs_readv_discard(server, mid, rdata->result);
+}
+
 int
 cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 {
@@ -1511,12 +1519,23 @@
 		return -1;
 	}
 
+	/* set up first two iov for signature check and to get credits */
+	rdata->iov[0].iov_base = buf;
+	rdata->iov[0].iov_len = 4;
+	rdata->iov[1].iov_base = buf + 4;
+	rdata->iov[1].iov_len = server->total_read - 4;
+	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
+		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
+	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
+		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);
+
 	/* Was the SMB read successful? */
 	rdata->result = server->ops->map_error(buf, false);
 	if (rdata->result != 0) {
 		cifs_dbg(FYI, "%s: server returned error %d\n",
 			 __func__, rdata->result);
-		return cifs_readv_discard(server, mid);
+		/* normal error on read response */
+		return __cifs_readv_discard(server, mid, false);
 	}
 
 	/* Is there enough to get to the rest of the READ_RSP header? */
@@ -1560,14 +1579,6 @@
 		server->total_read += length;
 	}
 
-	/* set up first iov for signature check */
-	rdata->iov[0].iov_base = buf;
-	rdata->iov[0].iov_len = 4;
-	rdata->iov[1].iov_base = buf + 4;
-	rdata->iov[1].iov_len = server->total_read - 4;
-	cifs_dbg(FYI, "0: iov_base=%p iov_len=%u\n",
-		 rdata->iov[0].iov_base, server->total_read);
-
 	/* how much data is in the response? */
 #ifdef CONFIG_CIFS_SMB_DIRECT
 	use_rdma_mr = rdata->mr;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 52d71b6..a5ea742 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -50,6 +50,7 @@
 #include "cifs_unicode.h"
 #include "cifs_debug.h"
 #include "cifs_fs_sb.h"
+#include "dns_resolve.h"
 #include "ntlmssp.h"
 #include "nterr.h"
 #include "rfc1002pdu.h"
@@ -318,6 +319,53 @@
 					const char *devname, bool is_smb3);
 
 /*
+ * Resolve hostname and set ip addr in tcp ses. Useful for hostnames whose
+ * ip addresses may change at some point.
+ *
+ * This should be called with server->srv_mutex held.
+ */
+#ifdef CONFIG_CIFS_DFS_UPCALL
+static int reconn_set_ipaddr(struct TCP_Server_Info *server)
+{
+	int rc;
+	int len;
+	char *unc, *ipaddr = NULL;
+
+	if (!server->hostname)
+		return -EINVAL;
+
+	len = strlen(server->hostname) + 3;
+
+	unc = kmalloc(len, GFP_KERNEL);
+	if (!unc) {
+		cifs_dbg(FYI, "%s: failed to create UNC path\n", __func__);
+		return -ENOMEM;
+	}
+	snprintf(unc, len, "\\\\%s", server->hostname);
+
+	rc = dns_resolve_server_name_to_ip(unc, &ipaddr);
+	kfree(unc);
+
+	if (rc < 0) {
+		cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n",
+			 __func__, server->hostname, rc);
+		return rc;
+	}
+
+	rc = cifs_convert_address((struct sockaddr *)&server->dstaddr, ipaddr,
+				  strlen(ipaddr));
+	kfree(ipaddr);
+
+	return !rc ? -1 : 0;
+}
+#else
+static inline int reconn_set_ipaddr(struct TCP_Server_Info *server)
+{
+	return 0;
+}
+#endif
+
+/*
  * cifs tcp session reconnection
  *
  * mark tcp session as reconnecting so temporarily locked
@@ -417,6 +465,11 @@
 			rc = generic_ip_connect(server);
 		if (rc) {
 			cifs_dbg(FYI, "reconnect error %d\n", rc);
+			rc = reconn_set_ipaddr(server);
+			if (rc) {
+				cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
+					 __func__, rc);
+			}
 			mutex_unlock(&server->srv_mutex);
 			msleep(3000);
 		} else {
@@ -533,6 +586,21 @@
 	return false;
 }
 
+static inline bool
+zero_credits(struct TCP_Server_Info *server)
+{
+	int val;
+
+	spin_lock(&server->req_lock);
+	val = server->credits + server->echo_credits + server->oplock_credits;
+	if (server->in_flight == 0 && val == 0) {
+		spin_unlock(&server->req_lock);
+		return true;
+	}
+	spin_unlock(&server->req_lock);
+	return false;
+}
+
 static int
 cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
 {
@@ -545,6 +613,12 @@
 	for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
 		try_to_freeze();
 
+		/* reconnect if no credits and no requests in flight */
+		if (zero_credits(server)) {
+			cifs_reconnect(server);
+			return -ECONNABORTED;
+		}
+
 		if (server_unresponsive(server))
 			return -ECONNABORTED;
 		if (cifs_rdma_enabled(server) && server->smbd_conn)
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 8d41ca7..7b637fc 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1120,10 +1120,10 @@
 
 	/*
 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
-	 * and check it for zero before using.
+	 * and check it before using.
 	 */
 	max_buf = tcon->ses->server->maxBuf;
-	if (!max_buf) {
+	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
 		free_xid(xid);
 		return -EINVAL;
 	}
@@ -1460,10 +1460,10 @@
 
 	/*
 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
-	 * and check it for zero before using.
+	 * and check it before using.
 	 */
 	max_buf = tcon->ses->server->maxBuf;
-	if (!max_buf)
+	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
 		return -EINVAL;
 
 	max_num = (max_buf - sizeof(struct smb_hdr)) /
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
index 4ed10dd..2fc3d31 100644
--- a/fs/cifs/smb2file.c
+++ b/fs/cifs/smb2file.c
@@ -122,10 +122,10 @@
 
 	/*
 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
-	 * and check it for zero before using.
+	 * and check it before using.
 	 */
 	max_buf = tcon->ses->server->maxBuf;
-	if (!max_buf)
+	if (max_buf < sizeof(struct smb2_lock_element))
 		return -EINVAL;
 
 	max_num = max_buf / sizeof(struct smb2_lock_element);
diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
index 20a2d30..c3ae8c1 100644
--- a/fs/cifs/smb2maperror.c
+++ b/fs/cifs/smb2maperror.c
@@ -379,8 +379,8 @@
 	{STATUS_NONEXISTENT_EA_ENTRY, -EIO, "STATUS_NONEXISTENT_EA_ENTRY"},
 	{STATUS_NO_EAS_ON_FILE, -ENODATA, "STATUS_NO_EAS_ON_FILE"},
 	{STATUS_EA_CORRUPT_ERROR, -EIO, "STATUS_EA_CORRUPT_ERROR"},
-	{STATUS_FILE_LOCK_CONFLICT, -EIO, "STATUS_FILE_LOCK_CONFLICT"},
-	{STATUS_LOCK_NOT_GRANTED, -EIO, "STATUS_LOCK_NOT_GRANTED"},
+	{STATUS_FILE_LOCK_CONFLICT, -EACCES, "STATUS_FILE_LOCK_CONFLICT"},
+	{STATUS_LOCK_NOT_GRANTED, -EACCES, "STATUS_LOCK_NOT_GRANTED"},
 	{STATUS_DELETE_PENDING, -ENOENT, "STATUS_DELETE_PENDING"},
 	{STATUS_CTL_FILE_NOT_SUPPORTED, -ENOSYS,
 	"STATUS_CTL_FILE_NOT_SUPPORTED"},
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 6a9c475..7b8b58f 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -648,6 +648,13 @@
 	if (rsp->sync_hdr.Command != SMB2_OPLOCK_BREAK)
 		return false;
 
+	if (rsp->sync_hdr.CreditRequest) {
+		spin_lock(&server->req_lock);
+		server->credits += le16_to_cpu(rsp->sync_hdr.CreditRequest);
+		spin_unlock(&server->req_lock);
+		wake_up(&server->request_q);
+	}
+
 	if (rsp->StructureSize !=
 				smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) {
 		if (le16_to_cpu(rsp->StructureSize) == 44)
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 812da3e..237d728 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -34,6 +34,7 @@
 #include "cifs_ioctl.h"
 #include "smbdirect.h"
 
+/* Change credits for different ops and return the total number of credits */
 static int
 change_conf(struct TCP_Server_Info *server)
 {
@@ -41,17 +42,15 @@
 	server->oplock_credits = server->echo_credits = 0;
 	switch (server->credits) {
 	case 0:
-		return -1;
+		return 0;
 	case 1:
 		server->echoes = false;
 		server->oplocks = false;
-		cifs_dbg(VFS, "disabling echoes and oplocks\n");
 		break;
 	case 2:
 		server->echoes = true;
 		server->oplocks = false;
 		server->echo_credits = 1;
-		cifs_dbg(FYI, "disabling oplocks\n");
 		break;
 	default:
 		server->echoes = true;
@@ -64,14 +63,15 @@
 		server->echo_credits = 1;
 	}
 	server->credits -= server->echo_credits + server->oplock_credits;
-	return 0;
+	return server->credits + server->echo_credits + server->oplock_credits;
 }
 
 static void
 smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add,
 		 const int optype)
 {
-	int *val, rc = 0;
+	int *val, rc = -1;
+
 	spin_lock(&server->req_lock);
 	val = server->ops->get_credits_field(server, optype);
 	*val += add;
@@ -95,8 +95,26 @@
 	}
 	spin_unlock(&server->req_lock);
 	wake_up(&server->request_q);
-	if (rc)
-		cifs_reconnect(server);
+
+	if (server->tcpStatus == CifsNeedReconnect)
+		return;
+
+	switch (rc) {
+	case -1:
+		/* change_conf hasn't been executed */
+		break;
+	case 0:
+		cifs_dbg(VFS, "Possible client or server bug - zero credits\n");
+		break;
+	case 1:
+		cifs_dbg(VFS, "disabling echoes and oplocks\n");
+		break;
+	case 2:
+		cifs_dbg(FYI, "disabling oplocks\n");
+		break;
+	default:
+		cifs_dbg(FYI, "add %u credits total=%d\n", add, rc);
+	}
 }
 
 static void
@@ -154,14 +172,14 @@
 
 			scredits = server->credits;
 			/* can deadlock with reopen */
-			if (scredits == 1) {
+			if (scredits <= 8) {
 				*num = SMB2_MAX_BUFFER_SIZE;
 				*credits = 0;
 				break;
 			}
 
-			/* leave one credit for a possible reopen */
-			scredits--;
+			/* leave some credits for reopen and other ops */
+			scredits -= 8;
 			*num = min_t(unsigned int, size,
 				     scredits * SMB2_MAX_BUFFER_SIZE);
 
@@ -2901,11 +2919,23 @@
 			server->ops->is_status_pending(buf, server, 0))
 		return -1;
 
-	rdata->result = server->ops->map_error(buf, false);
+	/* set up first two iov to get credits */
+	rdata->iov[0].iov_base = buf;
+	rdata->iov[0].iov_len = 4;
+	rdata->iov[1].iov_base = buf + 4;
+	rdata->iov[1].iov_len =
+		min_t(unsigned int, buf_len, server->vals->read_rsp_size) - 4;
+	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
+		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
+	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
+		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);
+
+	rdata->result = server->ops->map_error(buf, true);
 	if (rdata->result != 0) {
 		cifs_dbg(FYI, "%s: server returned error %d\n",
 			 __func__, rdata->result);
-		dequeue_mid(mid, rdata->result);
+		/* normal error on read response */
+		dequeue_mid(mid, false);
 		return 0;
 	}
 
@@ -2978,14 +3008,6 @@
 		return 0;
 	}
 
-	/* set up first iov for signature check */
-	rdata->iov[0].iov_base = buf;
-	rdata->iov[0].iov_len = 4;
-	rdata->iov[1].iov_base = buf + 4;
-	rdata->iov[1].iov_len = server->vals->read_rsp_size - 4;
-	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
-		 rdata->iov[0].iov_base, server->vals->read_rsp_size);
-
 	length = rdata->copy_into_pages(server, rdata, &iter);
 
 	kfree(bvec);
@@ -3184,8 +3206,10 @@
 	}
 
 	/* TODO: add support for compounds containing READ. */
-	if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server))
+	if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) {
+		*num_mids = 1;
 		return receive_encrypted_read(server, &mids[0]);
+	}
 
 	return receive_encrypted_standard(server, mids, bufs, num_mids);
 }
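
change_conf() now reports the post-redistribution credit total instead of an error code, and smb2_add_credits() maps that total onto log levels. A simplified stand-alone model of the invariant this preserves (field names mirror the patch, but the redistribution policy is condensed and this is not kernel code):

#include <assert.h>

/* simplified mirror of the fields change_conf() redistributes */
struct server_credits {
	int credits;		/* general-purpose credits */
	int echo_credits;	/* reserved for SMB2 echo */
	int oplock_credits;	/* reserved for oplock breaks */
};

/*
 * Returns the total, matching the new change_conf() contract:
 * 0 means "no credits at all", anything else is the full amount.
 */
static int redistribute(struct server_credits *s, int total)
{
	s->echo_credits = (total >= 2) ? 1 : 0;
	s->oplock_credits = (total >= 3) ? 1 : 0;
	s->credits = total - s->echo_credits - s->oplock_credits;
	return total;
}

int main(void)
{
	struct server_credits s;

	assert(redistribute(&s, 5) == 5);
	/* the reserved and general pools always sum to the grant */
	assert(s.credits + s.echo_credits + s.oplock_credits == 5);
	return 0;
}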
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index f54d07b..8a01e89 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -3127,8 +3127,17 @@
 		rdata->mr = NULL;
 	}
 #endif
-	if (rdata->result)
+	if (rdata->result && rdata->result != -ENODATA) {
 		cifs_stats_fail_inc(tcon, SMB2_READ_HE);
+		trace_smb3_read_err(0 /* xid */,
+				    rdata->cfile->fid.persistent_fid,
+				    tcon->tid, tcon->ses->Suid, rdata->offset,
+				    rdata->bytes, rdata->result);
+	} else
+		trace_smb3_read_done(0 /* xid */,
+				     rdata->cfile->fid.persistent_fid,
+				     tcon->tid, tcon->ses->Suid,
+				     rdata->offset, rdata->got_bytes);
 
 	queue_work(cifsiod_wq, &rdata->work);
 	DeleteMidQEntry(mid);
@@ -3185,12 +3194,14 @@
 	if (rdata->credits) {
 		shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
 						SMB2_MAX_BUFFER_SIZE));
-		shdr->CreditRequest = shdr->CreditCharge;
+		shdr->CreditRequest =
+			cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1);
 		spin_lock(&server->req_lock);
 		server->credits += rdata->credits -
 						le16_to_cpu(shdr->CreditCharge);
 		spin_unlock(&server->req_lock);
 		wake_up(&server->request_q);
+		rdata->credits = le16_to_cpu(shdr->CreditCharge);
 		flags |= CIFS_HAS_CREDITS;
 	}
 
@@ -3201,13 +3212,11 @@
 	if (rc) {
 		kref_put(&rdata->refcount, cifs_readdata_release);
 		cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
-		trace_smb3_read_err(rc, 0 /* xid */, io_parms.persistent_fid,
-				   io_parms.tcon->tid, io_parms.tcon->ses->Suid,
-				   io_parms.offset, io_parms.length);
-	} else
-		trace_smb3_read_done(0 /* xid */, io_parms.persistent_fid,
-				   io_parms.tcon->tid, io_parms.tcon->ses->Suid,
-				   io_parms.offset, io_parms.length);
+		trace_smb3_read_err(0 /* xid */, io_parms.persistent_fid,
+				    io_parms.tcon->tid,
+				    io_parms.tcon->ses->Suid,
+				    io_parms.offset, io_parms.length, rc);
+	}
 
 	cifs_small_buf_release(buf);
 	return rc;
@@ -3251,10 +3260,11 @@
 		if (rc != -ENODATA) {
 			cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
 			cifs_dbg(VFS, "Send error in read = %d\n", rc);
+			trace_smb3_read_err(xid, req->PersistentFileId,
+					    io_parms->tcon->tid, ses->Suid,
+					    io_parms->offset, io_parms->length,
+					    rc);
 		}
-		trace_smb3_read_err(rc, xid, req->PersistentFileId,
-				    io_parms->tcon->tid, ses->Suid,
-				    io_parms->offset, io_parms->length);
 		free_rsp_buf(resp_buftype, rsp_iov.iov_base);
 		return rc == -ENODATA ? 0 : rc;
 	} else
@@ -3340,8 +3350,17 @@
 		wdata->mr = NULL;
 	}
 #endif
-	if (wdata->result)
+	if (wdata->result) {
 		cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
+		trace_smb3_write_err(0 /* no xid */,
+				     wdata->cfile->fid.persistent_fid,
+				     tcon->tid, tcon->ses->Suid, wdata->offset,
+				     wdata->bytes, wdata->result);
+	} else
+		trace_smb3_write_done(0 /* no xid */,
+				      wdata->cfile->fid.persistent_fid,
+				      tcon->tid, tcon->ses->Suid,
+				      wdata->offset, wdata->bytes);
 
 	queue_work(cifsiod_wq, &wdata->work);
 	DeleteMidQEntry(mid);
@@ -3462,12 +3481,14 @@
 	if (wdata->credits) {
 		shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
 						    SMB2_MAX_BUFFER_SIZE));
-		shdr->CreditRequest = shdr->CreditCharge;
+		shdr->CreditRequest =
+			cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1);
 		spin_lock(&server->req_lock);
 		server->credits += wdata->credits -
 						le16_to_cpu(shdr->CreditCharge);
 		spin_unlock(&server->req_lock);
 		wake_up(&server->request_q);
+		wdata->credits = le16_to_cpu(shdr->CreditCharge);
 		flags |= CIFS_HAS_CREDITS;
 	}
 
@@ -3481,10 +3502,7 @@
 				     wdata->bytes, rc);
 		kref_put(&wdata->refcount, release);
 		cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
-	} else
-		trace_smb3_write_done(0 /* no xid */, req->PersistentFileId,
-				     tcon->tid, tcon->ses->Suid, wdata->offset,
-				     wdata->bytes);
+	}
 
 async_writev_out:
 	cifs_small_buf_release(req);
@@ -3710,8 +3728,8 @@
 		    rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) {
 			srch_inf->endOfSearch = true;
 			rc = 0;
-		}
-		cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
+		} else
+			cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
 		goto qdir_exit;
 	}
 
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 333729c..66348b3d 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -378,7 +378,7 @@
 	if (rc < 0 && rc != -EINTR)
 		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
 			 rc);
-	else
+	else if (rc > 0)
 		rc = 0;
 
 	return rc;
@@ -786,7 +786,8 @@
 	int i, j, rc = 0;
 	int timeout, optype;
 	struct mid_q_entry *midQ[MAX_COMPOUND];
-	unsigned int credits = 0;
+	bool cancelled_mid[MAX_COMPOUND] = {false};
+	unsigned int credits[MAX_COMPOUND] = {0};
 	char *buf;
 
 	timeout = flags & CIFS_TIMEOUT_MASK;
@@ -804,13 +805,31 @@
 		return -ENOENT;
 
 	/*
-	 * Ensure that we do not send more than 50 overlapping requests
-	 * to the same server. We may make this configurable later or
-	 * use ses->maxReq.
+	 * Ensure we obtain 1 credit per request in the compound chain.
+	 * It could be optimized further by waiting for all the credits
+	 * at once, but that may block for a long time if we are short on
+	 * credits because of heavy operations in progress or a server
+	 * that grants few of them, so a fallback to the current approach
+	 * is needed anyway.
 	 */
-	rc = wait_for_free_request(ses->server, timeout, optype);
-	if (rc)
-		return rc;
+	for (i = 0; i < num_rqst; i++) {
+		rc = wait_for_free_request(ses->server, timeout, optype);
+		if (rc) {
+			/*
+			 * We haven't sent an SMB packet to the server yet but
+			 * we already obtained credits for i requests in the
+			 * compound chain - need to return those credits back
+			 * for future use. Note that we need to call add_credits
+			 * multiple times to match the way we obtained credits
+			 * in the first place and to account for in flight
+			 * requests correctly.
+			 */
+			for (j = 0; j < i; j++)
+				add_credits(ses->server, 1, optype);
+			return rc;
+		}
+		credits[i] = 1;
+	}
 
 	/*
 	 * Make sure that we sign in the same order that we send on this socket
@@ -826,8 +845,10 @@
 			for (j = 0; j < i; j++)
 				cifs_delete_mid(midQ[j]);
 			mutex_unlock(&ses->server->srv_mutex);
+
 			/* Update # of requests on wire to server */
-			add_credits(ses->server, 1, optype);
+			for (j = 0; j < num_rqst; j++)
+				add_credits(ses->server, credits[j], optype);
 			return PTR_ERR(midQ[i]);
 		}
 
@@ -874,19 +895,16 @@
 			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
 				midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
 				midQ[i]->callback = DeleteMidQEntry;
-				spin_unlock(&GlobalMid_Lock);
-				add_credits(ses->server, 1, optype);
-				return rc;
+				cancelled_mid[i] = true;
 			}
 			spin_unlock(&GlobalMid_Lock);
 		}
 	}
 
 	for (i = 0; i < num_rqst; i++)
-		if (midQ[i]->resp_buf)
-			credits += ses->server->ops->get_credits(midQ[i]);
-	if (!credits)
-		credits = 1;
+		if (!cancelled_mid[i] && midQ[i]->resp_buf
+		    && (midQ[i]->mid_state == MID_RESPONSE_RECEIVED))
+			credits[i] = ses->server->ops->get_credits(midQ[i]);
 
 	for (i = 0; i < num_rqst; i++) {
 		if (rc < 0)
@@ -894,8 +912,9 @@
 
 		rc = cifs_sync_mid_result(midQ[i], ses->server);
 		if (rc != 0) {
-			add_credits(ses->server, credits, optype);
-			return rc;
+			/* mark this mid as cancelled so it is not freed below */
+			cancelled_mid[i] = true;
+			goto out;
 		}
 
 		if (!midQ[i]->resp_buf ||
@@ -942,9 +961,11 @@
 	 * This is prevented above by using a noop callback that will not
 	 * wake this thread except for the very last PDU.
 	 */
-	for (i = 0; i < num_rqst; i++)
-		cifs_delete_mid(midQ[i]);
-	add_credits(ses->server, credits, optype);
+	for (i = 0; i < num_rqst; i++) {
+		if (!cancelled_mid[i])
+			cifs_delete_mid(midQ[i]);
+		add_credits(ses->server, credits[i], optype);
+	}
 
 	return rc;
 }
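
The compound_send_recv() rework above obtains one credit per request in the chain and, when acquisition fails partway, returns every credit taken so far one call at a time so that in-flight accounting stays balanced. A minimal userspace model of the acquire-or-roll-back loop (take_credit() and give_back_credit() are stand-ins for wait_for_free_request() and add_credits()):

#include <stdio.h>

#define MAX_COMPOUND 5

static int avail = 3;	/* assumed pool: the server granted 3 credits */

static int take_credit(void)
{
	if (avail == 0)
		return -1;	/* the kernel would block; fail here */
	avail--;
	return 0;
}

static void give_back_credit(void)
{
	avail++;
}

/* mirrors the rollback loop added to the transport code above */
static int acquire_compound_credits(int num_rqst,
				    unsigned int credits[MAX_COMPOUND])
{
	int i, j;

	for (i = 0; i < num_rqst; i++) {
		if (take_credit()) {
			for (j = 0; j < i; j++)
				give_back_credit();
			return -1;
		}
		credits[i] = 1;
	}
	return 0;
}

int main(void)
{
	unsigned int credits[MAX_COMPOUND] = {0};
	int rc;

	/* a 4-request compound cannot be satisfied by 3 credits, and the
	 * failed attempt must leave the pool exactly as it found it */
	rc = acquire_compound_credits(4, credits);
	printf("rc=%d avail=%d\n", rc, avail);	/* rc=-1 avail=3 */
	return 0;
}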
diff --git a/fs/dax.c b/fs/dax.c
index 3a2682a..09fa706 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -229,8 +229,8 @@
  *
  * Must be called with the i_pages lock held.
  */
-static void *__get_unlocked_mapping_entry(struct address_space *mapping,
-		pgoff_t index, void ***slotp, bool (*wait_fn)(void))
+static void *get_unlocked_mapping_entry(struct address_space *mapping,
+		pgoff_t index, void ***slotp)
 {
 	void *entry, **slot;
 	struct wait_exceptional_entry_queue ewait;
@@ -240,8 +240,6 @@
 	ewait.wait.func = wake_exceptional_entry_func;
 
 	for (;;) {
-		bool revalidate;
-
 		entry = __radix_tree_lookup(&mapping->i_pages, index, NULL,
 					  &slot);
 		if (!entry ||
@@ -256,30 +254,37 @@
 		prepare_to_wait_exclusive(wq, &ewait.wait,
 					  TASK_UNINTERRUPTIBLE);
 		xa_unlock_irq(&mapping->i_pages);
-		revalidate = wait_fn();
+		schedule();
 		finish_wait(wq, &ewait.wait);
 		xa_lock_irq(&mapping->i_pages);
-		if (revalidate) {
-			put_unlocked_mapping_entry(mapping, index, entry);
-			return ERR_PTR(-EAGAIN);
-		}
 	}
 }
 
-static bool entry_wait(void)
+/*
+ * The only thing keeping the address space around is the i_pages lock
+ * (it's cycled in clear_inode() after removing the entries from i_pages).
+ * After we call xa_unlock_irq(), we cannot touch mapping->i_pages.
+ */
+static void wait_entry_unlocked(struct address_space *mapping, pgoff_t index,
+		void ***slotp, void *entry)
 {
-	schedule();
-	/*
-	 * Never return an ERR_PTR() from
-	 * __get_unlocked_mapping_entry(), just keep looping.
-	 */
-	return false;
-}
+	struct wait_exceptional_entry_queue ewait;
+	wait_queue_head_t *wq;
 
-static void *get_unlocked_mapping_entry(struct address_space *mapping,
-		pgoff_t index, void ***slotp)
-{
-	return __get_unlocked_mapping_entry(mapping, index, slotp, entry_wait);
+	init_wait(&ewait.wait);
+	ewait.wait.func = wake_exceptional_entry_func;
+
+	wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
+	/*
+	 * Unlike get_unlocked_mapping_entry(), there is no guarantee that this
+	 * path ever successfully retrieves an unlocked entry before an
+	 * inode dies. Perform a non-exclusive wait in case this path
+	 * never successfully performs its own wake up.
+	 */
+	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
+	xa_unlock_irq(&mapping->i_pages);
+	schedule();
+	finish_wait(wq, &ewait.wait);
 }
 
 static void unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
@@ -398,19 +403,6 @@
 	return NULL;
 }
 
-static bool entry_wait_revalidate(void)
-{
-	rcu_read_unlock();
-	schedule();
-	rcu_read_lock();
-
-	/*
-	 * Tell __get_unlocked_mapping_entry() to take a break, we need
-	 * to revalidate page->mapping after dropping locks
-	 */
-	return true;
-}
-
 bool dax_lock_mapping_entry(struct page *page)
 {
 	pgoff_t index;
@@ -446,14 +438,15 @@
 		}
 		index = page->index;
 
-		entry = __get_unlocked_mapping_entry(mapping, index, &slot,
-				entry_wait_revalidate);
+		entry = __radix_tree_lookup(&mapping->i_pages, index,
+						NULL, &slot);
 		if (!entry) {
 			xa_unlock_irq(&mapping->i_pages);
 			break;
-		} else if (IS_ERR(entry)) {
-			xa_unlock_irq(&mapping->i_pages);
-			WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN);
+		} else if (slot_locked(mapping, slot)) {
+			rcu_read_unlock();
+			wait_entry_unlocked(mapping, index, &slot, entry);
+			rcu_read_lock();
 			continue;
 		}
 		lock_slot(mapping, slot);
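
wait_entry_unlocked() above opts for a non-exclusive wait because an exclusive waiter depends on the previous lock holder to pass the wakeup along, and for an inode that is being torn down that next wakeup may never arrive. A rough userspace analogy using pthreads, where pthread_cond_broadcast() plays the role of the non-exclusive wakeup (an illustration of the wake-one versus wake-all distinction, not the kernel wait-queue API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int entry_locked = 1;

/*
 * With a wake-one policy (pthread_cond_signal), only one waiter runs
 * and the rest rely on it to signal again; if it never does, they
 * sleep forever.  Waking everyone (pthread_cond_broadcast) avoids
 * that dependency, which is what the non-exclusive wait buys here.
 */
static void *waiter(void *arg)
{
	pthread_mutex_lock(&lock);
	while (entry_locked)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	printf("waiter %ld woke\n", (long)arg);
	return NULL;
}

int main(void)
{
	pthread_t t[2];
	long i;

	for (i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, waiter, (void *)i);

	pthread_mutex_lock(&lock);
	entry_locked = 0;
	pthread_cond_broadcast(&cond);	/* wake-all: no waiter is stranded */
	pthread_mutex_unlock(&lock);

	for (i = 0; i < 2; i++)
		pthread_join(t[i], NULL);
	return 0;
}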
diff --git a/fs/dcache.c b/fs/dcache.c
index 2e7e8d8..cb515f1 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1202,15 +1202,11 @@
  */
 void shrink_dcache_sb(struct super_block *sb)
 {
-	long freed;
-
 	do {
 		LIST_HEAD(dispose);
 
-		freed = list_lru_walk(&sb->s_dentry_lru,
+		list_lru_walk(&sb->s_dentry_lru,
 			dentry_lru_isolate_shrink, &dispose, 1024);
-
-		this_cpu_sub(nr_dentry_unused, freed);
 		shrink_dentry_list(&dispose);
 	} while (list_lru_count(&sb->s_dentry_lru) > 0);
 }
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index cc91963..a928ba0 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -1209,6 +1209,7 @@
 
 	if (rv < 0) {
 		log_error(ls, "create_lkb idr error %d", rv);
+		dlm_free_lkb(lkb);
 		return rv;
 	}
 
@@ -4179,6 +4180,7 @@
 			  (unsigned long long)lkb->lkb_recover_seq,
 			  ms->m_header.h_nodeid, ms->m_lkid);
 		error = -ENOENT;
+		dlm_put_lkb(lkb);
 		goto fail;
 	}
 
@@ -4232,6 +4234,7 @@
 			  lkb->lkb_id, lkb->lkb_remid,
 			  ms->m_header.h_nodeid, ms->m_lkid);
 		error = -ENOENT;
+		dlm_put_lkb(lkb);
 		goto fail;
 	}
 
@@ -5792,20 +5795,20 @@
 			goto out;
 		}
 	}
-
-	/* After ua is attached to lkb it will be freed by dlm_free_lkb().
-	   When DLM_IFL_USER is set, the dlm knows that this is a userspace
-	   lock and that lkb_astparam is the dlm_user_args structure. */
-
 	error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
 			      fake_astfn, ua, fake_bastfn, &args);
-	lkb->lkb_flags |= DLM_IFL_USER;
-
 	if (error) {
+		kfree(ua->lksb.sb_lvbptr);
+		ua->lksb.sb_lvbptr = NULL;
+		kfree(ua);
 		__put_lkb(ls, lkb);
 		goto out;
 	}
 
+	/* After ua is attached to lkb it will be freed by dlm_free_lkb().
+	   When DLM_IFL_USER is set, the dlm knows that this is a userspace
+	   lock and that lkb_astparam is the dlm_user_args structure. */
+	lkb->lkb_flags |= DLM_IFL_USER;
 	error = request_lock(ls, lkb, name, namelen, &args);
 
 	switch (error) {
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 5ba94be..6a1529e 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -680,11 +680,11 @@
 	kfree(ls->ls_recover_buf);
  out_lkbidr:
 	idr_destroy(&ls->ls_lkbidr);
+ out_rsbtbl:
 	for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) {
 		if (ls->ls_remove_names[i])
 			kfree(ls->ls_remove_names[i]);
 	}
- out_rsbtbl:
 	vfree(ls->ls_rsbtbl);
  out_lsfree:
 	if (do_unreg)
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 5cfb1e2..032cf9b 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2459,8 +2459,19 @@
 #define FALL_BACK_TO_NONDELALLOC 1
 #define CONVERT_INLINE_DATA	 2
 
-extern struct inode *ext4_iget(struct super_block *, unsigned long);
-extern struct inode *ext4_iget_normal(struct super_block *, unsigned long);
+typedef enum {
+	EXT4_IGET_NORMAL =	0,
+	EXT4_IGET_SPECIAL =	0x0001, /* OK to iget a system inode */
+	EXT4_IGET_HANDLE =	0x0002	/* Inode # is from a handle */
+} ext4_iget_flags;
+
+extern struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
+				 ext4_iget_flags flags, const char *function,
+				 unsigned int line);
+
+#define ext4_iget(sb, ino, flags) \
+	__ext4_iget((sb), (ino), (flags), __func__, __LINE__)
+
 extern int  ext4_write_inode(struct inode *, struct writeback_control *);
 extern int  ext4_setattr(struct dentry *, struct iattr *);
 extern int  ext4_getattr(const struct path *, struct kstat *, u32, unsigned int);
@@ -2542,6 +2553,8 @@
 extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count);
 
 /* super.c */
+extern struct buffer_head *ext4_sb_bread(struct super_block *sb,
+					 sector_t block, int op_flags);
 extern int ext4_seq_options_show(struct seq_file *seq, void *offset);
 extern int ext4_calculate_overhead(struct super_block *sb);
 extern void ext4_superblock_csum_set(struct super_block *sb);
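
The new flags become concrete in the __ext4_iget() hunk later in this patch: EXT4_IGET_SPECIAL permits reserved inode numbers below EXT4_FIRST_INO, and EXT4_IGET_HANDLE turns an out-of-range inode number into -ESTALE instead of treating it as corruption. A compact userspace sketch of that validation (constants are simplified, and -EUCLEAN stands in for the kernel's -EFSCORRUPTED):

#include <stdio.h>
#include <errno.h>

#define ROOT_INO	2UL
#define FIRST_INO	11UL	/* assumed first non-reserved inode */

enum { IGET_NORMAL = 0, IGET_SPECIAL = 0x1, IGET_HANDLE = 0x2 };

static int check_ino(unsigned long ino, int flags, unsigned long inodes_count)
{
	if ((!(flags & IGET_SPECIAL) &&
	     (ino < FIRST_INO && ino != ROOT_INO)) ||
	    ino < ROOT_INO || ino > inodes_count)
		return (flags & IGET_HANDLE) ? -ESTALE : -EUCLEAN;
	return 0;
}

int main(void)
{
	/* inode 7 is reserved: rejected unless the caller says SPECIAL */
	printf("%d %d %d\n",
	       check_ino(7, IGET_NORMAL, 1024),		/* -EUCLEAN */
	       check_ino(7, IGET_SPECIAL, 1024),	/* 0 */
	       check_ino(7, IGET_HANDLE, 1024));	/* -ESTALE */
	return 0;
}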
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 26a7fe5..712f009 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -116,8 +116,16 @@
 		goto out;
 	}
 
+	ret = file_write_and_wait_range(file, start, end);
+	if (ret)
+		return ret;
+
 	if (!journal) {
-		ret = __generic_file_fsync(file, start, end, datasync);
+		struct writeback_control wbc = {
+			.sync_mode = WB_SYNC_ALL
+		};
+
+		ret = ext4_write_inode(inode, &wbc);
 		if (!ret)
 			ret = ext4_sync_parent(inode);
 		if (test_opt(inode->i_sb, BARRIER))
@@ -125,9 +133,6 @@
 		goto out;
 	}
 
-	ret = file_write_and_wait_range(file, start, end);
-	if (ret)
-		return ret;
 	/*
 	 * data=writeback,ordered:
 	 *  The caller's filemap_fdatawrite()/wait will sync the data.
@@ -159,6 +164,9 @@
 			ret = err;
 	}
 out:
+	err = file_check_and_advance_wb_err(file);
+	if (ret == 0)
+		ret = err;
 	trace_ext4_sync_file_exit(inode, ret);
 	return ret;
 }
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 2addcb8..091a18a 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1225,7 +1225,7 @@
 	if (!ext4_test_bit(bit, bitmap_bh->b_data))
 		goto bad_orphan;
 
-	inode = ext4_iget(sb, ino);
+	inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
 	if (IS_ERR(inode)) {
 		err = PTR_ERR(inode);
 		ext4_error(sb, "couldn't read orphan inode %lu (err %d)",
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 1463f5c..8a7394c 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -719,8 +719,11 @@
 
 	if (!PageUptodate(page)) {
 		ret = ext4_read_inline_page(inode, page);
-		if (ret < 0)
+		if (ret < 0) {
+			unlock_page(page);
+			put_page(page);
 			goto out_up_read;
+		}
 	}
 
 	ret = 1;
@@ -1901,12 +1904,12 @@
 	physical += (char *)ext4_raw_inode(&iloc) - iloc.bh->b_data;
 	physical += offsetof(struct ext4_inode, i_block);
 
-	if (physical)
-		error = fiemap_fill_next_extent(fieinfo, start, physical,
-						inline_len, flags);
 	brelse(iloc.bh);
 out:
 	up_read(&EXT4_I(inode)->xattr_sem);
+	if (physical)
+		error = fiemap_fill_next_extent(fieinfo, start, physical,
+						inline_len, flags);
 	return (error < 0 ? error : 0);
 }
 
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index de6fb66..6906ada 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2761,7 +2761,8 @@
 		 * We may need to convert up to one extent per block in
 		 * the page and we may dirty the inode.
 		 */
-		rsv_blocks = 1 + (PAGE_SIZE >> inode->i_blkbits);
+		rsv_blocks = 1 + ext4_chunk_trans_blocks(inode,
+						PAGE_SIZE >> inode->i_blkbits);
 	}
 
 	/*
@@ -4848,7 +4849,9 @@
 		return inode_peek_iversion(inode);
 }
 
-struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
+struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
+			  ext4_iget_flags flags, const char *function,
+			  unsigned int line)
 {
 	struct ext4_iloc iloc;
 	struct ext4_inode *raw_inode;
@@ -4862,6 +4865,18 @@
 	gid_t i_gid;
 	projid_t i_projid;
 
+	if ((!(flags & EXT4_IGET_SPECIAL) &&
+	     (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)) ||
+	    (ino < EXT4_ROOT_INO) ||
+	    (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) {
+		if (flags & EXT4_IGET_HANDLE)
+			return ERR_PTR(-ESTALE);
+		__ext4_error(sb, function, line,
+			     "inode #%lu: comm %s: iget: illegal inode #",
+			     ino, current->comm);
+		return ERR_PTR(-EFSCORRUPTED);
+	}
+
 	inode = iget_locked(sb, ino);
 	if (!inode)
 		return ERR_PTR(-ENOMEM);
@@ -4877,18 +4892,26 @@
 	raw_inode = ext4_raw_inode(&iloc);
 
 	if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) {
-		EXT4_ERROR_INODE(inode, "root inode unallocated");
+		ext4_error_inode(inode, function, line, 0,
+				 "iget: root inode unallocated");
 		ret = -EFSCORRUPTED;
 		goto bad_inode;
 	}
 
+	if ((flags & EXT4_IGET_HANDLE) &&
+	    (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) {
+		ret = -ESTALE;
+		goto bad_inode;
+	}
+
 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
 		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
 		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
 			EXT4_INODE_SIZE(inode->i_sb) ||
 		    (ei->i_extra_isize & 3)) {
-			EXT4_ERROR_INODE(inode,
-					 "bad extra_isize %u (inode size %u)",
+			ext4_error_inode(inode, function, line, 0,
+					 "iget: bad extra_isize %u "
+					 "(inode size %u)",
 					 ei->i_extra_isize,
 					 EXT4_INODE_SIZE(inode->i_sb));
 			ret = -EFSCORRUPTED;
@@ -4910,7 +4933,8 @@
 	}
 
 	if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
-		EXT4_ERROR_INODE(inode, "checksum invalid");
+		ext4_error_inode(inode, function, line, 0,
+				 "iget: checksum invalid");
 		ret = -EFSBADCRC;
 		goto bad_inode;
 	}
@@ -4967,7 +4991,8 @@
 			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
 	inode->i_size = ext4_isize(sb, raw_inode);
 	if ((size = i_size_read(inode)) < 0) {
-		EXT4_ERROR_INODE(inode, "bad i_size value: %lld", size);
+		ext4_error_inode(inode, function, line, 0,
+				 "iget: bad i_size value: %lld", size);
 		ret = -EFSCORRUPTED;
 		goto bad_inode;
 	}
@@ -5043,7 +5068,8 @@
 	ret = 0;
 	if (ei->i_file_acl &&
 	    !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
-		EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
+		ext4_error_inode(inode, function, line, 0,
+				 "iget: bad extended attribute block %llu",
 				 ei->i_file_acl);
 		ret = -EFSCORRUPTED;
 		goto bad_inode;
@@ -5071,8 +5097,9 @@
 	} else if (S_ISLNK(inode->i_mode)) {
 		/* VFS does not allow setting these so must be corruption */
 		if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
-			EXT4_ERROR_INODE(inode,
-			  "immutable or append flags not allowed on symlinks");
+			ext4_error_inode(inode, function, line, 0,
+					 "iget: immutable or append flags "
+					 "not allowed on symlinks");
 			ret = -EFSCORRUPTED;
 			goto bad_inode;
 		}
@@ -5102,7 +5129,8 @@
 		make_bad_inode(inode);
 	} else {
 		ret = -EFSCORRUPTED;
-		EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
+		ext4_error_inode(inode, function, line, 0,
+				 "iget: bogus i_mode (%o)", inode->i_mode);
 		goto bad_inode;
 	}
 	brelse(iloc.bh);
@@ -5116,13 +5144,6 @@
 	return ERR_PTR(ret);
 }
 
-struct inode *ext4_iget_normal(struct super_block *sb, unsigned long ino)
-{
-	if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
-		return ERR_PTR(-EFSCORRUPTED);
-	return ext4_iget(sb, ino);
-}
-
 static int ext4_inode_blocks_set(handle_t *handle,
 				struct ext4_inode *raw_inode,
 				struct ext4_inode_info *ei)
@@ -5411,9 +5432,13 @@
 {
 	int err;
 
-	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
+	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC) ||
+	    sb_rdonly(inode->i_sb))
 		return 0;
 
+	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
+		return -EIO;
+
 	if (EXT4_SB(inode->i_sb)->s_journal) {
 		if (ext4_journal_current_handle()) {
 			jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
@@ -5429,7 +5454,8 @@
 		if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
 			return 0;
 
-		err = ext4_force_commit(inode->i_sb);
+		err = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal,
+						EXT4_I(inode)->i_sync_tid);
 	} else {
 		struct ext4_iloc iloc;
 
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 0edee31..d37dafa 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -125,7 +125,7 @@
 	    !inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO);
+	inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO, EXT4_IGET_SPECIAL);
 	if (IS_ERR(inode_bl))
 		return PTR_ERR(inode_bl);
 	ei_bl = EXT4_I(inode_bl);
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index 61a9d19..a98bfca 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -116,9 +116,9 @@
 	int i, retval = 0;
 	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
 
-	bh = sb_bread(inode->i_sb, pblock);
-	if (!bh)
-		return -EIO;
+	bh = ext4_sb_bread(inode->i_sb, pblock, 0);
+	if (IS_ERR(bh))
+		return PTR_ERR(bh);
 
 	i_data = (__le32 *)bh->b_data;
 	for (i = 0; i < max_entries; i++) {
@@ -145,9 +145,9 @@
 	int i, retval = 0;
 	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
 
-	bh = sb_bread(inode->i_sb, pblock);
-	if (!bh)
-		return -EIO;
+	bh = ext4_sb_bread(inode->i_sb, pblock, 0);
+	if (IS_ERR(bh))
+		return PTR_ERR(bh);
 
 	i_data = (__le32 *)bh->b_data;
 	for (i = 0; i < max_entries; i++) {
@@ -175,9 +175,9 @@
 	int i, retval = 0;
 	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
 
-	bh = sb_bread(inode->i_sb, pblock);
-	if (!bh)
-		return -EIO;
+	bh = ext4_sb_bread(inode->i_sb, pblock, 0);
+	if (IS_ERR(bh))
+		return PTR_ERR(bh);
 
 	i_data = (__le32 *)bh->b_data;
 	for (i = 0; i < max_entries; i++) {
@@ -224,9 +224,9 @@
 	struct buffer_head *bh;
 	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
 
-	bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
-	if (!bh)
-		return -EIO;
+	bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
+	if (IS_ERR(bh))
+		return PTR_ERR(bh);
 
 	tmp_idata = (__le32 *)bh->b_data;
 	for (i = 0; i < max_entries; i++) {
@@ -254,9 +254,9 @@
 	struct buffer_head *bh;
 	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
 
-	bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
-	if (!bh)
-		return -EIO;
+	bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
+	if (IS_ERR(bh))
+		return PTR_ERR(bh);
 
 	tmp_idata = (__le32 *)bh->b_data;
 	for (i = 0; i < max_entries; i++) {
@@ -382,9 +382,9 @@
 	struct ext4_extent_header *eh;
 
 	block = ext4_idx_pblock(ix);
-	bh = sb_bread(inode->i_sb, block);
-	if (!bh)
-		return -EIO;
+	bh = ext4_sb_bread(inode->i_sb, block, 0);
+	if (IS_ERR(bh))
+		return PTR_ERR(bh);
 
 	eh = (struct ext4_extent_header *)bh->b_data;
 	if (eh->eh_depth != 0) {
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index ffa2575..4f8de2b 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1571,7 +1571,7 @@
 					 dentry);
 			return ERR_PTR(-EFSCORRUPTED);
 		}
-		inode = ext4_iget_normal(dir->i_sb, ino);
+		inode = ext4_iget(dir->i_sb, ino, EXT4_IGET_NORMAL);
 		if (inode == ERR_PTR(-ESTALE)) {
 			EXT4_ERROR_INODE(dir,
 					 "deleted inode referenced: %u",
@@ -1613,7 +1613,7 @@
 		return ERR_PTR(-EFSCORRUPTED);
 	}
 
-	return d_obtain_alias(ext4_iget_normal(child->d_sb, ino));
+	return d_obtain_alias(ext4_iget(child->d_sb, ino, EXT4_IGET_NORMAL));
 }
 
 /*
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index a5efee3..48421de 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -127,10 +127,12 @@
 	else if (free_blocks_count < 0)
 		ext4_warning(sb, "Bad blocks count %u",
 			     input->blocks_count);
-	else if (!(bh = sb_bread(sb, end - 1)))
+	else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) {
+		err = PTR_ERR(bh);
+		bh = NULL;
 		ext4_warning(sb, "Cannot read last block (%llu)",
 			     end - 1);
-	else if (outside(input->block_bitmap, start, end))
+	} else if (outside(input->block_bitmap, start, end))
 		ext4_warning(sb, "Block bitmap not in group (block %llu)",
 			     (unsigned long long)input->block_bitmap);
 	else if (outside(input->inode_bitmap, start, end))
@@ -781,11 +783,11 @@
 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
 	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
 	ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
-	struct buffer_head **o_group_desc, **n_group_desc;
-	struct buffer_head *dind;
-	struct buffer_head *gdb_bh;
+	struct buffer_head **o_group_desc, **n_group_desc = NULL;
+	struct buffer_head *dind = NULL;
+	struct buffer_head *gdb_bh = NULL;
 	int gdbackups;
-	struct ext4_iloc iloc;
+	struct ext4_iloc iloc = { .bh = NULL };
 	__le32 *data;
 	int err;
 
@@ -794,21 +796,22 @@
 		       "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
 		       gdb_num);
 
-	gdb_bh = sb_bread(sb, gdblock);
-	if (!gdb_bh)
-		return -EIO;
+	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
+	if (IS_ERR(gdb_bh))
+		return PTR_ERR(gdb_bh);
 
 	gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
 	if (gdbackups < 0) {
 		err = gdbackups;
-		goto exit_bh;
+		goto errout;
 	}
 
 	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
-	dind = sb_bread(sb, le32_to_cpu(*data));
-	if (!dind) {
-		err = -EIO;
-		goto exit_bh;
+	dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
+	if (IS_ERR(dind)) {
+		err = PTR_ERR(dind);
+		dind = NULL;
+		goto errout;
 	}
 
 	data = (__le32 *)dind->b_data;
@@ -816,18 +819,18 @@
 		ext4_warning(sb, "new group %u GDT block %llu not reserved",
 			     group, gdblock);
 		err = -EINVAL;
-		goto exit_dind;
+		goto errout;
 	}
 
 	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
 	err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
 	if (unlikely(err))
-		goto exit_dind;
+		goto errout;
 
 	BUFFER_TRACE(gdb_bh, "get_write_access");
 	err = ext4_journal_get_write_access(handle, gdb_bh);
 	if (unlikely(err))
-		goto exit_dind;
+		goto errout;
 
 	BUFFER_TRACE(dind, "get_write_access");
 	err = ext4_journal_get_write_access(handle, dind);
@@ -837,7 +840,7 @@
 	/* ext4_reserve_inode_write() gets a reference on the iloc */
 	err = ext4_reserve_inode_write(handle, inode, &iloc);
 	if (unlikely(err))
-		goto exit_dind;
+		goto errout;
 
 	n_group_desc = ext4_kvmalloc((gdb_num + 1) *
 				     sizeof(struct buffer_head *),
@@ -846,7 +849,7 @@
 		err = -ENOMEM;
 		ext4_warning(sb, "not enough memory for %lu groups",
 			     gdb_num + 1);
-		goto exit_inode;
+		goto errout;
 	}
 
 	/*
@@ -862,7 +865,7 @@
 	err = ext4_handle_dirty_metadata(handle, NULL, dind);
 	if (unlikely(err)) {
 		ext4_std_error(sb, err);
-		goto exit_inode;
+		goto errout;
 	}
 	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >>
 			   (9 - EXT4_SB(sb)->s_cluster_bits);
@@ -871,8 +874,7 @@
 	err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
 	if (unlikely(err)) {
 		ext4_std_error(sb, err);
-		iloc.bh = NULL;
-		goto exit_inode;
+		goto errout;
 	}
 	brelse(dind);
 
@@ -888,15 +890,11 @@
 	err = ext4_handle_dirty_super(handle, sb);
 	if (err)
 		ext4_std_error(sb, err);
-
 	return err;
-
-exit_inode:
+errout:
 	kvfree(n_group_desc);
 	brelse(iloc.bh);
-exit_dind:
 	brelse(dind);
-exit_bh:
 	brelse(gdb_bh);
 
 	ext4_debug("leaving with error %d\n", err);
@@ -916,9 +914,9 @@
 
 	gdblock = ext4_meta_bg_first_block_no(sb, group) +
 		   ext4_bg_has_super(sb, group);
-	gdb_bh = sb_bread(sb, gdblock);
-	if (!gdb_bh)
-		return -EIO;
+	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
+	if (IS_ERR(gdb_bh))
+		return PTR_ERR(gdb_bh);
 	n_group_desc = ext4_kvmalloc((gdb_num + 1) *
 				     sizeof(struct buffer_head *),
 				     GFP_NOFS);
@@ -975,9 +973,10 @@
 		return -ENOMEM;
 
 	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
-	dind = sb_bread(sb, le32_to_cpu(*data));
-	if (!dind) {
-		err = -EIO;
+	dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
+	if (IS_ERR(dind)) {
+		err = PTR_ERR(dind);
+		dind = NULL;
 		goto exit_free;
 	}
 
@@ -996,9 +995,10 @@
 			err = -EINVAL;
 			goto exit_bh;
 		}
-		primary[res] = sb_bread(sb, blk);
-		if (!primary[res]) {
-			err = -EIO;
+		primary[res] = ext4_sb_bread(sb, blk, 0);
+		if (IS_ERR(primary[res])) {
+			err = PTR_ERR(primary[res]);
+			primary[res] = NULL;
 			goto exit_bh;
 		}
 		gdbackups = verify_reserved_gdb(sb, group, primary[res]);
@@ -1631,13 +1631,13 @@
 	}
 
 	if (reserved_gdb || gdb_off == 0) {
-		if (ext4_has_feature_resize_inode(sb) ||
+		if (!ext4_has_feature_resize_inode(sb) ||
 		    !le16_to_cpu(es->s_reserved_gdt_blocks)) {
 			ext4_warning(sb,
 				     "No reserved GDT blocks, can't resize");
 			return -EPERM;
 		}
-		inode = ext4_iget(sb, EXT4_RESIZE_INO);
+		inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL);
 		if (IS_ERR(inode)) {
 			ext4_warning(sb, "Error opening resize inode");
 			return PTR_ERR(inode);
@@ -1965,7 +1965,8 @@
 		}
 
 		if (!resize_inode)
-			resize_inode = ext4_iget(sb, EXT4_RESIZE_INO);
+			resize_inode = ext4_iget(sb, EXT4_RESIZE_INO,
+						 EXT4_IGET_SPECIAL);
 		if (IS_ERR(resize_inode)) {
 			ext4_warning(sb, "Error opening resize inode");
 			return PTR_ERR(resize_inode);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 8a149df..a1cf7d6 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -140,6 +140,29 @@
 MODULE_ALIAS("ext3");
 #define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
 
+/*
+ * This works like sb_bread() except it uses ERR_PTR for error
+ * returns.  Currently with sb_bread it's impossible to distinguish
+ * between ENOMEM and EIO situations (since both result in a NULL
+ * return).
+ */
+struct buffer_head *
+ext4_sb_bread(struct super_block *sb, sector_t block, int op_flags)
+{
+	struct buffer_head *bh = sb_getblk(sb, block);
+
+	if (bh == NULL)
+		return ERR_PTR(-ENOMEM);
+	if (buffer_uptodate(bh))
+		return bh;
+	ll_rw_block(REQ_OP_READ, REQ_META | op_flags, 1, &bh);
+	wait_on_buffer(bh);
+	if (buffer_uptodate(bh))
+		return bh;
+	put_bh(bh);
+	return ERR_PTR(-EIO);
+}
+
 static int ext4_verify_csum_type(struct super_block *sb,
 				 struct ext4_super_block *es)
 {
@@ -1150,20 +1173,11 @@
 {
 	struct inode *inode;
 
-	if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
-		return ERR_PTR(-ESTALE);
-	if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
-		return ERR_PTR(-ESTALE);
-
-	/* iget isn't really right if the inode is currently unallocated!!
-	 *
-	 * ext4_read_inode will return a bad_inode if the inode had been
-	 * deleted, so we should be safe.
-	 *
+	/*
 	 * Currently we don't know the generation for parent directory, so
 	 * a generation of 0 means "accept any"
 	 */
-	inode = ext4_iget_normal(sb, ino);
+	inode = ext4_iget(sb, ino, EXT4_IGET_HANDLE);
 	if (IS_ERR(inode))
 		return ERR_CAST(inode);
 	if (generation && inode->i_generation != generation) {
@@ -1188,6 +1202,16 @@
 				    ext4_nfs_get_inode);
 }
 
+static int ext4_nfs_commit_metadata(struct inode *inode)
+{
+	struct writeback_control wbc = {
+		.sync_mode = WB_SYNC_ALL
+	};
+
+	trace_ext4_nfs_commit_metadata(inode);
+	return ext4_write_inode(inode, &wbc);
+}
+
 /*
  * Try to release metadata pages (indirect blocks, directories) which are
  * mapped via the block device.  Since these pages could have journal heads
@@ -1392,6 +1416,7 @@
 	.fh_to_dentry = ext4_fh_to_dentry,
 	.fh_to_parent = ext4_fh_to_parent,
 	.get_parent = ext4_get_parent,
+	.commit_metadata = ext4_nfs_commit_metadata,
 };
 
 enum {
@@ -4327,7 +4352,7 @@
 	 * so we can safely mount the rest of the filesystem now.
 	 */
 
-	root = ext4_iget(sb, EXT4_ROOT_INO);
+	root = ext4_iget(sb, EXT4_ROOT_INO, EXT4_IGET_SPECIAL);
 	if (IS_ERR(root)) {
 		ext4_msg(sb, KERN_ERR, "get root inode failed");
 		ret = PTR_ERR(root);
@@ -4597,7 +4622,7 @@
 	 * happen if we iget() an unused inode, as the subsequent iput()
 	 * will try to delete it.
 	 */
-	journal_inode = ext4_iget(sb, journal_inum);
+	journal_inode = ext4_iget(sb, journal_inum, EXT4_IGET_SPECIAL);
 	if (IS_ERR(journal_inode)) {
 		ext4_msg(sb, KERN_ERR, "no journal found");
 		return NULL;
@@ -4879,7 +4904,7 @@
 	ext4_superblock_csum_set(sb);
 	if (sync)
 		lock_buffer(sbh);
-	if (buffer_write_io_error(sbh)) {
+	if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
 		/*
 		 * Oh, dear.  A previous attempt to write the
 		 * superblock failed.  This could happen because the
@@ -5679,7 +5704,7 @@
 	if (!qf_inums[type])
 		return -EPERM;
 
-	qf_inode = ext4_iget(sb, qf_inums[type]);
+	qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL);
 	if (IS_ERR(qf_inode)) {
 		ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
 		return PTR_ERR(qf_inode);
@@ -5689,9 +5714,9 @@
 	qf_inode->i_flags |= S_NOQUOTA;
 	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
 	err = dquot_enable(qf_inode, type, format_id, flags);
-	iput(qf_inode);
 	if (err)
 		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
+	iput(qf_inode);
 
 	return err;
 }
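
Every sb_bread() to ext4_sb_bread() conversion in this patch follows the same ERR_PTR idiom, which is what lets callers finally tell -ENOMEM apart from -EIO. A self-contained model of the idiom, with userspace stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers (the threshold constant matches the kernel convention, but everything else is illustrative):

#include <stdio.h>
#include <errno.h>

/* userspace stand-ins for the kernel's error-pointer helpers */
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-4095;
}
static inline long PTR_ERR(const void *p) { return (long)p; }

struct buffer_head { char data[16]; };

/*
 * Models ext4_sb_bread(): NULL is never returned, so an allocation
 * failure (-ENOMEM) and a read failure (-EIO) are distinguishable.
 */
static struct buffer_head *fake_sb_bread(int fail_alloc, int fail_io)
{
	static struct buffer_head bh;

	if (fail_alloc)
		return ERR_PTR(-ENOMEM);
	if (fail_io)
		return ERR_PTR(-EIO);
	return &bh;
}

int main(void)
{
	struct buffer_head *bh = fake_sb_bread(0, 1);

	if (IS_ERR(bh))
		printf("read failed: %ld\n", PTR_ERR(bh));	/* -5 == -EIO */
	return 0;
}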
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 4380c86..c0ba520 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -384,7 +384,7 @@
 	struct inode *inode;
 	int err;
 
-	inode = ext4_iget(parent->i_sb, ea_ino);
+	inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_NORMAL);
 	if (IS_ERR(inode)) {
 		err = PTR_ERR(inode);
 		ext4_error(parent->i_sb,
@@ -522,14 +522,13 @@
 	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
 		  name_index, name, buffer, (long)buffer_size);
 
-	error = -ENODATA;
 	if (!EXT4_I(inode)->i_file_acl)
-		goto cleanup;
+		return -ENODATA;
 	ea_idebug(inode, "reading block %llu",
 		  (unsigned long long)EXT4_I(inode)->i_file_acl);
-	bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
-	if (!bh)
-		goto cleanup;
+	bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+	if (IS_ERR(bh))
+		return PTR_ERR(bh);
 	ea_bdebug(bh, "b_count=%d, refcount=%d",
 		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
 	error = ext4_xattr_check_block(inode, bh);
@@ -696,26 +695,23 @@
 	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
 		  buffer, (long)buffer_size);
 
-	error = 0;
 	if (!EXT4_I(inode)->i_file_acl)
-		goto cleanup;
+		return 0;
 	ea_idebug(inode, "reading block %llu",
 		  (unsigned long long)EXT4_I(inode)->i_file_acl);
-	bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
-	error = -EIO;
-	if (!bh)
-		goto cleanup;
+	bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+	if (IS_ERR(bh))
+		return PTR_ERR(bh);
 	ea_bdebug(bh, "b_count=%d, refcount=%d",
 		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
 	error = ext4_xattr_check_block(inode, bh);
 	if (error)
 		goto cleanup;
 	ext4_xattr_block_cache_insert(EA_BLOCK_CACHE(inode), bh);
-	error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);
-
+	error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer,
+					buffer_size);
 cleanup:
 	brelse(bh);
-
 	return error;
 }
 
@@ -830,9 +826,9 @@
 	}
 
 	if (EXT4_I(inode)->i_file_acl) {
-		bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
-		if (!bh) {
-			ret = -EIO;
+		bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+		if (IS_ERR(bh)) {
+			ret = PTR_ERR(bh);
 			goto out;
 		}
 
@@ -1490,7 +1486,8 @@
 	}
 
 	while (ce) {
-		ea_inode = ext4_iget(inode->i_sb, ce->e_value);
+		ea_inode = ext4_iget(inode->i_sb, ce->e_value,
+				     EXT4_IGET_NORMAL);
 		if (!IS_ERR(ea_inode) &&
 		    !is_bad_inode(ea_inode) &&
 		    (EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL) &&
@@ -1825,16 +1822,15 @@
 
 	if (EXT4_I(inode)->i_file_acl) {
 		/* The inode already has an extended attribute block. */
-		bs->bh = sb_bread(sb, EXT4_I(inode)->i_file_acl);
-		error = -EIO;
-		if (!bs->bh)
-			goto cleanup;
+		bs->bh = ext4_sb_bread(sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+		if (IS_ERR(bs->bh))
+			return PTR_ERR(bs->bh);
 		ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
 			atomic_read(&(bs->bh->b_count)),
 			le32_to_cpu(BHDR(bs->bh)->h_refcount));
 		error = ext4_xattr_check_block(inode, bs->bh);
 		if (error)
-			goto cleanup;
+			return error;
 		/* Find the named attribute. */
 		bs->s.base = BHDR(bs->bh);
 		bs->s.first = BFIRST(bs->bh);
@@ -1843,13 +1839,10 @@
 		error = xattr_find_entry(inode, &bs->s.here, bs->s.end,
 					 i->name_index, i->name, 1);
 		if (error && error != -ENODATA)
-			goto cleanup;
+			return error;
 		bs->s.not_found = error;
 	}
-	error = 0;
-
-cleanup:
-	return error;
+	return 0;
 }
 
 static int
@@ -2278,9 +2271,9 @@
 
 	if (!EXT4_I(inode)->i_file_acl)
 		return NULL;
-	bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
-	if (!bh)
-		return ERR_PTR(-EIO);
+	bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+	if (IS_ERR(bh))
+		return bh;
 	error = ext4_xattr_check_block(inode, bh);
 	if (error) {
 		brelse(bh);
@@ -2733,7 +2726,7 @@
 	base = IFIRST(header);
 	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
 	min_offs = end - base;
-	total_ino = sizeof(struct ext4_xattr_ibody_header);
+	total_ino = sizeof(struct ext4_xattr_ibody_header) + sizeof(u32);
 
 	error = xattr_check_inode(inode, header, end);
 	if (error)
@@ -2750,10 +2743,11 @@
 	if (EXT4_I(inode)->i_file_acl) {
 		struct buffer_head *bh;
 
-		bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
-		error = -EIO;
-		if (!bh)
+		bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+		if (IS_ERR(bh)) {
+			error = PTR_ERR(bh);
 			goto cleanup;
+		}
 		error = ext4_xattr_check_block(inode, bh);
 		if (error) {
 			brelse(bh);
@@ -2907,11 +2901,12 @@
 	}
 
 	if (EXT4_I(inode)->i_file_acl) {
-		bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
-		if (!bh) {
-			EXT4_ERROR_INODE(inode, "block %llu read error",
-					 EXT4_I(inode)->i_file_acl);
-			error = -EIO;
+		bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+		if (IS_ERR(bh)) {
+			error = PTR_ERR(bh);
+			if (error == -EIO)
+				EXT4_ERROR_INODE(inode, "block %llu read error",
+						 EXT4_I(inode)->i_file_acl);
 			goto cleanup;
 		}
 		error = ext4_xattr_check_block(inode, bh);
@@ -3064,8 +3059,10 @@
 	while (ce) {
 		struct buffer_head *bh;
 
-		bh = sb_bread(inode->i_sb, ce->e_value);
-		if (!bh) {
+		bh = ext4_sb_bread(inode->i_sb, ce->e_value, REQ_PRIO);
+		if (IS_ERR(bh)) {
+			if (PTR_ERR(bh) == -ENOMEM)
+				return NULL;
 			EXT4_ERROR_INODE(inode, "block %lu read error",
 					 (unsigned long)ce->e_value);
 		} else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index fa707cd..63e5995 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -160,7 +160,7 @@
 	return (void *)f2fs_acl;
 
 fail:
-	kfree(f2fs_acl);
+	kvfree(f2fs_acl);
 	return ERR_PTR(-EINVAL);
 }
 
@@ -190,7 +190,7 @@
 		acl = NULL;
 	else
 		acl = ERR_PTR(retval);
-	kfree(value);
+	kvfree(value);
 
 	return acl;
 }
@@ -240,7 +240,7 @@
 
 	error = f2fs_setxattr(inode, name_index, "", value, size, ipage, 0);
 
-	kfree(value);
+	kvfree(value);
 	if (!error)
 		set_cached_acl(inode, type, acl);
 
@@ -352,12 +352,14 @@
 		return PTR_ERR(p);
 
 	clone = f2fs_acl_clone(p, GFP_NOFS);
-	if (!clone)
-		goto no_mem;
+	if (!clone) {
+		ret = -ENOMEM;
+		goto release_acl;
+	}
 
 	ret = f2fs_acl_create_masq(clone, mode);
 	if (ret < 0)
-		goto no_mem_clone;
+		goto release_clone;
 
 	if (ret == 0)
 		posix_acl_release(clone);
@@ -371,11 +373,11 @@
 
 	return 0;
 
-no_mem_clone:
+release_clone:
 	posix_acl_release(clone);
-no_mem:
+release_acl:
 	posix_acl_release(p);
-	return -ENOMEM;
+	return ret;
 }
 
 int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage,
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 9c28ea4..f955cd3 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -44,7 +44,7 @@
 		cond_resched();
 		goto repeat;
 	}
-	f2fs_wait_on_page_writeback(page, META, true);
+	f2fs_wait_on_page_writeback(page, META, true, true);
 	if (!PageUptodate(page))
 		SetPageUptodate(page);
 	return page;
@@ -370,9 +370,8 @@
 				goto continue_unlock;
 			}
 
-			f2fs_wait_on_page_writeback(page, META, true);
+			f2fs_wait_on_page_writeback(page, META, true, true);
 
-			BUG_ON(PageWriteback(page));
 			if (!clear_page_dirty_for_io(page))
 				goto continue_unlock;
 
@@ -911,7 +910,7 @@
 	f2fs_put_page(cp1, 1);
 	f2fs_put_page(cp2, 1);
 fail_no_cp:
-	kfree(sbi->ckpt);
+	kvfree(sbi->ckpt);
 	return -EINVAL;
 }
 
@@ -1290,11 +1289,11 @@
 	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
 	int err;
 
-	memcpy(page_address(page), src, PAGE_SIZE);
-	set_page_dirty(page);
+	f2fs_wait_on_page_writeback(page, META, true, true);
 
-	f2fs_wait_on_page_writeback(page, META, true);
-	f2fs_bug_on(sbi, PageWriteback(page));
+	memcpy(page_address(page), src, PAGE_SIZE);
+
+	set_page_dirty(page);
 	if (unlikely(!clear_page_dirty_for_io(page)))
 		f2fs_bug_on(sbi, 1);
 
@@ -1328,11 +1327,9 @@
 	int err;
 
 	/* Flush all the NAT/SIT pages */
-	while (get_pages(sbi, F2FS_DIRTY_META)) {
-		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
-		if (unlikely(f2fs_cp_error(sbi)))
-			break;
-	}
+	f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
+	f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_META) &&
+					!f2fs_cp_error(sbi));
 
 	/*
 	 * modify checkpoint
@@ -1405,14 +1402,6 @@
 		for (i = 0; i < nm_i->nat_bits_blocks; i++)
 			f2fs_update_meta_page(sbi, nm_i->nat_bits +
 					(i << F2FS_BLKSIZE_BITS), blk + i);
-
-		/* Flush all the NAT BITS pages */
-		while (get_pages(sbi, F2FS_DIRTY_META)) {
-			f2fs_sync_meta_pages(sbi, META, LONG_MAX,
-							FS_CP_META_IO);
-			if (unlikely(f2fs_cp_error(sbi)))
-				break;
-		}
 	}
 
 	/* write out checkpoint buffer at block 0 */
@@ -1448,6 +1437,8 @@
 
 	/* Here, we have one bio having CP pack except cp pack 2 page */
 	f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
+	f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_META) &&
+					!f2fs_cp_error(sbi));
 
 	/* wait for previous submitted meta pages writeback */
 	f2fs_wait_on_all_pages_writeback(sbi);
@@ -1465,7 +1456,7 @@
 	 * invalidate intermediate page cache borrowed from meta inode
 	 * which are used for migration of encrypted inode's blocks.
 	 */
-	if (f2fs_sb_has_encrypt(sbi->sb))
+	if (f2fs_sb_has_encrypt(sbi))
 		invalidate_mapping_pages(META_MAPPING(sbi),
 				MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1);
 
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index e9681f8..6552b48 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -143,6 +143,8 @@
 
 static void f2fs_read_end_io(struct bio *bio)
 {
+	struct page *first_page = bio->bi_io_vec[0].bv_page;
+
 	if (time_to_inject(F2FS_P_SB(bio_first_page_all(bio)),
 						FAULT_READ_IO)) {
 		f2fs_show_injection_info(FAULT_READ_IO);
@@ -157,6 +159,13 @@
 		return;
 	}
 
+	if (first_page != NULL &&
+		__read_io_type(first_page) == F2FS_RD_DATA) {
+		trace_android_fs_dataread_end(first_page->mapping->host,
+						page_offset(first_page),
+						bio->bi_iter.bi_size);
+	}
+
 	__read_end_io(bio);
 }
 
@@ -324,6 +333,32 @@
 	submit_bio(bio);
 }
 
+static void __f2fs_submit_read_bio(struct f2fs_sb_info *sbi,
+				struct bio *bio, enum page_type type)
+{
+	if (trace_android_fs_dataread_start_enabled() && (type == DATA)) {
+		struct page *first_page = bio->bi_io_vec[0].bv_page;
+
+		if (first_page != NULL &&
+			__read_io_type(first_page) == F2FS_RD_DATA) {
+			char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+			path = android_fstrace_get_pathname(pathbuf,
+						MAX_TRACE_PATHBUF_LEN,
+						first_page->mapping->host);
+
+			trace_android_fs_dataread_start(
+				first_page->mapping->host,
+				page_offset(first_page),
+				bio->bi_iter.bi_size,
+				current->pid,
+				path,
+				current->comm);
+		}
+	}
+	__submit_bio(sbi, bio, type);
+}
+
 static void __submit_merged_bio(struct f2fs_bio_info *io)
 {
 	struct f2fs_io_info *fio = &io->fio;
@@ -373,29 +408,6 @@
 	return false;
 }
 
-static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
-						struct page *page, nid_t ino,
-						enum page_type type)
-{
-	enum page_type btype = PAGE_TYPE_OF_BIO(type);
-	enum temp_type temp;
-	struct f2fs_bio_info *io;
-	bool ret = false;
-
-	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
-		io = sbi->write_io[btype] + temp;
-
-		down_read(&io->io_rwsem);
-		ret = __has_merged_page(io, inode, page, ino);
-		up_read(&io->io_rwsem);
-
-		/* TODO: use HOT temp only for meta pages now. */
-		if (ret || btype == META)
-			break;
-	}
-	return ret;
-}
-
 static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
 				enum page_type type, enum temp_type temp)
 {
@@ -421,13 +433,19 @@
 				nid_t ino, enum page_type type, bool force)
 {
 	enum temp_type temp;
-
-	if (!force && !has_merged_page(sbi, inode, page, ino, type))
-		return;
+	bool ret = true;
 
 	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
+		if (!force) {
+			enum page_type btype = PAGE_TYPE_OF_BIO(type);
+			struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
 
-		__f2fs_submit_merged_write(sbi, type, temp);
+			down_read(&io->io_rwsem);
+			ret = __has_merged_page(io, inode, page, ino);
+			up_read(&io->io_rwsem);
+		}
+		if (ret)
+			__f2fs_submit_merged_write(sbi, type, temp);
 
 		/* TODO: use HOT temp only for meta pages now. */
 		if (type >= META)
@@ -488,7 +506,7 @@
 	inc_page_count(fio->sbi, is_read_io(fio->op) ?
 			__read_io_type(page): WB_DATA_TYPE(fio->page));
 
-	__submit_bio(fio->sbi, bio, fio->type);
+	__f2fs_submit_read_bio(fio->sbi, bio, fio->type);
 	return 0;
 }
 
@@ -618,7 +636,7 @@
 	}
 	ClearPageError(page);
 	inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
-	__submit_bio(F2FS_I_SB(inode), bio, DATA);
+	__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
 	return 0;
 }
 
@@ -644,7 +662,7 @@
  */
 void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
 {
-	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
+	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
 	__set_data_blkaddr(dn);
 	if (set_page_dirty(dn->node_page))
 		dn->node_changed = true;
@@ -674,7 +692,7 @@
 	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
 						dn->ofs_in_node, count);
 
-	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
+	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
 
 	for (; count > 0; dn->ofs_in_node++) {
 		block_t blkaddr = datablock_addr(dn->inode,
@@ -958,6 +976,9 @@
 			return err;
 	}
 
+	if (direct_io && allow_outplace_dio(inode, iocb, from))
+		return 0;
+
 	if (is_inode_flag_set(inode, FI_NO_PREALLOC))
 		return 0;
 
@@ -971,6 +992,7 @@
 	map.m_next_pgofs = NULL;
 	map.m_next_extent = NULL;
 	map.m_seg_type = NO_CHECK_TYPE;
+	map.m_may_create = true;
 
 	if (direct_io) {
 		map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
@@ -1029,7 +1051,7 @@
 	unsigned int maxblocks = map->m_len;
 	struct dnode_of_data dn;
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	int mode = create ? ALLOC_NODE : LOOKUP_NODE;
+	int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
 	pgoff_t pgofs, end_offset, end;
 	int err = 0, ofs = 1;
 	unsigned int ofs_in_node, last_ofs_in_node;
@@ -1049,6 +1071,10 @@
 	end = pgofs + maxblocks;
 
 	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
+		if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO &&
+							map->m_may_create)
+			goto next_dnode;
+
 		map->m_pblk = ei.blk + pgofs - ei.fofs;
 		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
 		map->m_flags = F2FS_MAP_MAPPED;
@@ -1063,7 +1089,7 @@
 	}
 
 next_dnode:
-	if (create)
+	if (map->m_may_create)
 		__do_map_lock(sbi, flag, true);
 
 	/* When reading holes, we need its node page */
@@ -1100,11 +1126,13 @@
 
 	if (is_valid_data_blkaddr(sbi, blkaddr)) {
 		/* use out-place-update for direct IO under LFS mode */
-		if (test_opt(sbi, LFS) && create &&
-				flag == F2FS_GET_BLOCK_DIO) {
+		if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO &&
+							map->m_may_create) {
 			err = __allocate_data_block(&dn, map->m_seg_type);
-			if (!err)
+			if (!err) {
+				blkaddr = dn.data_blkaddr;
 				set_inode_flag(inode, FI_APPEND_WRITE);
+			}
 		}
 	} else {
 		if (create) {
@@ -1210,7 +1238,7 @@
 
 	f2fs_put_dnode(&dn);
 
-	if (create) {
+	if (map->m_may_create) {
 		__do_map_lock(sbi, flag, false);
 		f2fs_balance_fs(sbi, dn.node_changed);
 	}
@@ -1236,7 +1264,7 @@
 	}
 	f2fs_put_dnode(&dn);
 unlock_out:
-	if (create) {
+	if (map->m_may_create) {
 		__do_map_lock(sbi, flag, false);
 		f2fs_balance_fs(sbi, dn.node_changed);
 	}
@@ -1258,6 +1286,7 @@
 	map.m_next_pgofs = NULL;
 	map.m_next_extent = NULL;
 	map.m_seg_type = NO_CHECK_TYPE;
+	map.m_may_create = false;
 	last_lblk = F2FS_BLK_ALIGN(pos + len);
 
 	while (map.m_lblk < last_lblk) {
@@ -1272,7 +1301,7 @@
 
 static int __get_data_block(struct inode *inode, sector_t iblock,
 			struct buffer_head *bh, int create, int flag,
-			pgoff_t *next_pgofs, int seg_type)
+			pgoff_t *next_pgofs, int seg_type, bool may_write)
 {
 	struct f2fs_map_blocks map;
 	int err;
@@ -1282,6 +1311,7 @@
 	map.m_next_pgofs = next_pgofs;
 	map.m_next_extent = NULL;
 	map.m_seg_type = seg_type;
+	map.m_may_create = may_write;
 
 	err = f2fs_map_blocks(inode, &map, create, flag);
 	if (!err) {
@@ -1298,16 +1328,25 @@
 {
 	return __get_data_block(inode, iblock, bh_result, create,
 							flag, next_pgofs,
-							NO_CHECK_TYPE);
+							NO_CHECK_TYPE, create);
+}
+
+static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
+			struct buffer_head *bh_result, int create)
+{
+	return __get_data_block(inode, iblock, bh_result, create,
+				F2FS_GET_BLOCK_DIO, NULL,
+				f2fs_rw_hint_to_seg_type(inode->i_write_hint),
+				true);
 }
 
 static int get_data_block_dio(struct inode *inode, sector_t iblock,
 			struct buffer_head *bh_result, int create)
 {
 	return __get_data_block(inode, iblock, bh_result, create,
-						F2FS_GET_BLOCK_DIO, NULL,
-						f2fs_rw_hint_to_seg_type(
-							inode->i_write_hint));
+				F2FS_GET_BLOCK_DIO, NULL,
+				f2fs_rw_hint_to_seg_type(inode->i_write_hint),
+				false);
 }
 
 static int get_data_block_bmap(struct inode *inode, sector_t iblock,
@@ -1319,7 +1358,7 @@
 
 	return __get_data_block(inode, iblock, bh_result, create,
 						F2FS_GET_BLOCK_BMAP, NULL,
-						NO_CHECK_TYPE);
+						NO_CHECK_TYPE, create);
 }
 
 static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
@@ -1526,6 +1565,7 @@
 	map.m_next_pgofs = NULL;
 	map.m_next_extent = NULL;
 	map.m_seg_type = NO_CHECK_TYPE;
+	map.m_may_create = false;
 
 	for (; nr_pages; nr_pages--) {
 		if (pages) {
@@ -1596,7 +1636,7 @@
 		if (bio && (last_block_in_bio != block_nr - 1 ||
 			!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
 submit_and_realloc:
-			__submit_bio(F2FS_I_SB(inode), bio, DATA);
+			__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
 			bio = NULL;
 		}
 		if (bio == NULL) {
@@ -1628,7 +1668,7 @@
 		goto next_page;
 confused:
 		if (bio) {
-			__submit_bio(F2FS_I_SB(inode), bio, DATA);
+			__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
 			bio = NULL;
 		}
 		unlock_page(page);
@@ -1638,7 +1678,7 @@
 	}
 	BUG_ON(pages && !list_empty(pages));
 	if (bio)
-		__submit_bio(F2FS_I_SB(inode), bio, DATA);
+		__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
 	return 0;
 }
 
@@ -1856,6 +1896,8 @@
 		if (fio->need_lock == LOCK_REQ)
 			f2fs_unlock_op(fio->sbi);
 		err = f2fs_inplace_write_data(fio);
+		if (err && PageWriteback(page))
+			end_page_writeback(page);
 		trace_f2fs_do_write_data_page(fio->page, IPU);
 		set_inode_flag(inode, FI_UPDATE_WRITE);
 		return err;
@@ -2143,12 +2185,11 @@
 			if (PageWriteback(page)) {
 				if (wbc->sync_mode != WB_SYNC_NONE)
 					f2fs_wait_on_page_writeback(page,
-								DATA, true);
+							DATA, true, true);
 				else
 					goto continue_unlock;
 			}
 
-			BUG_ON(PageWriteback(page));
 			if (!clear_page_dirty_for_io(page))
 				goto continue_unlock;
 
@@ -2325,6 +2366,7 @@
 	bool locked = false;
 	struct extent_info ei = {0,0,0};
 	int err = 0;
+	int flag;
 
 	/*
 	 * we already allocated all the blocks, so we don't need to get
@@ -2334,9 +2376,15 @@
 			!is_inode_flag_set(inode, FI_NO_PREALLOC))
 		return 0;
 
+	/* f2fs_lock_op avoids race between write CP and convert_inline_page */
+	if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
+		flag = F2FS_GET_BLOCK_DEFAULT;
+	else
+		flag = F2FS_GET_BLOCK_PRE_AIO;
+
 	if (f2fs_has_inline_data(inode) ||
 			(pos & PAGE_MASK) >= i_size_read(inode)) {
-		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
+		__do_map_lock(sbi, flag, true);
 		locked = true;
 	}
 restart:
@@ -2374,6 +2422,7 @@
 				f2fs_put_dnode(&dn);
 				__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
 								true);
+				WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
 				locked = true;
 				goto restart;
 			}
@@ -2387,7 +2436,7 @@
 	f2fs_put_dnode(&dn);
 unlock_out:
 	if (locked)
-		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
+		__do_map_lock(sbi, flag, false);
 	return err;
 }
 
@@ -2468,7 +2517,7 @@
 		}
 	}
 
-	f2fs_wait_on_page_writeback(page, DATA, false);
+	f2fs_wait_on_page_writeback(page, DATA, false, true);
 
 	if (len == PAGE_SIZE || PageUptodate(page))
 		return 0;
@@ -2560,6 +2609,53 @@
 	return 0;
 }
 
+static void f2fs_dio_end_io(struct bio *bio)
+{
+	struct f2fs_private_dio *dio = bio->bi_private;
+
+	dec_page_count(F2FS_I_SB(dio->inode),
+			dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
+
+	bio->bi_private = dio->orig_private;
+	bio->bi_end_io = dio->orig_end_io;
+
+	kvfree(dio);
+
+	bio_endio(bio);
+}
+
+static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
+							loff_t file_offset)
+{
+	struct f2fs_private_dio *dio;
+	bool write = (bio_op(bio) == REQ_OP_WRITE);
+	int err;
+
+	dio = f2fs_kzalloc(F2FS_I_SB(inode),
+			sizeof(struct f2fs_private_dio), GFP_NOFS);
+	if (!dio) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	dio->inode = inode;
+	dio->orig_end_io = bio->bi_end_io;
+	dio->orig_private = bio->bi_private;
+	dio->write = write;
+
+	bio->bi_end_io = f2fs_dio_end_io;
+	bio->bi_private = dio;
+
+	inc_page_count(F2FS_I_SB(inode),
+			write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
+
+	submit_bio(bio);
+	return;
+out:
+	bio->bi_status = BLK_STS_IOERR;
+	bio_endio(bio);
+}
+
 static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
 	struct address_space *mapping = iocb->ki_filp->f_mapping;
@@ -2629,7 +2725,10 @@
 			down_read(&fi->i_gc_rwsem[READ]);
 	}
 
-	err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
+	err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
+			iter, rw == WRITE ? get_data_block_dio_write :
+			get_data_block_dio, NULL, f2fs_dio_submit_bio,
+			DIO_LOCKING | DIO_SKIP_HOLES);
 
 	if (do_opu)
 		up_read(&fi->i_gc_rwsem[READ]);
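
Note: the new f2fs_dio_submit_bio()/f2fs_dio_end_io() pair above wraps every direct-IO bio, saving the original bi_end_io/bi_private and holding the F2FS_DIO_READ/F2FS_DIO_WRITE counters until completion. A minimal sketch of how such counters can be consumed (the is_idle() hunk in f2fs.h below performs exactly this check; get_pages() and the enum values are taken from this patch):

	/*
	 * Sketch only, not part of the patch: test for in-flight direct IO
	 * using the counters introduced above.
	 */
	static inline bool f2fs_dio_in_flight(struct f2fs_sb_info *sbi)
	{
		return get_pages(sbi, F2FS_DIO_READ) > 0 ||
		       get_pages(sbi, F2FS_DIO_WRITE) > 0;
	}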
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 139b4d5..503fde8 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -53,6 +53,8 @@
 	si->vw_cnt = atomic_read(&sbi->vw_cnt);
 	si->max_aw_cnt = atomic_read(&sbi->max_aw_cnt);
 	si->max_vw_cnt = atomic_read(&sbi->max_vw_cnt);
+	si->nr_dio_read = get_pages(sbi, F2FS_DIO_READ);
+	si->nr_dio_write = get_pages(sbi, F2FS_DIO_WRITE);
 	si->nr_wb_cp_data = get_pages(sbi, F2FS_WB_CP_DATA);
 	si->nr_wb_data = get_pages(sbi, F2FS_WB_DATA);
 	si->nr_rd_data = get_pages(sbi, F2FS_RD_DATA);
@@ -62,7 +64,7 @@
 		si->nr_flushed =
 			atomic_read(&SM_I(sbi)->fcc_info->issued_flush);
 		si->nr_flushing =
-			atomic_read(&SM_I(sbi)->fcc_info->issing_flush);
+			atomic_read(&SM_I(sbi)->fcc_info->queued_flush);
 		si->flush_list_empty =
 			llist_empty(&SM_I(sbi)->fcc_info->issue_list);
 	}
@@ -70,7 +72,7 @@
 		si->nr_discarded =
 			atomic_read(&SM_I(sbi)->dcc_info->issued_discard);
 		si->nr_discarding =
-			atomic_read(&SM_I(sbi)->dcc_info->issing_discard);
+			atomic_read(&SM_I(sbi)->dcc_info->queued_discard);
 		si->nr_discard_cmd =
 			atomic_read(&SM_I(sbi)->dcc_info->discard_cmd_cnt);
 		si->undiscard_blks = SM_I(sbi)->dcc_info->undiscard_blks;
@@ -94,8 +96,10 @@
 	si->free_secs = free_sections(sbi);
 	si->prefree_count = prefree_segments(sbi);
 	si->dirty_count = dirty_segments(sbi);
-	si->node_pages = NODE_MAPPING(sbi)->nrpages;
-	si->meta_pages = META_MAPPING(sbi)->nrpages;
+	if (sbi->node_inode)
+		si->node_pages = NODE_MAPPING(sbi)->nrpages;
+	if (sbi->meta_inode)
+		si->meta_pages = META_MAPPING(sbi)->nrpages;
 	si->nats = NM_I(sbi)->nat_cnt;
 	si->dirty_nats = NM_I(sbi)->dirty_nat_cnt;
 	si->sits = MAIN_SEGS(sbi);
@@ -173,7 +177,6 @@
 static void update_mem_info(struct f2fs_sb_info *sbi)
 {
 	struct f2fs_stat_info *si = F2FS_STAT(sbi);
-	unsigned npages;
 	int i;
 
 	if (si->base_mem)
@@ -197,7 +200,7 @@
 	si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
 	si->base_mem += SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
 	si->base_mem += SIT_VBLOCK_MAP_SIZE;
-	if (sbi->segs_per_sec > 1)
+	if (__is_large_section(sbi))
 		si->base_mem += MAIN_SECS(sbi) * sizeof(struct sec_entry);
 	si->base_mem += __bitmap_size(sbi, SIT_BITMAP);
 
@@ -256,10 +259,14 @@
 						sizeof(struct extent_node);
 
 	si->page_mem = 0;
-	npages = NODE_MAPPING(sbi)->nrpages;
-	si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
-	npages = META_MAPPING(sbi)->nrpages;
-	si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
+	if (sbi->node_inode) {
+		unsigned npages = NODE_MAPPING(sbi)->nrpages;
+		si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
+	}
+	if (sbi->meta_inode) {
+		unsigned npages = META_MAPPING(sbi)->nrpages;
+		si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
+	}
 }
 
 static int stat_show(struct seq_file *s, void *v)
@@ -374,6 +381,8 @@
 		seq_printf(s, "  - Inner Struct Count: tree: %d(%d), node: %d\n",
 				si->ext_tree, si->zombie_tree, si->ext_node);
 		seq_puts(s, "\nBalancing F2FS Async:\n");
+		seq_printf(s, "  - DIO (R: %4d, W: %4d)\n",
+			   si->nr_dio_read, si->nr_dio_write);
 		seq_printf(s, "  - IO_R (Data: %4d, Node: %4d, Meta: %4d\n",
 			   si->nr_rd_data, si->nr_rd_node, si->nr_rd_meta);
 		seq_printf(s, "  - IO_W (CP: %4d, Data: %4d, Flush: (%4d %4d %4d), "
@@ -444,18 +453,7 @@
 	return 0;
 }
 
-static int stat_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, stat_show, inode->i_private);
-}
-
-static const struct file_operations stat_fops = {
-	.owner = THIS_MODULE,
-	.open = stat_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(stat);
 
 int f2fs_build_stats(struct f2fs_sb_info *sbi)
 {
@@ -510,7 +508,7 @@
 	list_del(&si->stat_list);
 	mutex_unlock(&f2fs_stat_mutex);
 
-	kfree(si);
+	kvfree(si);
 }
 
 int __init f2fs_create_root_stats(void)
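
Note: DEFINE_SHOW_ATTRIBUTE(stat) generates the same open/fops boilerplate that the hunk above deletes. For reference, the generic helper from include/linux/seq_file.h expands to roughly:

	static int stat_open(struct inode *inode, struct file *file)
	{
		return single_open(file, stat_show, inode->i_private);
	}

	static const struct file_operations stat_fops = {
		.owner		= THIS_MODULE,
		.open		= stat_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};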
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 2ef84b4..7ff9e99 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -293,7 +293,7 @@
 {
 	enum page_type type = f2fs_has_inline_dentry(dir) ? NODE : DATA;
 	lock_page(page);
-	f2fs_wait_on_page_writeback(page, type, true);
+	f2fs_wait_on_page_writeback(page, type, true, true);
 	de->ino = cpu_to_le32(inode->i_ino);
 	set_de_type(de, inode->i_mode);
 	set_page_dirty(page);
@@ -307,7 +307,7 @@
 {
 	struct f2fs_inode *ri;
 
-	f2fs_wait_on_page_writeback(ipage, NODE, true);
+	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
 
 	/* copy name info. to this inode page */
 	ri = F2FS_INODE(ipage);
@@ -550,7 +550,7 @@
 	++level;
 	goto start;
 add_dentry:
-	f2fs_wait_on_page_writeback(dentry_page, DATA, true);
+	f2fs_wait_on_page_writeback(dentry_page, DATA, true, true);
 
 	if (inode) {
 		down_write(&F2FS_I(inode)->i_sem);
@@ -705,7 +705,7 @@
 		return f2fs_delete_inline_entry(dentry, page, dir, inode);
 
 	lock_page(page);
-	f2fs_wait_on_page_writeback(page, DATA, true);
+	f2fs_wait_on_page_writeback(page, DATA, true, true);
 
 	dentry_blk = page_address(page);
 	bit_pos = dentry - dentry_blk->dentry;
@@ -808,6 +808,17 @@
 		de_name.name = d->filename[bit_pos];
 		de_name.len = le16_to_cpu(de->name_len);
 
+		/* check memory boundary before moving forward */
+		bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
+		if (unlikely(bit_pos > d->max)) {
+			f2fs_msg(sbi->sb, KERN_WARNING,
+				"%s: corrupted namelen=%d, run fsck to fix.",
+				__func__, le16_to_cpu(de->name_len));
+			set_sbi_flag(sbi, SBI_NEED_FSCK);
+			err = -EINVAL;
+			goto out;
+		}
+
 		if (f2fs_encrypted_inode(d->inode)) {
 			int save_len = fstr->len;
 
@@ -830,7 +841,6 @@
 		if (readdir_ra)
 			f2fs_ra_node_page(sbi, le32_to_cpu(de->ino));
 
-		bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
 		ctx->pos = start_pos + bit_pos;
 	}
 out:
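
Note: the readdir fix above advances bit_pos by the dentry's slot count before the name is used, so a corrupted on-disk name_len can no longer walk past the dentry bitmap. For reference, the slot math behind the d->max check (in f2fs a name occupies name_len / F2FS_SLOT_LEN slots, rounded up, with F2FS_SLOT_LEN being 8 bytes):

	#define GET_DENTRY_SLOTS(x)	(((x) + F2FS_SLOT_LEN - 1) >> F2FS_SLOT_LEN_BITS)

	/* a forged name_len inflates the slot count, pushing bit_pos past
	 * d->max unless the walk validates the advance first */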
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 56204a8..86536e9 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -67,7 +67,7 @@
 	unsigned int inject_type;
 };
 
-extern char *f2fs_fault_name[FAULT_MAX];
+extern const char *f2fs_fault_name[FAULT_MAX];
 #define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))
 #endif
 
@@ -152,12 +152,13 @@
 #define F2FS_FEATURE_VERITY		0x0400	/* reserved */
 #define F2FS_FEATURE_SB_CHKSUM		0x0800
 
-#define F2FS_HAS_FEATURE(sb, mask)					\
-	((F2FS_SB(sb)->raw_super->feature & cpu_to_le32(mask)) != 0)
-#define F2FS_SET_FEATURE(sb, mask)					\
-	(F2FS_SB(sb)->raw_super->feature |= cpu_to_le32(mask))
-#define F2FS_CLEAR_FEATURE(sb, mask)					\
-	(F2FS_SB(sb)->raw_super->feature &= ~cpu_to_le32(mask))
+#define __F2FS_HAS_FEATURE(raw_super, mask)				\
+	((raw_super->feature & cpu_to_le32(mask)) != 0)
+#define F2FS_HAS_FEATURE(sbi, mask)	__F2FS_HAS_FEATURE(sbi->raw_super, mask)
+#define F2FS_SET_FEATURE(sbi, mask)					\
+	(sbi->raw_super->feature |= cpu_to_le32(mask))
+#define F2FS_CLEAR_FEATURE(sbi, mask)					\
+	(sbi->raw_super->feature &= ~cpu_to_le32(mask))
 
 /*
  * Default values for user and/or group using reserved blocks
@@ -284,7 +285,7 @@
 	struct block_device *bdev;	/* bdev */
 	unsigned short ref;		/* reference count */
 	unsigned char state;		/* state */
-	unsigned char issuing;		/* issuing discard */
+	unsigned char queued;		/* queued discard */
 	int error;			/* bio error */
 	spinlock_t lock;		/* for state/bio_ref updating */
 	unsigned short bio_ref;		/* bio reference count */
@@ -326,7 +327,7 @@
 	unsigned int undiscard_blks;		/* # of undiscard blocks */
 	unsigned int next_pos;			/* next discard position */
 	atomic_t issued_discard;		/* # of issued discard */
-	atomic_t issing_discard;		/* # of issing discard */
+	atomic_t queued_discard;		/* # of queued discard */
 	atomic_t discard_cmd_cnt;		/* # of cached cmd count */
 	struct rb_root_cached root;		/* root of discard rb-tree */
 	bool rbtree_check;			/* config for consistence check */
@@ -416,6 +417,7 @@
 #define F2FS_GOING_DOWN_METASYNC	0x1	/* going down with metadata */
 #define F2FS_GOING_DOWN_NOSYNC		0x2	/* going down */
 #define F2FS_GOING_DOWN_METAFLUSH	0x3	/* going down with meta flush */
+#define F2FS_GOING_DOWN_NEED_FSCK	0x4	/* going down to trigger fsck */
 
 #if defined(__KERNEL__) && defined(CONFIG_COMPAT)
 /*
@@ -557,16 +559,8 @@
 };
 
 struct extent_node {
-	struct rb_node rb_node;
-	union {
-		struct {
-			unsigned int fofs;
-			unsigned int len;
-			u32 blk;
-		};
-		struct extent_info ei;	/* extent info */
-
-	};
+	struct rb_node rb_node;		/* rb node located in rb-tree */
+	struct extent_info ei;		/* extent info */
 	struct list_head list;		/* node in global extent list of sbi */
 	struct extent_tree *et;		/* extent tree pointer */
 };
@@ -601,6 +595,7 @@
 	pgoff_t *m_next_pgofs;		/* point next possible non-hole pgofs */
 	pgoff_t *m_next_extent;		/* point to next possible extent */
 	int m_seg_type;
+	bool m_may_create;		/* indicate it is from write path */
 };
 
 /* for flag in get_data_block */
@@ -889,7 +884,7 @@
 	struct task_struct *f2fs_issue_flush;	/* flush thread */
 	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
 	atomic_t issued_flush;			/* # of issued flushes */
-	atomic_t issing_flush;			/* # of issing flushes */
+	atomic_t queued_flush;			/* # of queued flushes */
 	struct llist_head issue_list;		/* list for command issue */
 	struct llist_node *dispatch_list;	/* list for command dispatch */
 };
@@ -956,6 +951,8 @@
 	F2FS_RD_DATA,
 	F2FS_RD_NODE,
 	F2FS_RD_META,
+	F2FS_DIO_WRITE,
+	F2FS_DIO_READ,
 	NR_COUNT_TYPE,
 };
 
@@ -1170,8 +1167,6 @@
 
 	/* for bio operations */
 	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
-	struct mutex wio_mutex[NR_PAGE_TYPE - 1][NR_TEMP_TYPE];
-						/* bio ordering for NODE/DATA */
 	/* keep migration IO order for LFS mode */
 	struct rw_semaphore io_order_lock;
 	mempool_t *write_io_dummy;		/* Dummy pages */
@@ -1263,6 +1258,7 @@
 	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
 	unsigned int cur_victim_sec;		/* current victim section num */
 	unsigned int gc_mode;			/* current GC state */
+	unsigned int next_victim_seg[2];	/* next segment in victim section */
 	/* for skip statistic */
 	unsigned long long skipped_atomic_files[2];	/* FG_GC and BG_GC */
 	unsigned long long skipped_gc_rwsem;		/* FG_GC only */
@@ -1272,6 +1268,8 @@
 
 	/* maximum # of trials to find a victim segment for SSR and GC */
 	unsigned int max_victim_search;
+	/* migration granularity of garbage collection, unit: segment */
+	unsigned int migration_granularity;
 
 	/*
 	 * for stat information.
@@ -1330,6 +1328,13 @@
 	__u32 s_chksum_seed;
 };
 
+struct f2fs_private_dio {
+	struct inode *inode;
+	void *orig_private;
+	bio_end_io_t *orig_end_io;
+	bool write;
+};
+
 #ifdef CONFIG_F2FS_FAULT_INJECTION
 #define f2fs_show_injection_info(type)					\
 	printk_ratelimited("%sF2FS-fs : inject %s in %s of %pF\n",	\
@@ -1608,12 +1613,16 @@
 {
 	unsigned long flags;
 
-	set_sbi_flag(sbi, SBI_NEED_FSCK);
+	/*
+	 * Re-enabling nat_bits would require running fsck.f2fs via
+	 * set_sbi_flag(sbi, SBI_NEED_FSCK), but that is very costly,
+	 * so rely on a regular fsck or an unclean shutdown instead.
+	 */
 
 	if (lock)
 		spin_lock_irqsave(&sbi->cp_lock, flags);
 	__clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG);
-	kfree(NM_I(sbi)->nat_bits);
+	kvfree(NM_I(sbi)->nat_bits);
 	NM_I(sbi)->nat_bits = NULL;
 	if (lock)
 		spin_unlock_irqrestore(&sbi->cp_lock, flags);
@@ -2146,7 +2155,11 @@
 {
 	if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
 		get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) ||
-		get_pages(sbi, F2FS_WB_CP_DATA))
+		get_pages(sbi, F2FS_WB_CP_DATA) ||
+		get_pages(sbi, F2FS_DIO_READ) ||
+		get_pages(sbi, F2FS_DIO_WRITE) ||
+		atomic_read(&SM_I(sbi)->dcc_info->queued_discard) ||
+		atomic_read(&SM_I(sbi)->fcc_info->queued_flush))
 		return false;
 	return f2fs_time_over(sbi, type);
 }
@@ -2370,6 +2383,7 @@
 	case FI_NEW_INODE:
 		if (set)
 			return;
+		/* fall through */
 	case FI_DATA_EXIST:
 	case FI_INLINE_DOTS:
 	case FI_PIN_FILE:
@@ -2672,22 +2686,37 @@
 
 static inline bool f2fs_may_extent_tree(struct inode *inode)
 {
-	if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE) ||
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+	if (!test_opt(sbi, EXTENT_CACHE) ||
 			is_inode_flag_set(inode, FI_NO_EXTENT))
 		return false;
 
+	/*
+	 * do not create extents for files recovered during mount
+	 * if the shrinker is not yet registered.
+	 */
+	if (list_empty(&sbi->s_list))
+		return false;
+
 	return S_ISREG(inode->i_mode);
 }
 
 static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
 					size_t size, gfp_t flags)
 {
+	void *ret;
+
 	if (time_to_inject(sbi, FAULT_KMALLOC)) {
 		f2fs_show_injection_info(FAULT_KMALLOC);
 		return NULL;
 	}
 
-	return kmalloc(size, flags);
+	ret = kmalloc(size, flags);
+	if (ret)
+		return ret;
+
+	return kvmalloc(size, flags);
 }
 
 static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
@@ -2762,6 +2791,8 @@
 	spin_unlock(&sbi->iostat_lock);
 }
 
+#define __is_large_section(sbi)		((sbi)->segs_per_sec > 1)
+
 #define __is_meta_io(fio) (PAGE_TYPE_OF_BIO(fio->type) == META &&	\
 				(!is_read_io(fio->op) || fio->is_meta))
 
@@ -3007,7 +3038,7 @@
 			struct f2fs_summary *sum, int type,
 			struct f2fs_io_info *fio, bool add_list);
 void f2fs_wait_on_page_writeback(struct page *page,
-			enum page_type type, bool ordered);
+			enum page_type type, bool ordered, bool locked);
 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
 								block_t len);
@@ -3147,6 +3178,7 @@
 	int total_count, utilization;
 	int bg_gc, nr_wb_cp_data, nr_wb_data;
 	int nr_rd_data, nr_rd_node, nr_rd_meta;
+	int nr_dio_read, nr_dio_write;
 	unsigned int io_skip_bggc, other_skip_bggc;
 	int nr_flushing, nr_flushed, flush_list_empty;
 	int nr_discarding, nr_discarded;
@@ -3459,9 +3491,9 @@
 }
 
 #define F2FS_FEATURE_FUNCS(name, flagname) \
-static inline int f2fs_sb_has_##name(struct super_block *sb) \
+static inline int f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
 { \
-	return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_##flagname); \
+	return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \
 }
 
 F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
@@ -3491,7 +3523,7 @@
 
 static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
 {
-	return f2fs_sb_has_blkzoned(sbi->sb);
+	return f2fs_sb_has_blkzoned(sbi);
 }
 
 static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
@@ -3566,7 +3598,7 @@
 	 * for blkzoned device, fallback direct IO to buffered IO, so
 	 * all IOs can be serialized by log-structured write.
 	 */
-	if (f2fs_sb_has_blkzoned(sbi->sb))
+	if (f2fs_sb_has_blkzoned(sbi))
 		return true;
 	if (test_opt(sbi, LFS) && (rw == WRITE) &&
 				block_unaligned_IO(inode, iocb, iter))
@@ -3589,7 +3621,7 @@
 static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
 {
 #ifdef CONFIG_QUOTA
-	if (f2fs_sb_has_quota_ino(sbi->sb))
+	if (f2fs_sb_has_quota_ino(sbi))
 		return true;
 	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
 		F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
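
Note: f2fs_kmalloc() now falls back to kvmalloc() on failure, which is what motivates the tree-wide kfree() -> kvfree() conversion in this patch: a buffer it returns may be vmalloc-backed, and kvfree() is the one free routine that handles both. The resulting contract, sketched:

	void *buf = kmalloc(size, flags);
	if (!buf)
		buf = kvmalloc(size, flags);	/* may be vmalloc-backed */
	/* ... use buf ... */
	kvfree(buf);	/* safe for both kmalloc()ed and vmalloc()ed memory */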
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 88b1246..ae2b45e 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -82,7 +82,7 @@
 	}
 
 	/* fill the page */
-	f2fs_wait_on_page_writeback(page, DATA, false);
+	f2fs_wait_on_page_writeback(page, DATA, false, true);
 
 	/* wait for GCed page writeback via META_MAPPING */
 	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
@@ -216,6 +216,9 @@
 
 	trace_f2fs_sync_file_enter(inode);
 
+	if (S_ISDIR(inode->i_mode))
+		goto go_write;
+
 	/* if fdatasync is triggered, let's do in-place-update */
 	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
 		set_inode_flag(inode, FI_NEED_IPU);
@@ -575,7 +578,7 @@
 	if (IS_ERR(page))
 		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
 truncate_out:
-	f2fs_wait_on_page_writeback(page, DATA, true);
+	f2fs_wait_on_page_writeback(page, DATA, true, true);
 	zero_user(page, offset, PAGE_SIZE - offset);
 
 	/* An encrypted inode should have a key and truncate the last page. */
@@ -696,7 +699,7 @@
 	unsigned int flags;
 
 	if (f2fs_has_extra_attr(inode) &&
-			f2fs_sb_has_inode_crtime(inode->i_sb) &&
+			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
 			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
 		stat->result_mask |= STATX_BTIME;
 		stat->btime.tv_sec = fi->i_crtime.tv_sec;
@@ -892,7 +895,7 @@
 	if (IS_ERR(page))
 		return PTR_ERR(page);
 
-	f2fs_wait_on_page_writeback(page, DATA, true);
+	f2fs_wait_on_page_writeback(page, DATA, true, true);
 	zero_user(page, start, len);
 	set_page_dirty(page);
 	f2fs_put_page(page, 1);
@@ -1496,7 +1499,8 @@
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
-			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE };
+			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
+			.m_may_create = true };
 	pgoff_t pg_end;
 	loff_t new_size = i_size_read(inode);
 	loff_t off_end;
@@ -1681,7 +1685,7 @@
 
 	inode->i_ctime = current_time(inode);
 	f2fs_set_inode_flags(inode);
-	f2fs_mark_inode_dirty_sync(inode, false);
+	f2fs_mark_inode_dirty_sync(inode, true);
 	return 0;
 }
 
@@ -1746,10 +1750,12 @@
 
 	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 
-	if (!get_dirty_pages(inode))
-		goto skip_flush;
-
-	f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
+	/*
+	 * Wait for end_io here so that F2FS_WB_CP_DATA is counted
+	 * correctly by f2fs_is_atomic_file.
+	 */
+	if (get_dirty_pages(inode))
+		f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
 		"Unexpected flush for atomic writes: ino=%lu, npages=%u",
 					inode->i_ino, get_dirty_pages(inode));
 	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
@@ -1757,7 +1763,7 @@
 		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 		goto out;
 	}
-skip_flush:
+
 	set_inode_flag(inode, FI_ATOMIC_FILE);
 	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
 	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
@@ -1962,6 +1968,13 @@
 		f2fs_stop_checkpoint(sbi, false);
 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
 		break;
+	case F2FS_GOING_DOWN_NEED_FSCK:
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
+		/* do checkpoint only */
+		ret = f2fs_sync_fs(sb, 1);
+		if (ret)
+			goto out;
+		break;
 	default:
 		ret = -EINVAL;
 		goto out;
@@ -2030,7 +2043,7 @@
 {
 	struct inode *inode = file_inode(filp);
 
-	if (!f2fs_sb_has_encrypt(inode->i_sb))
+	if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
 		return -EOPNOTSUPP;
 
 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
@@ -2040,7 +2053,7 @@
 
 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
 {
-	if (!f2fs_sb_has_encrypt(file_inode(filp)->i_sb))
+	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
 		return -EOPNOTSUPP;
 	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
 }
@@ -2051,7 +2064,7 @@
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	int err;
 
-	if (!f2fs_sb_has_encrypt(inode->i_sb))
+	if (!f2fs_sb_has_encrypt(sbi))
 		return -EOPNOTSUPP;
 
 	err = mnt_want_write_file(filp);
@@ -2155,7 +2168,7 @@
 	}
 
 	ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
-	range.start += sbi->blocks_per_seg;
+	range.start += BLKS_PER_SEC(sbi);
 	if (range.start <= end)
 		goto do_more;
 out:
@@ -2197,7 +2210,8 @@
 {
 	struct inode *inode = file_inode(filp);
 	struct f2fs_map_blocks map = { .m_next_extent = NULL,
-					.m_seg_type = NO_CHECK_TYPE };
+					.m_seg_type = NO_CHECK_TYPE,
+					.m_may_create = false };
 	struct extent_info ei = {0, 0, 0};
 	pgoff_t pg_start, pg_end, next_pgofs;
 	unsigned int blk_per_seg = sbi->blocks_per_seg;
@@ -2560,7 +2574,7 @@
 		return -EFAULT;
 
 	if (sbi->s_ndevs <= 1 || sbi->s_ndevs - 1 <= range.dev_num ||
-			sbi->segs_per_sec != 1) {
+			__is_large_section(sbi)) {
 		f2fs_msg(sbi->sb, KERN_WARNING,
 			"Can't flush %u in %d for segs_per_sec %u != 1\n",
 				range.dev_num, sbi->s_ndevs,
@@ -2635,12 +2649,11 @@
 	struct inode *inode = file_inode(filp);
 	struct f2fs_inode_info *fi = F2FS_I(inode);
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct super_block *sb = sbi->sb;
 	struct page *ipage;
 	kprojid_t kprojid;
 	int err;
 
-	if (!f2fs_sb_has_project_quota(sb)) {
+	if (!f2fs_sb_has_project_quota(sbi)) {
 		if (projid != F2FS_DEF_PROJID)
 			return -EOPNOTSUPP;
 		else
@@ -2757,7 +2770,7 @@
 	fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags &
 				F2FS_FL_USER_VISIBLE);
 
-	if (f2fs_sb_has_project_quota(inode->i_sb))
+	if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
 		fa.fsx_projid = (__u32)from_kprojid(&init_user_ns,
 							fi->i_projid);
 
@@ -2932,6 +2945,7 @@
 	map.m_next_pgofs = NULL;
 	map.m_next_extent = &m_next_extent;
 	map.m_seg_type = NO_CHECK_TYPE;
+	map.m_may_create = false;
 	end = F2FS_I_SB(inode)->max_file_blocks;
 
 	while (map.m_lblk < end) {
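
Note: F2FS_GOING_DOWN_NEED_FSCK adds a shutdown mode that tags the superblock with SBI_NEED_FSCK and then writes a checkpoint. A hypothetical userspace sketch (assumes <sys/ioctl.h> and the F2FS_IOC_SHUTDOWN definition from the kernel header; error handling elided):

	__u32 mode = 0x4;	/* F2FS_GOING_DOWN_NEED_FSCK, per the new define */
	if (ioctl(fd, F2FS_IOC_SHUTDOWN, &mode) < 0)
		perror("F2FS_IOC_SHUTDOWN");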
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index a07241f..195cf0f 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -142,7 +142,7 @@
 			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
 	if (IS_ERR(gc_th->f2fs_gc_task)) {
 		err = PTR_ERR(gc_th->f2fs_gc_task);
-		kfree(gc_th);
+		kvfree(gc_th);
 		sbi->gc_thread = NULL;
 	}
 out:
@@ -155,7 +155,7 @@
 	if (!gc_th)
 		return;
 	kthread_stop(gc_th->f2fs_gc_task);
-	kfree(gc_th);
+	kvfree(gc_th);
 	sbi->gc_thread = NULL;
 }
 
@@ -323,8 +323,7 @@
 	p.min_cost = get_max_cost(sbi, &p);
 
 	if (*result != NULL_SEGNO) {
-		if (IS_DATASEG(get_seg_entry(sbi, *result)->type) &&
-			get_valid_blocks(sbi, *result, false) &&
+		if (get_valid_blocks(sbi, *result, false) &&
 			!sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
 			p.min_segno = *result;
 		goto out;
@@ -333,6 +332,22 @@
 	if (p.max_search == 0)
 		goto out;
 
+	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
+		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
+			p.min_segno = sbi->next_victim_seg[BG_GC];
+			*result = p.min_segno;
+			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
+			goto got_result;
+		}
+		if (gc_type == FG_GC &&
+				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
+			p.min_segno = sbi->next_victim_seg[FG_GC];
+			*result = p.min_segno;
+			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
+			goto got_result;
+		}
+	}
+
 	last_victim = sm->last_victim[p.gc_mode];
 	if (p.alloc_mode == LFS && gc_type == FG_GC) {
 		p.min_segno = check_bg_victims(sbi);
@@ -395,6 +410,8 @@
 	}
 	if (p.min_segno != NULL_SEGNO) {
 got_it:
+		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
+got_result:
 		if (p.alloc_mode == LFS) {
 			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
 			if (gc_type == FG_GC)
@@ -402,13 +419,13 @@
 			else
 				set_bit(secno, dirty_i->victim_secmap);
 		}
-		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
 
+	}
+out:
+	if (p.min_segno != NULL_SEGNO)
 		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
 				sbi->cur_victim_sec,
 				prefree_segments(sbi), free_segments(sbi));
-	}
-out:
 	mutex_unlock(&dirty_i->seglist_lock);
 
 	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
@@ -658,6 +675,14 @@
 	fio.page = page;
 	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
 
+	/*
+	 * don't cache encrypted data into the meta inode until previous dirty
+	 * data has been written back, to avoid racing between GC and flush.
+	 */
+	f2fs_wait_on_page_writeback(page, DATA, true, true);
+
+	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
+
 	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
 					dn.data_blkaddr,
 					FGP_LOCK | FGP_CREAT, GFP_NOFS);
@@ -743,7 +768,9 @@
 	 * don't cache encrypted data into the meta inode until previous dirty
 	 * data has been written back, to avoid racing between GC and flush.
 	 */
-	f2fs_wait_on_page_writeback(page, DATA, true);
+	f2fs_wait_on_page_writeback(page, DATA, true, true);
+
+	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
 
 	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
 	if (err)
@@ -802,8 +829,8 @@
 	}
 
 write_page:
+	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
 	set_page_dirty(fio.encrypted_page);
-	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
 	if (clear_page_dirty_for_io(fio.encrypted_page))
 		dec_page_count(fio.sbi, F2FS_DIRTY_META);
 
@@ -811,7 +838,7 @@
 	ClearPageError(page);
 
 	/* allocate block address */
-	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
+	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);
 
 	fio.op = REQ_OP_WRITE;
 	fio.op_flags = REQ_SYNC;
@@ -897,8 +924,9 @@
 		bool is_dirty = PageDirty(page);
 
 retry:
+		f2fs_wait_on_page_writeback(page, DATA, true, true);
+
 		set_page_dirty(page);
-		f2fs_wait_on_page_writeback(page, DATA, true);
 		if (clear_page_dirty_for_io(page)) {
 			inode_dec_dirty_pages(inode);
 			f2fs_remove_dirty_inode(inode);
@@ -1093,15 +1121,18 @@
 	struct blk_plug plug;
 	unsigned int segno = start_segno;
 	unsigned int end_segno = start_segno + sbi->segs_per_sec;
-	int seg_freed = 0;
+	int seg_freed = 0, migrated = 0;
 	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
 						SUM_TYPE_DATA : SUM_TYPE_NODE;
 	int submitted = 0;
 
+	if (__is_large_section(sbi))
+		end_segno = rounddown(end_segno, sbi->segs_per_sec);
+
 	/* readahead multi ssa blocks those have contiguous address */
-	if (sbi->segs_per_sec > 1)
+	if (__is_large_section(sbi))
 		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
-					sbi->segs_per_sec, META_SSA, true);
+					end_segno - segno, META_SSA, true);
 
 	/* reference all summary page */
 	while (segno < end_segno) {
@@ -1130,10 +1161,13 @@
 					GET_SUM_BLOCK(sbi, segno));
 		f2fs_put_page(sum_page, 0);
 
-		if (get_valid_blocks(sbi, segno, false) == 0 ||
-				!PageUptodate(sum_page) ||
-				unlikely(f2fs_cp_error(sbi)))
-			goto next;
+		if (get_valid_blocks(sbi, segno, false) == 0)
+			goto freed;
+		if (__is_large_section(sbi) &&
+				migrated >= sbi->migration_granularity)
+			goto skip;
+		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
+			goto skip;
 
 		sum = page_address(sum_page);
 		if (type != GET_SUM_TYPE((&sum->footer))) {
@@ -1141,7 +1175,7 @@
 				"type [%d, %d] in SSA and SIT",
 				segno, type, GET_SUM_TYPE((&sum->footer)));
 			set_sbi_flag(sbi, SBI_NEED_FSCK);
-			goto next;
+			goto skip;
 		}
 
 		/*
@@ -1160,10 +1194,15 @@
 
 		stat_inc_seg_count(sbi, type, gc_type);
 
+freed:
 		if (gc_type == FG_GC &&
 				get_valid_blocks(sbi, segno, false) == 0)
 			seg_freed++;
-next:
+		migrated++;
+
+		if (__is_large_section(sbi) && segno + 1 < end_segno)
+			sbi->next_victim_seg[gc_type] = segno + 1;
+skip:
 		f2fs_put_page(sum_page, 0);
 	}
 
@@ -1307,7 +1346,7 @@
 	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;
 
 	/* give warm/cold data area from slower device */
-	if (sbi->s_ndevs && sbi->segs_per_sec == 1)
+	if (sbi->s_ndevs && !__is_large_section(sbi))
 		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
 				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
 }
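
Note: for large sections, do_garbage_collect() now migrates at most sbi->migration_granularity segments per round and records where it stopped in sbi->next_victim_seg[gc_type]; get_victim() consults that slot before any victim search, so the next round resumes inside the same section instead of re-selecting. Condensed from the hunks above:

	/* producer: stop after migration_granularity segments */
	if (__is_large_section(sbi) && migrated >= sbi->migration_granularity)
		goto skip;
	if (__is_large_section(sbi) && segno + 1 < end_segno)
		sbi->next_victim_seg[gc_type] = segno + 1;

	/* consumer: resume from the recorded segment before searching */
	if (sbi->next_victim_seg[gc_type] != NULL_SEGNO) {
		p.min_segno = sbi->next_victim_seg[gc_type];
		sbi->next_victim_seg[gc_type] = NULL_SEGNO;
		goto got_result;
	}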
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 3638927..b8676a2 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -73,7 +73,7 @@
 
 	addr = inline_data_addr(inode, ipage);
 
-	f2fs_wait_on_page_writeback(ipage, NODE, true);
+	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
 	memset(addr + from, 0, MAX_INLINE_DATA(inode) - from);
 	set_page_dirty(ipage);
 
@@ -179,7 +179,7 @@
 	fio.old_blkaddr = dn->data_blkaddr;
 	set_inode_flag(dn->inode, FI_HOT_DATA);
 	f2fs_outplace_write_data(dn, &fio);
-	f2fs_wait_on_page_writeback(page, DATA, true);
+	f2fs_wait_on_page_writeback(page, DATA, true, true);
 	if (dirty) {
 		inode_dec_dirty_pages(dn->inode);
 		f2fs_remove_dirty_inode(dn->inode);
@@ -254,7 +254,7 @@
 
 	f2fs_bug_on(F2FS_I_SB(inode), page->index);
 
-	f2fs_wait_on_page_writeback(dn.inode_page, NODE, true);
+	f2fs_wait_on_page_writeback(dn.inode_page, NODE, true, true);
 	src_addr = kmap_atomic(page);
 	dst_addr = inline_data_addr(inode, dn.inode_page);
 	memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode));
@@ -295,7 +295,7 @@
 		ipage = f2fs_get_node_page(sbi, inode->i_ino);
 		f2fs_bug_on(sbi, IS_ERR(ipage));
 
-		f2fs_wait_on_page_writeback(ipage, NODE, true);
+		f2fs_wait_on_page_writeback(ipage, NODE, true, true);
 
 		src_addr = inline_data_addr(inode, npage);
 		dst_addr = inline_data_addr(inode, ipage);
@@ -409,7 +409,7 @@
 		goto out;
 	}
 
-	f2fs_wait_on_page_writeback(page, DATA, true);
+	f2fs_wait_on_page_writeback(page, DATA, true, true);
 
 	dentry_blk = page_address(page);
 
@@ -519,18 +519,18 @@
 
 	stat_dec_inline_dir(dir);
 	clear_inode_flag(dir, FI_INLINE_DENTRY);
-	kfree(backup_dentry);
+	kvfree(backup_dentry);
 	return 0;
 recover:
 	lock_page(ipage);
-	f2fs_wait_on_page_writeback(ipage, NODE, true);
+	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
 	memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA(dir));
 	f2fs_i_depth_write(dir, 0);
 	f2fs_i_size_write(dir, MAX_INLINE_DATA(dir));
 	set_page_dirty(ipage);
 	f2fs_put_page(ipage, 1);
 
-	kfree(backup_dentry);
+	kvfree(backup_dentry);
 	return err;
 }
 
@@ -583,7 +583,7 @@
 		}
 	}
 
-	f2fs_wait_on_page_writeback(ipage, NODE, true);
+	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
 
 	name_hash = f2fs_dentry_hash(new_name, NULL);
 	f2fs_update_dentry(ino, mode, &d, new_name, name_hash, bit_pos);
@@ -615,7 +615,7 @@
 	int i;
 
 	lock_page(page);
-	f2fs_wait_on_page_writeback(page, NODE, true);
+	f2fs_wait_on_page_writeback(page, NODE, true, true);
 
 	inline_dentry = inline_data_addr(dir, page);
 	make_dentry_ptr_inline(dir, &d, inline_dentry);
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 91ceee0..bec5296 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -103,7 +103,7 @@
 
 	while (start < end) {
 		if (*start++) {
-			f2fs_wait_on_page_writeback(ipage, NODE, true);
+			f2fs_wait_on_page_writeback(ipage, NODE, true, true);
 
 			set_inode_flag(inode, FI_DATA_EXIST);
 			set_raw_inline(inode, F2FS_INODE(ipage));
@@ -118,7 +118,7 @@
 {
 	struct f2fs_inode *ri = &F2FS_NODE(page)->i;
 
-	if (!f2fs_sb_has_inode_chksum(sbi->sb))
+	if (!f2fs_sb_has_inode_chksum(sbi))
 		return false;
 
 	if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
@@ -218,7 +218,7 @@
 		return false;
 	}
 
-	if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)
+	if (f2fs_sb_has_flexible_inline_xattr(sbi)
 			&& !f2fs_has_extra_attr(inode)) {
 		set_sbi_flag(sbi, SBI_NEED_FSCK);
 		f2fs_msg(sbi->sb, KERN_WARNING,
@@ -228,7 +228,7 @@
 	}
 
 	if (f2fs_has_extra_attr(inode) &&
-			!f2fs_sb_has_extra_attr(sbi->sb)) {
+			!f2fs_sb_has_extra_attr(sbi)) {
 		set_sbi_flag(sbi, SBI_NEED_FSCK);
 		f2fs_msg(sbi->sb, KERN_WARNING,
 			"%s: inode (ino=%lx) is with extra_attr, "
@@ -340,7 +340,7 @@
 	fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
 					le16_to_cpu(ri->i_extra_isize) : 0;
 
-	if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)) {
+	if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
 		fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
 	} else if (f2fs_has_inline_xattr(inode) ||
 				f2fs_has_inline_dentry(inode)) {
@@ -390,14 +390,14 @@
 	if (fi->i_flags & F2FS_PROJINHERIT_FL)
 		set_inode_flag(inode, FI_PROJ_INHERIT);
 
-	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi->sb) &&
+	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi) &&
 			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
 		i_projid = (projid_t)le32_to_cpu(ri->i_projid);
 	else
 		i_projid = F2FS_DEF_PROJID;
 	fi->i_projid = make_kprojid(&init_user_ns, i_projid);
 
-	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi->sb) &&
+	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi) &&
 			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
 		fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
 		fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
@@ -497,7 +497,7 @@
 	struct f2fs_inode *ri;
 	struct extent_tree *et = F2FS_I(inode)->extent_tree;
 
-	f2fs_wait_on_page_writeback(node_page, NODE, true);
+	f2fs_wait_on_page_writeback(node_page, NODE, true, true);
 	set_page_dirty(node_page);
 
 	f2fs_inode_synced(inode);
@@ -542,11 +542,11 @@
 	if (f2fs_has_extra_attr(inode)) {
 		ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);
 
-		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)->sb))
+		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)))
 			ri->i_inline_xattr_size =
 				cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);
 
-		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)->sb) &&
+		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
 			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
 								i_projid)) {
 			projid_t i_projid;
@@ -556,7 +556,7 @@
 			ri->i_projid = cpu_to_le32(i_projid);
 		}
 
-		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)->sb) &&
+		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
 			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
 								i_crtime)) {
 			ri->i_crtime =
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 99299ed..62d9829 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -61,7 +61,7 @@
 		goto fail;
 	}
 
-	if (f2fs_sb_has_project_quota(sbi->sb) &&
+	if (f2fs_sb_has_project_quota(sbi) &&
 		(F2FS_I(dir)->i_flags & F2FS_PROJINHERIT_FL))
 		F2FS_I(inode)->i_projid = F2FS_I(dir)->i_projid;
 	else
@@ -79,7 +79,7 @@
 				f2fs_may_encrypt(inode))
 		f2fs_set_encrypted_inode(inode);
 
-	if (f2fs_sb_has_extra_attr(sbi->sb)) {
+	if (f2fs_sb_has_extra_attr(sbi)) {
 		set_inode_flag(inode, FI_EXTRA_ATTR);
 		F2FS_I(inode)->i_extra_isize = F2FS_TOTAL_EXTRA_ATTR_SIZE;
 	}
@@ -92,7 +92,7 @@
 	if (f2fs_may_inline_dentry(inode))
 		set_inode_flag(inode, FI_INLINE_DENTRY);
 
-	if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)) {
+	if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
 		f2fs_bug_on(sbi, !f2fs_has_extra_attr(inode));
 		if (f2fs_has_inline_xattr(inode))
 			xattr_size = F2FS_OPTION(sbi).inline_xattr_size;
@@ -635,7 +635,7 @@
 	f2fs_handle_failed_inode(inode);
 out_free_encrypted_link:
 	if (disk_link.name != (unsigned char *)symname)
-		kfree(disk_link.name);
+		kvfree(disk_link.name);
 	return err;
 }
 
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 2b342064..6162d2c 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -826,6 +826,7 @@
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
 	struct node_info ni;
 	int err;
+	pgoff_t index;
 
 	err = f2fs_get_node_info(sbi, dn->nid, &ni);
 	if (err)
@@ -845,10 +846,11 @@
 	clear_node_page_dirty(dn->node_page);
 	set_sbi_flag(sbi, SBI_IS_DIRTY);
 
+	index = dn->node_page->index;
 	f2fs_put_page(dn->node_page, 1);
 
 	invalidate_mapping_pages(NODE_MAPPING(sbi),
-			dn->node_page->index, dn->node_page->index);
+			index, index);
 
 	dn->node_page = NULL;
 	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
@@ -1104,7 +1106,7 @@
 				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
 			lock_page(page);
 			BUG_ON(page->mapping != NODE_MAPPING(sbi));
-			f2fs_wait_on_page_writeback(page, NODE, true);
+			f2fs_wait_on_page_writeback(page, NODE, true, true);
 			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
 			set_page_dirty(page);
 			unlock_page(page);
@@ -1232,7 +1234,7 @@
 	new_ni.version = 0;
 	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
 
-	f2fs_wait_on_page_writeback(page, NODE, true);
+	f2fs_wait_on_page_writeback(page, NODE, true, true);
 	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
 	set_cold_node(page, S_ISDIR(dn->inode->i_mode));
 	if (!PageUptodate(page))
@@ -1598,10 +1600,10 @@
 			.for_reclaim = 0,
 		};
 
-		set_page_dirty(node_page);
-		f2fs_wait_on_page_writeback(node_page, NODE, true);
+		f2fs_wait_on_page_writeback(node_page, NODE, true, true);
 
-		f2fs_bug_on(F2FS_P_SB(node_page), PageWriteback(node_page));
+		set_page_dirty(node_page);
+
 		if (!clear_page_dirty_for_io(node_page)) {
 			err = -EAGAIN;
 			goto out_page;
@@ -1689,8 +1691,7 @@
 				goto continue_unlock;
 			}
 
-			f2fs_wait_on_page_writeback(page, NODE, true);
-			BUG_ON(PageWriteback(page));
+			f2fs_wait_on_page_writeback(page, NODE, true, true);
 
 			set_fsync_mark(page, 0);
 			set_dentry_mark(page, 0);
@@ -1741,7 +1742,7 @@
 			"Retry to write fsync mark: ino=%u, idx=%lx",
 					ino, last_page->index);
 		lock_page(last_page);
-		f2fs_wait_on_page_writeback(last_page, NODE, true);
+		f2fs_wait_on_page_writeback(last_page, NODE, true, true);
 		set_page_dirty(last_page);
 		unlock_page(last_page);
 		goto retry;
@@ -1822,9 +1823,8 @@
 				goto lock_node;
 			}
 
-			f2fs_wait_on_page_writeback(page, NODE, true);
+			f2fs_wait_on_page_writeback(page, NODE, true, true);
 
-			BUG_ON(PageWriteback(page));
 			if (!clear_page_dirty_for_io(page))
 				goto continue_unlock;
 
@@ -1891,7 +1891,7 @@
 		get_page(page);
 		spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
 
-		f2fs_wait_on_page_writeback(page, NODE, true);
+		f2fs_wait_on_page_writeback(page, NODE, true, false);
 		if (TestClearPageError(page))
 			ret = -EIO;
 
@@ -2469,7 +2469,7 @@
 	src_addr = inline_xattr_addr(inode, page);
 	inline_size = inline_xattr_size(inode);
 
-	f2fs_wait_on_page_writeback(ipage, NODE, true);
+	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
 	memcpy(dst_addr, src_addr, inline_size);
 update_inode:
 	f2fs_update_inode(inode, ipage);
@@ -2563,17 +2563,17 @@
 	if (dst->i_inline & F2FS_EXTRA_ATTR) {
 		dst->i_extra_isize = src->i_extra_isize;
 
-		if (f2fs_sb_has_flexible_inline_xattr(sbi->sb) &&
+		if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
 			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
 							i_inline_xattr_size))
 			dst->i_inline_xattr_size = src->i_inline_xattr_size;
 
-		if (f2fs_sb_has_project_quota(sbi->sb) &&
+		if (f2fs_sb_has_project_quota(sbi) &&
 			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
 								i_projid))
 			dst->i_projid = src->i_projid;
 
-		if (f2fs_sb_has_inode_crtime(sbi->sb) &&
+		if (f2fs_sb_has_inode_crtime(sbi) &&
 			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
 							i_crtime_nsec)) {
 			dst->i_crtime = src->i_crtime;
@@ -3115,17 +3115,17 @@
 
 		for (i = 0; i < nm_i->nat_blocks; i++)
 			kvfree(nm_i->free_nid_bitmap[i]);
-		kfree(nm_i->free_nid_bitmap);
+		kvfree(nm_i->free_nid_bitmap);
 	}
 	kvfree(nm_i->free_nid_count);
 
-	kfree(nm_i->nat_bitmap);
-	kfree(nm_i->nat_bits);
+	kvfree(nm_i->nat_bitmap);
+	kvfree(nm_i->nat_bits);
 #ifdef CONFIG_F2FS_CHECK_FS
-	kfree(nm_i->nat_bitmap_mir);
+	kvfree(nm_i->nat_bitmap_mir);
 #endif
 	sbi->nm_info = NULL;
-	kfree(nm_i);
+	kvfree(nm_i);
 }
 
 int __init f2fs_create_node_manager_caches(void)
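
Note: the truncate_node() hunk above fixes a use-after-free: f2fs_put_page() may drop the last reference to dn->node_page, so its ->index must be copied while the reference is still held. The pattern:

	pgoff_t index = dn->node_page->index;	/* read while a ref is held */
	f2fs_put_page(dn->node_page, 1);	/* may free the page */
	invalidate_mapping_pages(NODE_MAPPING(sbi), index, index);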
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index 1c73d87..e05af5d 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -361,7 +361,7 @@
 {
 	struct f2fs_node *rn = F2FS_NODE(p);
 
-	f2fs_wait_on_page_writeback(p, NODE, true);
+	f2fs_wait_on_page_writeback(p, NODE, true, true);
 
 	if (i)
 		rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid);
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 1dfb17f..e3883db 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -250,7 +250,7 @@
 	i_gid_write(inode, le32_to_cpu(raw->i_gid));
 
 	if (raw->i_inline & F2FS_EXTRA_ATTR) {
-		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)->sb) &&
+		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
 			F2FS_FITS_IN_INODE(raw, le16_to_cpu(raw->i_extra_isize),
 								i_projid)) {
 			projid_t i_projid;
@@ -539,7 +539,7 @@
 		goto out;
 	}
 
-	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
+	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);
 
 	err = f2fs_get_node_info(sbi, dn.nid, &ni);
 	if (err)
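
Note: calls such as f2fs_sb_has_project_quota(F2FS_I_SB(inode)) above follow from the reworked F2FS_FEATURE_FUNCS macro in the f2fs.h hunk, whose generated helpers now take an sbi. One instantiation, expanded for reference (macro text from this patch; the PRJQUOTA flag name matches the existing feature defines):

	static inline int f2fs_sb_has_project_quota(struct f2fs_sb_info *sbi)
	{
		return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_PRJQUOTA);
	}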
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 6edcf83..9b79056 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -229,7 +229,7 @@
 
 		lock_page(page);
 
-		f2fs_wait_on_page_writeback(page, DATA, true);
+		f2fs_wait_on_page_writeback(page, DATA, true, true);
 
 		if (recover) {
 			struct dnode_of_data dn;
@@ -387,8 +387,9 @@
 		if (page->mapping == inode->i_mapping) {
 			trace_f2fs_commit_inmem_page(page, INMEM);
 
+			f2fs_wait_on_page_writeback(page, DATA, true, true);
+
 			set_page_dirty(page);
-			f2fs_wait_on_page_writeback(page, DATA, true);
 			if (clear_page_dirty_for_io(page)) {
 				inode_dec_dirty_pages(inode);
 				f2fs_remove_dirty_inode(inode);
@@ -620,14 +621,16 @@
 		return 0;
 
 	if (!test_opt(sbi, FLUSH_MERGE)) {
+		atomic_inc(&fcc->queued_flush);
 		ret = submit_flush_wait(sbi, ino);
+		atomic_dec(&fcc->queued_flush);
 		atomic_inc(&fcc->issued_flush);
 		return ret;
 	}
 
-	if (atomic_inc_return(&fcc->issing_flush) == 1 || sbi->s_ndevs > 1) {
+	if (atomic_inc_return(&fcc->queued_flush) == 1 || sbi->s_ndevs > 1) {
 		ret = submit_flush_wait(sbi, ino);
-		atomic_dec(&fcc->issing_flush);
+		atomic_dec(&fcc->queued_flush);
 
 		atomic_inc(&fcc->issued_flush);
 		return ret;
@@ -646,14 +649,14 @@
 
 	if (fcc->f2fs_issue_flush) {
 		wait_for_completion(&cmd.wait);
-		atomic_dec(&fcc->issing_flush);
+		atomic_dec(&fcc->queued_flush);
 	} else {
 		struct llist_node *list;
 
 		list = llist_del_all(&fcc->issue_list);
 		if (!list) {
 			wait_for_completion(&cmd.wait);
-			atomic_dec(&fcc->issing_flush);
+			atomic_dec(&fcc->queued_flush);
 		} else {
 			struct flush_cmd *tmp, *next;
 
@@ -662,7 +665,7 @@
 			llist_for_each_entry_safe(tmp, next, list, llnode) {
 				if (tmp == &cmd) {
 					cmd.ret = ret;
-					atomic_dec(&fcc->issing_flush);
+					atomic_dec(&fcc->queued_flush);
 					continue;
 				}
 				tmp->ret = ret;
@@ -691,7 +694,7 @@
 	if (!fcc)
 		return -ENOMEM;
 	atomic_set(&fcc->issued_flush, 0);
-	atomic_set(&fcc->issing_flush, 0);
+	atomic_set(&fcc->queued_flush, 0);
 	init_waitqueue_head(&fcc->flush_wait_queue);
 	init_llist_head(&fcc->issue_list);
 	SM_I(sbi)->fcc_info = fcc;
@@ -703,7 +706,7 @@
 				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
 	if (IS_ERR(fcc->f2fs_issue_flush)) {
 		err = PTR_ERR(fcc->f2fs_issue_flush);
-		kfree(fcc);
+		kvfree(fcc);
 		SM_I(sbi)->fcc_info = NULL;
 		return err;
 	}
@@ -722,7 +725,7 @@
 		kthread_stop(flush_thread);
 	}
 	if (free) {
-		kfree(fcc);
+		kvfree(fcc);
 		SM_I(sbi)->fcc_info = NULL;
 	}
 }
@@ -907,7 +910,7 @@
 	dc->len = len;
 	dc->ref = 0;
 	dc->state = D_PREP;
-	dc->issuing = 0;
+	dc->queued = 0;
 	dc->error = 0;
 	init_completion(&dc->wait);
 	list_add_tail(&dc->list, pend_list);
@@ -940,7 +943,7 @@
 							struct discard_cmd *dc)
 {
 	if (dc->state == D_DONE)
-		atomic_sub(dc->issuing, &dcc->issing_discard);
+		atomic_sub(dc->queued, &dcc->queued_discard);
 
 	list_del(&dc->list);
 	rb_erase_cached(&dc->rb_node, &dcc->root);
@@ -1143,12 +1146,12 @@
 		dc->bio_ref++;
 		spin_unlock_irqrestore(&dc->lock, flags);
 
-		atomic_inc(&dcc->issing_discard);
-		dc->issuing++;
+		atomic_inc(&dcc->queued_discard);
+		dc->queued++;
 		list_move_tail(&dc->list, wait_list);
 
 		/* sanity check on discard range */
-		__check_sit_bitmap(sbi, start, start + len);
+		__check_sit_bitmap(sbi, lstart, lstart + len);
 
 		bio->bi_private = dc;
 		bio->bi_end_io = f2fs_submit_discard_endio;
@@ -1649,6 +1652,10 @@
 		if (dcc->discard_wake)
 			dcc->discard_wake = 0;
 
+		/* clean up pending candidates before going to sleep */
+		if (atomic_read(&dcc->queued_discard))
+			__wait_all_discard_cmd(sbi, NULL);
+
 		if (try_to_freeze())
 			continue;
 		if (f2fs_readonly(sbi->sb))
@@ -1734,7 +1741,7 @@
 		struct block_device *bdev, block_t blkstart, block_t blklen)
 {
 #ifdef CONFIG_BLK_DEV_ZONED
-	if (f2fs_sb_has_blkzoned(sbi->sb) &&
+	if (f2fs_sb_has_blkzoned(sbi) &&
 				bdev_zoned_model(bdev) != BLK_ZONED_NONE)
 		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
 #endif
@@ -1882,7 +1889,7 @@
 	unsigned int start = 0, end = -1;
 	unsigned int secno, start_segno;
 	bool force = (cpc->reason & CP_DISCARD);
-	bool need_align = test_opt(sbi, LFS) && sbi->segs_per_sec > 1;
+	bool need_align = test_opt(sbi, LFS) && __is_large_section(sbi);
 
 	mutex_lock(&dirty_i->seglist_lock);
 
@@ -1914,7 +1921,7 @@
 					(end - 1) <= cpc->trim_end)
 				continue;
 
-		if (!test_opt(sbi, LFS) || sbi->segs_per_sec == 1) {
+		if (!test_opt(sbi, LFS) || !__is_large_section(sbi)) {
 			f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
 				(end - start) << sbi->log_blocks_per_seg);
 			continue;
@@ -1946,7 +1953,7 @@
 					sbi->blocks_per_seg, cur_pos);
 			len = next_pos - cur_pos;
 
-			if (f2fs_sb_has_blkzoned(sbi->sb) ||
+			if (f2fs_sb_has_blkzoned(sbi) ||
 			    (force && len < cpc->trim_minlen))
 				goto skip;
 
@@ -1994,7 +2001,7 @@
 	INIT_LIST_HEAD(&dcc->fstrim_list);
 	mutex_init(&dcc->cmd_lock);
 	atomic_set(&dcc->issued_discard, 0);
-	atomic_set(&dcc->issing_discard, 0);
+	atomic_set(&dcc->queued_discard, 0);
 	atomic_set(&dcc->discard_cmd_cnt, 0);
 	dcc->nr_discards = 0;
 	dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
@@ -2010,7 +2017,7 @@
 				"f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
 	if (IS_ERR(dcc->f2fs_issue_discard)) {
 		err = PTR_ERR(dcc->f2fs_issue_discard);
-		kfree(dcc);
+		kvfree(dcc);
 		SM_I(sbi)->dcc_info = NULL;
 		return err;
 	}
@@ -2027,7 +2034,7 @@
 
 	f2fs_stop_discard_thread(sbi);
 
-	kfree(dcc);
+	kvfree(dcc);
 	SM_I(sbi)->dcc_info = NULL;
 }
 
@@ -2146,7 +2153,7 @@
 	/* update total number of valid blocks to be written in ckpt area */
 	SIT_I(sbi)->written_valid_blocks += del;
 
-	if (sbi->segs_per_sec > 1)
+	if (__is_large_section(sbi))
 		get_sec_entry(sbi, segno)->valid_blocks += del;
 }
 
@@ -2412,7 +2419,7 @@
 static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
 {
 	/* if segs_per_sec is larger than 1, we need to keep the original policy. */
-	if (sbi->segs_per_sec != 1)
+	if (__is_large_section(sbi))
 		return CURSEG_I(sbi, type)->segno;
 
 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
@@ -2722,7 +2729,7 @@
 	struct discard_policy dpolicy;
 	unsigned long long trimmed = 0;
 	int err = 0;
-	bool need_align = test_opt(sbi, LFS) && sbi->segs_per_sec > 1;
+	bool need_align = test_opt(sbi, LFS) && __is_large_section(sbi);
 
 	if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
 		return -EINVAL;
@@ -3272,16 +3279,18 @@
 }
 
 void f2fs_wait_on_page_writeback(struct page *page,
-				enum page_type type, bool ordered)
+				enum page_type type, bool ordered, bool locked)
 {
 	if (PageWriteback(page)) {
 		struct f2fs_sb_info *sbi = F2FS_P_SB(page);
 
 		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type);
-		if (ordered)
+		if (ordered) {
 			wait_on_page_writeback(page);
-		else
+			f2fs_bug_on(sbi, locked && PageWriteback(page));
+		} else {
 			wait_for_stable_page(page);
+		}
 	}
 }
 
@@ -3298,7 +3307,7 @@
 
 	cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
 	if (cpage) {
-		f2fs_wait_on_page_writeback(cpage, DATA, true);
+		f2fs_wait_on_page_writeback(cpage, DATA, true, true);
 		f2fs_put_page(cpage, 1);
 	}
 }
@@ -3880,7 +3889,7 @@
 	if (!sit_i->tmp_map)
 		return -ENOMEM;
 
-	if (sbi->segs_per_sec > 1) {
+	if (__is_large_section(sbi)) {
 		sit_i->sec_entries =
 			f2fs_kvzalloc(sbi, array_size(sizeof(struct sec_entry),
 						      MAIN_SECS(sbi)),
@@ -4035,7 +4044,7 @@
 					se->valid_blocks;
 			}
 
-			if (sbi->segs_per_sec > 1)
+			if (__is_large_section(sbi))
 				get_sec_entry(sbi, start)->valid_blocks +=
 							se->valid_blocks;
 		}
@@ -4079,7 +4088,7 @@
 			sbi->discard_blks -= se->valid_blocks;
 		}
 
-		if (sbi->segs_per_sec > 1) {
+		if (__is_large_section(sbi)) {
 			get_sec_entry(sbi, start)->valid_blocks +=
 							se->valid_blocks;
 			get_sec_entry(sbi, start)->valid_blocks -=
@@ -4314,7 +4323,7 @@
 
 	destroy_victim_secmap(sbi);
 	SM_I(sbi)->dirty_info = NULL;
-	kfree(dirty_i);
+	kvfree(dirty_i);
 }
 
 static void destroy_curseg(struct f2fs_sb_info *sbi)
@@ -4326,10 +4335,10 @@
 		return;
 	SM_I(sbi)->curseg_array = NULL;
 	for (i = 0; i < NR_CURSEG_TYPE; i++) {
-		kfree(array[i].sum_blk);
-		kfree(array[i].journal);
+		kvfree(array[i].sum_blk);
+		kvfree(array[i].journal);
 	}
-	kfree(array);
+	kvfree(array);
 }
 
 static void destroy_free_segmap(struct f2fs_sb_info *sbi)
@@ -4340,7 +4349,7 @@
 	SM_I(sbi)->free_info = NULL;
 	kvfree(free_i->free_segmap);
 	kvfree(free_i->free_secmap);
-	kfree(free_i);
+	kvfree(free_i);
 }
 
 static void destroy_sit_info(struct f2fs_sb_info *sbi)
@@ -4353,26 +4362,26 @@
 
 	if (sit_i->sentries) {
 		for (start = 0; start < MAIN_SEGS(sbi); start++) {
-			kfree(sit_i->sentries[start].cur_valid_map);
+			kvfree(sit_i->sentries[start].cur_valid_map);
 #ifdef CONFIG_F2FS_CHECK_FS
-			kfree(sit_i->sentries[start].cur_valid_map_mir);
+			kvfree(sit_i->sentries[start].cur_valid_map_mir);
 #endif
-			kfree(sit_i->sentries[start].ckpt_valid_map);
-			kfree(sit_i->sentries[start].discard_map);
+			kvfree(sit_i->sentries[start].ckpt_valid_map);
+			kvfree(sit_i->sentries[start].discard_map);
 		}
 	}
-	kfree(sit_i->tmp_map);
+	kvfree(sit_i->tmp_map);
 
 	kvfree(sit_i->sentries);
 	kvfree(sit_i->sec_entries);
 	kvfree(sit_i->dirty_sentries_bitmap);
 
 	SM_I(sbi)->sit_info = NULL;
-	kfree(sit_i->sit_bitmap);
+	kvfree(sit_i->sit_bitmap);
 #ifdef CONFIG_F2FS_CHECK_FS
-	kfree(sit_i->sit_bitmap_mir);
+	kvfree(sit_i->sit_bitmap_mir);
 #endif
-	kfree(sit_i);
+	kvfree(sit_i);
 }
 
 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi)
@@ -4388,7 +4397,7 @@
 	destroy_free_segmap(sbi);
 	destroy_sit_info(sbi);
 	sbi->sm_info = NULL;
-	kfree(sm_info);
+	kvfree(sm_info);
 }
 
 int __init f2fs_create_segment_manager_caches(void)
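
Note: f2fs_wait_on_page_writeback() gains a locked parameter; when the caller holds the page lock, the helper can assert via f2fs_bug_on() that writeback has really drained, which is also why several call sites in this patch now wait before set_page_dirty() rather than after. The new calling convention, sketched:

	f2fs_wait_on_page_writeback(page, DATA, true /* ordered */,
						true /* page lock held */);
	set_page_dirty(page);	/* redirty only once writeback has drained */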
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index ab3465f..a77f76f 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -333,7 +333,7 @@
 	 * In order to get # of valid blocks in a section instantly from many
 	 * segments, f2fs manages two counting structures separately.
 	 */
-	if (use_section && sbi->segs_per_sec > 1)
+	if (use_section && __is_large_section(sbi))
 		return get_sec_entry(sbi, segno)->valid_blocks;
 	else
 		return get_seg_entry(sbi, segno)->valid_blocks;
diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c
index 9e13db9..a467aca 100644
--- a/fs/f2fs/shrinker.c
+++ b/fs/f2fs/shrinker.c
@@ -135,6 +135,6 @@
 	f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));
 
 	spin_lock(&f2fs_list_lock);
-	list_del(&sbi->s_list);
+	list_del_init(&sbi->s_list);
 	spin_unlock(&f2fs_list_lock);
 }
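
Note: switching to list_del_init() pairs with the new list_empty(&sbi->s_list) test in f2fs_may_extent_tree() (see the f2fs.h hunk): plain list_del() leaves the node with poison pointers, for which list_empty() is meaningless, while list_del_init() re-initializes it so the test is well defined.

	list_del_init(&sbi->s_list);	/* node re-initialized: list_empty() is now true */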
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index af58b2c..14f033e 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -38,7 +38,7 @@
 
 #ifdef CONFIG_F2FS_FAULT_INJECTION
 
-char *f2fs_fault_name[FAULT_MAX] = {
+const char *f2fs_fault_name[FAULT_MAX] = {
 	[FAULT_KMALLOC]		= "kmalloc",
 	[FAULT_KVMALLOC]	= "kvmalloc",
 	[FAULT_PAGE_ALLOC]	= "page alloc",
@@ -259,7 +259,7 @@
 			"quota options when quota turned on");
 		return -EINVAL;
 	}
-	if (f2fs_sb_has_quota_ino(sb)) {
+	if (f2fs_sb_has_quota_ino(sbi)) {
 		f2fs_msg(sb, KERN_INFO,
 			"QUOTA feature is enabled, so ignore qf_name");
 		return 0;
@@ -289,7 +289,7 @@
 	set_opt(sbi, QUOTA);
 	return 0;
 errout:
-	kfree(qname);
+	kvfree(qname);
 	return ret;
 }
 
@@ -302,7 +302,7 @@
 			" when quota turned on");
 		return -EINVAL;
 	}
-	kfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
+	kvfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
 	F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
 	return 0;
 }
@@ -314,7 +314,7 @@
 	 * 'grpquota' mount options are allowed even without quota feature
 	 * to support legacy quotas in quota files.
 	 */
-	if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi->sb)) {
+	if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) {
 		f2fs_msg(sbi->sb, KERN_ERR, "Project quota feature not enabled. "
 			 "Cannot enable project quota enforcement.");
 		return -1;
@@ -348,7 +348,7 @@
 		}
 	}
 
-	if (f2fs_sb_has_quota_ino(sbi->sb) && F2FS_OPTION(sbi).s_jquota_fmt) {
+	if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) {
 		f2fs_msg(sbi->sb, KERN_INFO,
 			"QUOTA feature is enabled, so ignore jquota_fmt");
 		F2FS_OPTION(sbi).s_jquota_fmt = 0;
@@ -399,10 +399,10 @@
 				set_opt(sbi, BG_GC);
 				set_opt(sbi, FORCE_FG_GC);
 			} else {
-				kfree(name);
+				kvfree(name);
 				return -EINVAL;
 			}
-			kfree(name);
+			kvfree(name);
 			break;
 		case Opt_disable_roll_forward:
 			set_opt(sbi, DISABLE_ROLL_FORWARD);
@@ -417,7 +417,7 @@
 			set_opt(sbi, DISCARD);
 			break;
 		case Opt_nodiscard:
-			if (f2fs_sb_has_blkzoned(sb)) {
+			if (f2fs_sb_has_blkzoned(sbi)) {
 				f2fs_msg(sb, KERN_WARNING,
 					"discard is required for zoned block devices");
 				return -EINVAL;
@@ -566,11 +566,11 @@
 				return -ENOMEM;
 			if (strlen(name) == 8 &&
 					!strncmp(name, "adaptive", 8)) {
-				if (f2fs_sb_has_blkzoned(sb)) {
+				if (f2fs_sb_has_blkzoned(sbi)) {
 					f2fs_msg(sb, KERN_WARNING,
 						 "adaptive mode is not allowed with "
 						 "zoned block device feature");
-					kfree(name);
+					kvfree(name);
 					return -EINVAL;
 				}
 				set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
@@ -578,10 +578,10 @@
 					!strncmp(name, "lfs", 3)) {
 				set_opt_mode(sbi, F2FS_MOUNT_LFS);
 			} else {
-				kfree(name);
+				kvfree(name);
 				return -EINVAL;
 			}
-			kfree(name);
+			kvfree(name);
 			break;
 		case Opt_io_size_bits:
 			if (args->from && match_int(args, &arg))
@@ -714,10 +714,10 @@
 					!strncmp(name, "fs-based", 8)) {
 				F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS;
 			} else {
-				kfree(name);
+				kvfree(name);
 				return -EINVAL;
 			}
-			kfree(name);
+			kvfree(name);
 			break;
 		case Opt_alloc:
 			name = match_strdup(&args[0]);
@@ -731,10 +731,10 @@
 					!strncmp(name, "reuse", 5)) {
 				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
 			} else {
-				kfree(name);
+				kvfree(name);
 				return -EINVAL;
 			}
-			kfree(name);
+			kvfree(name);
 			break;
 		case Opt_fsync:
 			name = match_strdup(&args[0]);
@@ -751,14 +751,14 @@
 				F2FS_OPTION(sbi).fsync_mode =
 							FSYNC_MODE_NOBARRIER;
 			} else {
-				kfree(name);
+				kvfree(name);
 				return -EINVAL;
 			}
-			kfree(name);
+			kvfree(name);
 			break;
 		case Opt_test_dummy_encryption:
 #ifdef CONFIG_F2FS_FS_ENCRYPTION
-			if (!f2fs_sb_has_encrypt(sb)) {
+			if (!f2fs_sb_has_encrypt(sbi)) {
 				f2fs_msg(sb, KERN_ERR, "Encrypt feature is off");
 				return -EINVAL;
 			}
@@ -783,10 +783,10 @@
 					!strncmp(name, "disable", 7)) {
 				set_opt(sbi, DISABLE_CHECKPOINT);
 			} else {
-				kfree(name);
+				kvfree(name);
 				return -EINVAL;
 			}
-			kfree(name);
+			kvfree(name);
 			break;
 		default:
 			f2fs_msg(sb, KERN_ERR,
@@ -799,13 +799,13 @@
 	if (f2fs_check_quota_options(sbi))
 		return -EINVAL;
 #else
-	if (f2fs_sb_has_quota_ino(sbi->sb) && !f2fs_readonly(sbi->sb)) {
+	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) {
 		f2fs_msg(sbi->sb, KERN_INFO,
 			 "Filesystem with quota feature cannot be mounted RDWR "
 			 "without CONFIG_QUOTA");
 		return -EINVAL;
 	}
-	if (f2fs_sb_has_project_quota(sbi->sb) && !f2fs_readonly(sbi->sb)) {
+	if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) {
 		f2fs_msg(sb, KERN_ERR,
 			"Filesystem with project quota feature cannot be "
 			"mounted RDWR without CONFIG_QUOTA");
@@ -821,8 +821,8 @@
 	}
 
 	if (test_opt(sbi, INLINE_XATTR_SIZE)) {
-		if (!f2fs_sb_has_extra_attr(sb) ||
-			!f2fs_sb_has_flexible_inline_xattr(sb)) {
+		if (!f2fs_sb_has_extra_attr(sbi) ||
+			!f2fs_sb_has_flexible_inline_xattr(sbi)) {
 			f2fs_msg(sb, KERN_ERR,
 					"extra_attr or flexible_inline_xattr "
 					"feature is off");
@@ -1017,10 +1017,10 @@
 	for (i = 0; i < sbi->s_ndevs; i++) {
 		blkdev_put(FDEV(i).bdev, FMODE_EXCL);
 #ifdef CONFIG_BLK_DEV_ZONED
-		kfree(FDEV(i).blkz_type);
+		kvfree(FDEV(i).blkz_type);
 #endif
 	}
-	kfree(sbi->devs);
+	kvfree(sbi->devs);
 }
 
 static void f2fs_put_super(struct super_block *sb)
@@ -1058,9 +1058,6 @@
 		f2fs_write_checkpoint(sbi, &cpc);
 	}
 
-	/* f2fs_write_checkpoint can update stat informaion */
-	f2fs_destroy_stats(sbi);
-
 	/*
 	 * normally superblock is clean, so we need to release this.
 	 * In addition, EIO will skip do checkpoint, we need this as well.
@@ -1078,31 +1075,40 @@
 	f2fs_bug_on(sbi, sbi->fsync_node_num);
 
 	iput(sbi->node_inode);
+	sbi->node_inode = NULL;
+
 	iput(sbi->meta_inode);
+	sbi->meta_inode = NULL;
+
+	/*
+	 * iput() can update stat information, if f2fs_write_checkpoint()
+	 * above failed with error.
+	 */
+	f2fs_destroy_stats(sbi);
 
 	/* destroy f2fs internal modules */
 	f2fs_destroy_node_manager(sbi);
 	f2fs_destroy_segment_manager(sbi);
 
-	kfree(sbi->ckpt);
+	kvfree(sbi->ckpt);
 
 	f2fs_unregister_sysfs(sbi);
 
 	sb->s_fs_info = NULL;
 	if (sbi->s_chksum_driver)
 		crypto_free_shash(sbi->s_chksum_driver);
-	kfree(sbi->raw_super);
+	kvfree(sbi->raw_super);
 
 	destroy_device_list(sbi);
 	mempool_destroy(sbi->write_io_dummy);
 #ifdef CONFIG_QUOTA
 	for (i = 0; i < MAXQUOTAS; i++)
-		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
+		kvfree(F2FS_OPTION(sbi).s_qf_names[i]);
 #endif
 	destroy_percpu_info(sbi);
 	for (i = 0; i < NR_PAGE_TYPE; i++)
-		kfree(sbi->write_io[i]);
-	kfree(sbi);
+		kvfree(sbi->write_io[i]);
+	kvfree(sbi);
 }
 
 int f2fs_sync_fs(struct super_block *sb, int sync)
@@ -1431,7 +1437,7 @@
 	sbi->sb->s_flags |= SB_LAZYTIME;
 	set_opt(sbi, FLUSH_MERGE);
 	set_opt(sbi, DISCARD);
-	if (f2fs_sb_has_blkzoned(sbi->sb))
+	if (f2fs_sb_has_blkzoned(sbi))
 		set_opt_mode(sbi, F2FS_MOUNT_LFS);
 	else
 		set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
@@ -1457,19 +1463,16 @@
 
 	sbi->sb->s_flags |= SB_ACTIVE;
 
-	mutex_lock(&sbi->gc_mutex);
 	f2fs_update_time(sbi, DISABLE_TIME);
 
 	while (!f2fs_time_over(sbi, DISABLE_TIME)) {
+		mutex_lock(&sbi->gc_mutex);
 		err = f2fs_gc(sbi, true, false, NULL_SEGNO);
 		if (err == -ENODATA)
 			break;
-		if (err && err != -EAGAIN) {
-			mutex_unlock(&sbi->gc_mutex);
+		if (err && err != -EAGAIN)
 			return err;
-		}
 	}
-	mutex_unlock(&sbi->gc_mutex);
 
 	err = sync_filesystem(sbi->sb);
 	if (err)
@@ -1531,7 +1534,7 @@
 				GFP_KERNEL);
 			if (!org_mount_opt.s_qf_names[i]) {
 				for (j = 0; j < i; j++)
-					kfree(org_mount_opt.s_qf_names[j]);
+					kvfree(org_mount_opt.s_qf_names[j]);
 				return -ENOMEM;
 			}
 		} else {
@@ -1575,7 +1578,7 @@
 		sb->s_flags &= ~SB_RDONLY;
 		if (sb_any_quota_suspended(sb)) {
 			dquot_resume(sb, -1);
-		} else if (f2fs_sb_has_quota_ino(sb)) {
+		} else if (f2fs_sb_has_quota_ino(sbi)) {
 			err = f2fs_enable_quotas(sb);
 			if (err)
 				goto restore_opts;
@@ -1651,7 +1654,7 @@
 #ifdef CONFIG_QUOTA
 	/* Release old quota file names */
 	for (i = 0; i < MAXQUOTAS; i++)
-		kfree(org_mount_opt.s_qf_names[i]);
+		kvfree(org_mount_opt.s_qf_names[i]);
 #endif
 	/* Update the POSIXACL Flag */
 	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
@@ -1672,7 +1675,7 @@
 #ifdef CONFIG_QUOTA
 	F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
 	for (i = 0; i < MAXQUOTAS; i++) {
-		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
+		kvfree(F2FS_OPTION(sbi).s_qf_names[i]);
 		F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
 	}
 #endif
@@ -1817,7 +1820,7 @@
 	int enabled = 0;
 	int i, err;
 
-	if (f2fs_sb_has_quota_ino(sbi->sb) && rdonly) {
+	if (f2fs_sb_has_quota_ino(sbi) && rdonly) {
 		err = f2fs_enable_quotas(sbi->sb);
 		if (err) {
 			f2fs_msg(sbi->sb, KERN_ERR,
@@ -1848,7 +1851,7 @@
 	unsigned long qf_inum;
 	int err;
 
-	BUG_ON(!f2fs_sb_has_quota_ino(sb));
+	BUG_ON(!f2fs_sb_has_quota_ino(F2FS_SB(sb)));
 
 	qf_inum = f2fs_qf_ino(sb, type);
 	if (!qf_inum)
@@ -1993,7 +1996,7 @@
 		goto out_put;
 
 	err = dquot_quota_off(sb, type);
-	if (err || f2fs_sb_has_quota_ino(sb))
+	if (err || f2fs_sb_has_quota_ino(F2FS_SB(sb)))
 		goto out_put;
 
 	inode_lock(inode);
@@ -2173,7 +2176,7 @@
 	 * if LOST_FOUND feature is enabled.
 	 *
 	 */
-	if (f2fs_sb_has_lost_found(sbi->sb) &&
+	if (f2fs_sb_has_lost_found(sbi) &&
 			inode->i_ino == F2FS_ROOT_INO(sbi))
 		return -EPERM;
 
@@ -2396,7 +2399,7 @@
 	__u32 crc = 0;
 
 	/* Check checksum_offset and crc in superblock */
-	if (le32_to_cpu(raw_super->feature) & F2FS_FEATURE_SB_CHKSUM) {
+	if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
 		crc_offset = le32_to_cpu(raw_super->checksum_offset);
 		if (crc_offset !=
 			offsetof(struct f2fs_super_block, crc)) {
@@ -2496,10 +2499,10 @@
 		return 1;
 	}
 
-	if (segment_count > (le32_to_cpu(raw_super->block_count) >> 9)) {
+	if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
 		f2fs_msg(sb, KERN_INFO,
-			"Wrong segment_count / block_count (%u > %u)",
-			segment_count, le32_to_cpu(raw_super->block_count));
+			"Wrong segment_count / block_count (%u > %llu)",
+			segment_count, le64_to_cpu(raw_super->block_count));
 		return 1;
 	}
 
@@ -2674,7 +2677,7 @@
 static void init_sb_info(struct f2fs_sb_info *sbi)
 {
 	struct f2fs_super_block *raw_super = sbi->raw_super;
-	int i, j;
+	int i;
 
 	sbi->log_sectors_per_block =
 		le32_to_cpu(raw_super->log_sectors_per_block);
@@ -2692,7 +2695,10 @@
 	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
 	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
 	sbi->cur_victim_sec = NULL_SECNO;
+	sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
+	sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
 	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
+	sbi->migration_granularity = sbi->segs_per_sec;
 
 	sbi->dir_level = DEF_DIR_LEVEL;
 	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
@@ -2710,9 +2716,6 @@
 
 	INIT_LIST_HEAD(&sbi->s_list);
 	mutex_init(&sbi->umount_mutex);
-	for (i = 0; i < NR_PAGE_TYPE - 1; i++)
-		for (j = HOT; j < NR_TEMP_TYPE; j++)
-			mutex_init(&sbi->wio_mutex[i][j]);
 	init_rwsem(&sbi->io_order_lock);
 	spin_lock_init(&sbi->cp_lock);
 
@@ -2749,7 +2752,7 @@
 	unsigned int n = 0;
 	int err = -EIO;
 
-	if (!f2fs_sb_has_blkzoned(sbi->sb))
+	if (!f2fs_sb_has_blkzoned(sbi))
 		return 0;
 
 	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
@@ -2800,7 +2803,7 @@
 		}
 	}
 
-	kfree(zones);
+	kvfree(zones);
 
 	return err;
 }
@@ -2860,7 +2863,7 @@
 
 	/* No valid superblock */
 	if (!*raw_super)
-		kfree(super);
+		kvfree(super);
 	else
 		err = 0;
 
@@ -2880,7 +2883,7 @@
 	}
 
 	/* we should update superblock crc here */
-	if (!recover && f2fs_sb_has_sb_chksum(sbi->sb)) {
+	if (!recover && f2fs_sb_has_sb_chksum(sbi)) {
 		crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi),
 				offsetof(struct f2fs_super_block, crc));
 		F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
@@ -2968,7 +2971,7 @@
 
 #ifdef CONFIG_BLK_DEV_ZONED
 		if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
-				!f2fs_sb_has_blkzoned(sbi->sb)) {
+				!f2fs_sb_has_blkzoned(sbi)) {
 			f2fs_msg(sbi->sb, KERN_ERR,
 				"Zoned block device feature not enabled\n");
 			return -EINVAL;
@@ -3064,7 +3067,7 @@
 	sbi->raw_super = raw_super;
 
 	/* precompute checksum seed for metadata */
-	if (f2fs_sb_has_inode_chksum(sb))
+	if (f2fs_sb_has_inode_chksum(sbi))
 		sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
 						sizeof(raw_super->uuid));
 
@@ -3074,7 +3077,7 @@
 	 * devices, but mandatory for host-managed zoned block devices.
 	 */
 #ifndef CONFIG_BLK_DEV_ZONED
-	if (f2fs_sb_has_blkzoned(sb)) {
+	if (f2fs_sb_has_blkzoned(sbi)) {
 		f2fs_msg(sb, KERN_ERR,
 			 "Zoned block device support is not enabled\n");
 		err = -EOPNOTSUPP;
@@ -3101,13 +3104,13 @@
 
 #ifdef CONFIG_QUOTA
 	sb->dq_op = &f2fs_quota_operations;
-	if (f2fs_sb_has_quota_ino(sb))
+	if (f2fs_sb_has_quota_ino(sbi))
 		sb->s_qcop = &dquot_quotactl_sysfile_ops;
 	else
 		sb->s_qcop = &f2fs_quotactl_ops;
 	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
 
-	if (f2fs_sb_has_quota_ino(sbi->sb)) {
+	if (f2fs_sb_has_quota_ino(sbi)) {
 		for (i = 0; i < MAXQUOTAS; i++) {
 			if (f2fs_qf_ino(sbi->sb, i))
 				sbi->nquota_files++;
@@ -3259,30 +3262,30 @@
 
 	f2fs_build_gc_manager(sbi);
 
+	err = f2fs_build_stats(sbi);
+	if (err)
+		goto free_nm;
+
 	/* get an inode for node space */
 	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
 	if (IS_ERR(sbi->node_inode)) {
 		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
 		err = PTR_ERR(sbi->node_inode);
-		goto free_nm;
+		goto free_stats;
 	}
 
-	err = f2fs_build_stats(sbi);
-	if (err)
-		goto free_node_inode;
-
 	/* read root inode and dentry */
 	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
 	if (IS_ERR(root)) {
 		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
 		err = PTR_ERR(root);
-		goto free_stats;
+		goto free_node_inode;
 	}
 	if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
 			!root->i_size || !root->i_nlink) {
 		iput(root);
 		err = -EINVAL;
-		goto free_stats;
+		goto free_node_inode;
 	}
 
 	sb->s_root = d_make_root(root); /* allocate root dentry */
@@ -3297,7 +3300,7 @@
 
 #ifdef CONFIG_QUOTA
 	/* Enable quota usage during mount */
-	if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb)) {
+	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
 		err = f2fs_enable_quotas(sb);
 		if (err)
 			f2fs_msg(sb, KERN_ERR,
@@ -3369,7 +3372,7 @@
 		if (err)
 			goto free_meta;
 	}
-	kfree(options);
+	kvfree(options);
 
 	/* recover broken superblock */
 	if (recovery) {
@@ -3392,7 +3395,7 @@
 free_meta:
 #ifdef CONFIG_QUOTA
 	f2fs_truncate_quota_inode_pages(sb);
-	if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb))
+	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb))
 		f2fs_quota_off_umount(sbi->sb);
 #endif
 	/*
@@ -3406,41 +3409,43 @@
 free_root_inode:
 	dput(sb->s_root);
 	sb->s_root = NULL;
-free_stats:
-	f2fs_destroy_stats(sbi);
 free_node_inode:
 	f2fs_release_ino_entry(sbi, true);
 	truncate_inode_pages_final(NODE_MAPPING(sbi));
 	iput(sbi->node_inode);
+	sbi->node_inode = NULL;
+free_stats:
+	f2fs_destroy_stats(sbi);
 free_nm:
 	f2fs_destroy_node_manager(sbi);
 free_sm:
 	f2fs_destroy_segment_manager(sbi);
 free_devices:
 	destroy_device_list(sbi);
-	kfree(sbi->ckpt);
+	kvfree(sbi->ckpt);
 free_meta_inode:
 	make_bad_inode(sbi->meta_inode);
 	iput(sbi->meta_inode);
+	sbi->meta_inode = NULL;
 free_io_dummy:
 	mempool_destroy(sbi->write_io_dummy);
 free_percpu:
 	destroy_percpu_info(sbi);
 free_bio_info:
 	for (i = 0; i < NR_PAGE_TYPE; i++)
-		kfree(sbi->write_io[i]);
+		kvfree(sbi->write_io[i]);
 free_options:
 #ifdef CONFIG_QUOTA
 	for (i = 0; i < MAXQUOTAS; i++)
-		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
+		kvfree(F2FS_OPTION(sbi).s_qf_names[i]);
 #endif
-	kfree(options);
+	kvfree(options);
 free_sb_buf:
-	kfree(raw_super);
+	kvfree(raw_super);
 free_sbi:
 	if (sbi->s_chksum_driver)
 		crypto_free_shash(sbi->s_chksum_driver);
-	kfree(sbi);
+	kvfree(sbi);
 
 	/* give only one another chance */
 	if (retry) {
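
The kfree() -> kvfree() conversions in this file matter because several of
these buffers (options, ckpt, the write_io arrays, the quota file names) can
come from f2fs's kvmalloc-style helpers, which fall back to vmalloc() for
large requests. kvfree() frees either kind; its implementation is essentially
the following dispatch (a sketch of the mm/util.c helper, not code from this
patch):

void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))	/* vmalloc() or kvmalloc() fallback */
		vfree(addr);
	else
		kfree(addr);		/* plain slab allocation */
}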
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index b777cbd..0575edb 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -90,34 +90,34 @@
 	if (!sb->s_bdev->bd_part)
 		return snprintf(buf, PAGE_SIZE, "0\n");
 
-	if (f2fs_sb_has_encrypt(sb))
+	if (f2fs_sb_has_encrypt(sbi))
 		len += snprintf(buf, PAGE_SIZE - len, "%s",
 						"encryption");
-	if (f2fs_sb_has_blkzoned(sb))
+	if (f2fs_sb_has_blkzoned(sbi))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "blkzoned");
-	if (f2fs_sb_has_extra_attr(sb))
+	if (f2fs_sb_has_extra_attr(sbi))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "extra_attr");
-	if (f2fs_sb_has_project_quota(sb))
+	if (f2fs_sb_has_project_quota(sbi))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "projquota");
-	if (f2fs_sb_has_inode_chksum(sb))
+	if (f2fs_sb_has_inode_chksum(sbi))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "inode_checksum");
-	if (f2fs_sb_has_flexible_inline_xattr(sb))
+	if (f2fs_sb_has_flexible_inline_xattr(sbi))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "flexible_inline_xattr");
-	if (f2fs_sb_has_quota_ino(sb))
+	if (f2fs_sb_has_quota_ino(sbi))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "quota_ino");
-	if (f2fs_sb_has_inode_crtime(sb))
+	if (f2fs_sb_has_inode_crtime(sbi))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "inode_crtime");
-	if (f2fs_sb_has_lost_found(sb))
+	if (f2fs_sb_has_lost_found(sbi))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "lost_found");
-	if (f2fs_sb_has_sb_chksum(sb))
+	if (f2fs_sb_has_sb_chksum(sbi))
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "sb_checksum");
 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
@@ -246,6 +246,11 @@
 		return count;
 	}
 
+	if (!strcmp(a->attr.name, "migration_granularity")) {
+		if (t == 0 || t > sbi->segs_per_sec)
+			return -EINVAL;
+	}
+
 	if (!strcmp(a->attr.name, "trim_sections"))
 		return -EINVAL;
 
@@ -406,6 +411,7 @@
 F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages);
 F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, dirty_nats_ratio, dirty_nats_ratio);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, migration_granularity, migration_granularity);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
@@ -460,6 +466,7 @@
 	ATTR_LIST(min_hot_blocks),
 	ATTR_LIST(min_ssr_sections),
 	ATTR_LIST(max_victim_search),
+	ATTR_LIST(migration_granularity),
 	ATTR_LIST(dir_level),
 	ATTR_LIST(ram_thresh),
 	ATTR_LIST(ra_nid_pages),
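
All of the f2fs_sb_has_*() calls in this file now take the f2fs_sb_info
instead of the VFS super_block; each one boils down to masking the
little-endian feature word of the raw superblock, as the __F2FS_HAS_FEATURE()
use in super.c above shows. A standalone userspace sketch of that test (the
bit value is a hypothetical placeholder, and cpu_to_le32 is re-created here):

#include <stdint.h>
#include <stdio.h>

#define FEATURE_SB_CHKSUM	0x0800	/* hypothetical feature bit */

static uint32_t cpu_to_le32(uint32_t v)
{
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return __builtin_bswap32(v);	/* swap on big-endian hosts */
#else
	return v;			/* little-endian: identity */
#endif
}

/* the feature word is stored little-endian on disk, so swap the mask */
static int sb_has_feature(uint32_t le_feature_word, uint32_t mask)
{
	return (le_feature_word & cpu_to_le32(mask)) != 0;
}

int main(void)
{
	uint32_t feature = cpu_to_le32(FEATURE_SB_CHKSUM);

	printf("%d\n", sb_has_feature(feature, FEATURE_SB_CHKSUM)); /* 1 */
	printf("%d\n", sb_has_feature(feature, 0x0001));	     /* 0 */
	return 0;
}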
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 7261245..18d5ffb 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -288,7 +288,7 @@
 static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
 				unsigned int index, unsigned int len,
 				const char *name, struct f2fs_xattr_entry **xe,
-				void **base_addr)
+				void **base_addr, int *base_size)
 {
 	void *cur_addr, *txattr_addr, *last_addr = NULL;
 	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
@@ -299,8 +299,8 @@
 	if (!size && !inline_size)
 		return -ENODATA;
 
-	txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode),
-			inline_size + size + XATTR_PADDING_SIZE, GFP_NOFS);
+	*base_size = inline_size + size + XATTR_PADDING_SIZE;
+	txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode), *base_size, GFP_NOFS);
 	if (!txattr_addr)
 		return -ENOMEM;
 
@@ -312,8 +312,10 @@
 
 		*xe = __find_inline_xattr(inode, txattr_addr, &last_addr,
 						index, len, name);
-		if (*xe)
+		if (*xe) {
+			*base_size = inline_size;
 			goto check;
+		}
 	}
 
 	/* read from xattr node block */
@@ -415,7 +417,7 @@
 		}
 
 		f2fs_wait_on_page_writeback(ipage ? ipage : in_page,
-							NODE, true);
+							NODE, true, true);
 		/* no need to use xattr node block */
 		if (hsize <= inline_size) {
 			err = f2fs_truncate_xattr_node(inode);
@@ -439,7 +441,7 @@
 			goto in_page_out;
 		}
 		f2fs_bug_on(sbi, new_nid);
-		f2fs_wait_on_page_writeback(xpage, NODE, true);
+		f2fs_wait_on_page_writeback(xpage, NODE, true, true);
 	} else {
 		struct dnode_of_data dn;
 		set_new_dnode(&dn, inode, NULL, NULL, new_nid);
@@ -474,6 +476,7 @@
 	int error = 0;
 	unsigned int size, len;
 	void *base_addr = NULL;
+	int base_size;
 
 	if (name == NULL)
 		return -EINVAL;
@@ -484,7 +487,7 @@
 
 	down_read(&F2FS_I(inode)->i_xattr_sem);
 	error = lookup_all_xattrs(inode, ipage, index, len, name,
-				&entry, &base_addr);
+				&entry, &base_addr, &base_size);
 	up_read(&F2FS_I(inode)->i_xattr_sem);
 	if (error)
 		return error;
@@ -498,6 +501,11 @@
 
 	if (buffer) {
 		char *pval = entry->e_name + entry->e_name_len;
+
+		if (base_size - (pval - (char *)base_addr) < size) {
+			error = -ERANGE;
+			goto out;
+		}
 		memcpy(buffer, pval, size);
 	}
 	error = size;
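
The base_size plumbing added above lets the getter verify, before the
memcpy(), that the size recorded in the xattr entry actually fits inside the
buffer that was read; a crafted or corrupted image could otherwise trigger an
out-of-bounds read. The check reduces to this pattern (hypothetical
standalone helper, not the kernel function):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Refuse the copy when the claimed value size runs past the end of the
 * buffer holding the xattrs (base_addr .. base_addr + base_size). */
static int copy_xattr_value(char *dst, const char *base_addr, long base_size,
			    const char *pval, long size)
{
	if (base_size - (pval - base_addr) < size)
		return -ERANGE;		/* on-disk size field is corrupt */
	memcpy(dst, pval, (size_t)size);
	return (int)size;
}

int main(void)
{
	char buf[16] = "name\0value", out[16];

	/* value claims 20 bytes but only 11 remain after offset 5: rejected */
	printf("%d\n", copy_xattr_value(out, buf, sizeof(buf), buf + 5, 20));
	printf("%d\n", copy_xattr_value(out, buf, sizeof(buf), buf + 5, 5));
	return 0;
}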
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 648f0ca..998051c 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -744,17 +744,19 @@
 			       the gfs2 structures. */
 	if (default_acl) {
 		error = __gfs2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
+		if (error)
+			goto fail_gunlock3;
 		posix_acl_release(default_acl);
+		default_acl = NULL;
 	}
 	if (acl) {
-		if (!error)
-			error = __gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS);
+		error = __gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS);
+		if (error)
+			goto fail_gunlock3;
 		posix_acl_release(acl);
+		acl = NULL;
 	}
 
-	if (error)
-		goto fail_gunlock3;
-
 	error = security_inode_init_security(&ip->i_inode, &dip->i_inode, name,
 					     &gfs2_initxattrs, NULL);
 	if (error)
@@ -789,10 +791,8 @@
 	}
 	gfs2_rsqa_delete(ip, NULL);
 fail_free_acls:
-	if (default_acl)
-		posix_acl_release(default_acl);
-	if (acl)
-		posix_acl_release(acl);
+	posix_acl_release(default_acl);
+	posix_acl_release(acl);
 fail_gunlock:
 	gfs2_dir_no_add(&da);
 	gfs2_glock_dq_uninit(ghs);
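
Two properties make the restructured error handling safe: posix_acl_release()
tolerates NULL (so the unconditional calls at fail_free_acls are fine), and
each pointer is cleared right after a successful release, so a later jump to
the common label cannot drop the same reference twice. A compact userspace
sketch of the pattern, with free() standing in for the refcount drop and a
sentinel first byte faking a set-ACL failure:

#include <stdlib.h>
#include <string.h>

static void acl_release(char *acl)
{
	free(acl);			/* free(NULL) is defined to do nothing */
}

static int create_with_acls(char *default_acl, char *acl)
{
	if (default_acl && default_acl[0] == 'X')	/* pretend set failed */
		goto fail_free_acls;
	acl_release(default_acl);
	default_acl = NULL;		/* error path won't touch it again */

	if (acl && acl[0] == 'X')
		goto fail_free_acls;
	acl_release(acl);
	acl = NULL;

	return 0;
fail_free_acls:
	acl_release(default_acl);	/* safe: NULL or still-owned pointer */
	acl_release(acl);
	return -1;
}

int main(void)
{
	/* default ACL "fails" to apply; both buffers are freed exactly once */
	return create_with_acls(strdup("X"), strdup("ok")) ? 1 : 0;
}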
diff --git a/fs/iomap.c b/fs/iomap.c
index 37da7a6..e57fb1e 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -117,12 +117,6 @@
 	atomic_set(&iop->read_count, 0);
 	atomic_set(&iop->write_count, 0);
 	bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
-
-	/*
-	 * migrate_page_move_mapping() assumes that pages with private data have
-	 * their count elevated by 1.
-	 */
-	get_page(page);
 	set_page_private(page, (unsigned long)iop);
 	SetPagePrivate(page);
 	return iop;
@@ -139,7 +133,6 @@
 	WARN_ON_ONCE(atomic_read(&iop->write_count));
 	ClearPagePrivate(page);
 	set_page_private(page, 0);
-	put_page(page);
 	kfree(iop);
 }
 
@@ -495,16 +488,29 @@
 }
 EXPORT_SYMBOL_GPL(iomap_readpages);
 
+/*
+ * iomap_is_partially_uptodate checks whether blocks within a page are
+ * uptodate.
+ *
+ * Returns true if all blocks corresponding to the portion of the file
+ * we want to read within the page are uptodate.
+ */
 int
 iomap_is_partially_uptodate(struct page *page, unsigned long from,
 		unsigned long count)
 {
 	struct iomap_page *iop = to_iomap_page(page);
 	struct inode *inode = page->mapping->host;
-	unsigned first = from >> inode->i_blkbits;
-	unsigned last = (from + count - 1) >> inode->i_blkbits;
+	unsigned len, first, last;
 	unsigned i;
 
+	/* Limit range to one page */
+	len = min_t(unsigned, PAGE_SIZE - from, count);
+
+	/* First and last blocks in range within page */
+	first = from >> inode->i_blkbits;
+	last = (from + len - 1) >> inode->i_blkbits;
+
 	if (iop) {
 		for (i = first; i <= last; i++)
 			if (!test_bit(i, iop->uptodate))
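
The rewritten helper clamps the byte range to the current page before
deriving block indices; previously a count larger than the page meant last
could index past the per-page uptodate bitmap. A userspace sketch of just
that clamp:

#include <stdio.h>

#define PAGE_SIZE 4096u

/* Clamp (from, count) to the page before computing first/last block
 * indices, so an oversized count cannot walk off the bitmap. */
static void range_to_blocks(unsigned from, unsigned count, unsigned blkbits,
			    unsigned *first, unsigned *last)
{
	unsigned len = count < PAGE_SIZE - from ? count : PAGE_SIZE - from;

	*first = from >> blkbits;
	*last = (from + len - 1) >> blkbits;
}

int main(void)
{
	unsigned first, last;

	/* 512-byte blocks; count spans three pages but last is capped at 7 */
	range_to_blocks(512, 3 * PAGE_SIZE, 9, &first, &last);
	printf("first=%u last=%u\n", first, last);
	return 0;
}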
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 902a7dd..bb6ae38 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -101,7 +101,8 @@
 	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
 
 #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
-	cancel_delayed_work_sync(&c->wbuf_dwork);
+	if (jffs2_is_writebuffered(c))
+		cancel_delayed_work_sync(&c->wbuf_dwork);
 #endif
 
 	mutex_lock(&c->alloc_sem);
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index d20b92f..0a67dd4 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -442,7 +442,7 @@
 			fl->fl_start = req->a_res.lock.fl.fl_start;
 			fl->fl_end = req->a_res.lock.fl.fl_end;
 			fl->fl_type = req->a_res.lock.fl.fl_type;
-			fl->fl_pid = 0;
+			fl->fl_pid = -req->a_res.lock.fl.fl_pid;
 			break;
 		default:
 			status = nlm_stat_to_errno(req->a_res.status);
diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c
index 7147e4a..9846f7e 100644
--- a/fs/lockd/xdr.c
+++ b/fs/lockd/xdr.c
@@ -127,7 +127,7 @@
 
 	locks_init_lock(fl);
 	fl->fl_owner = current->files;
-	fl->fl_pid   = (pid_t)lock->svid;
+	fl->fl_pid   = current->tgid;
 	fl->fl_flags = FL_POSIX;
 	fl->fl_type  = F_RDLCK;		/* as good as anything else */
 	start = ntohl(*p++);
@@ -269,7 +269,7 @@
 	memset(lock, 0, sizeof(*lock));
 	locks_init_lock(&lock->fl);
 	lock->svid = ~(u32) 0;
-	lock->fl.fl_pid = (pid_t)lock->svid;
+	lock->fl.fl_pid = current->tgid;
 
 	if (!(p = nlm_decode_cookie(p, &argp->cookie))
 	 || !(p = xdr_decode_string_inplace(p, &lock->caller,
diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
index 7ed9edf..70154f3 100644
--- a/fs/lockd/xdr4.c
+++ b/fs/lockd/xdr4.c
@@ -119,7 +119,7 @@
 
 	locks_init_lock(fl);
 	fl->fl_owner = current->files;
-	fl->fl_pid   = (pid_t)lock->svid;
+	fl->fl_pid   = current->tgid;
 	fl->fl_flags = FL_POSIX;
 	fl->fl_type  = F_RDLCK;		/* as good as anything else */
 	p = xdr_decode_hyper(p, &start);
@@ -266,7 +266,7 @@
 	memset(lock, 0, sizeof(*lock));
 	locks_init_lock(&lock->fl);
 	lock->svid = ~(u32) 0;
-	lock->fl.fl_pid = (pid_t)lock->svid;
+	lock->fl.fl_pid = current->tgid;
 
 	if (!(p = nlm4_decode_cookie(p, &argp->cookie))
 	 || !(p = xdr_decode_string_inplace(p, &lock->caller,
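
A hedged reading of the lockd changes above: locks the server instantiates
are stamped with the local task's tgid, while a conflicting lock reported
back from a remote host carries a negated pid (the fl->fl_pid = -...
assignment in clntproc.c), so consumers can tell the two owner kinds apart:

#include <stdio.h>

/* Sketch of the sign convention: negative fl_pid means the lock is owned on
 * another node, and negating again recovers the remote pid. */
static int lock_is_remote(int fl_pid)
{
	return fl_pid < 0;
}

int main(void)
{
	int remote = -1234;	/* as stored by the client-side fix */

	printf("remote=%d pid=%d\n", lock_is_remote(remote), -remote);
	printf("remote=%d pid=%d\n", lock_is_remote(4321), 4321);
	return 0;
}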
diff --git a/fs/namei.c b/fs/namei.c
index bd04eef..83c9b42 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -3735,8 +3735,7 @@
 	if (error)
 		return error;
 
-	if ((S_ISCHR(mode) || S_ISBLK(mode)) &&
-	    !ns_capable(dentry->d_sb->s_user_ns, CAP_MKNOD))
+	if ((S_ISCHR(mode) || S_ISBLK(mode)) && !capable(CAP_MKNOD))
 		return -EPERM;
 
 	if (!dir->i_op->mknod)
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 586726a..d790faf 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -621,11 +621,12 @@
 	nfs_set_page_writeback(page);
 	WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
 
-	ret = 0;
+	ret = req->wb_context->error;
 	/* If there is a fatal error that covers this write, just exit */
-	if (nfs_error_is_fatal_on_server(req->wb_context->error))
+	if (nfs_error_is_fatal_on_server(ret))
 		goto out_launder;
 
+	ret = 0;
 	if (!nfs_pageio_add_request(pgio, req)) {
 		ret = pgio->pg_error;
 		/*
@@ -635,9 +636,9 @@
 			nfs_context_set_write_error(req->wb_context, ret);
 			if (nfs_error_is_fatal_on_server(ret))
 				goto out_launder;
-		}
+		} else
+			ret = -EAGAIN;
 		nfs_redirty_request(req);
-		ret = -EAGAIN;
 	} else
 		nfs_add_stats(page_file_mapping(page)->host,
 				NFSIOS_WRITEPAGES, 1);
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 9d6b4f0..f35aa9f 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1015,8 +1015,6 @@
 
 	nvecs = svc_fill_write_vector(rqstp, write->wr_pagelist,
 				      &write->wr_head, write->wr_buflen);
-	if (!nvecs)
-		return nfserr_io;
 	WARN_ON_ONCE(nvecs > ARRAY_SIZE(rqstp->rq_vec));
 
 	status = nfsd_vfs_write(rqstp, &cstate->current_fh, filp,
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index f9a95fb..0bc716c 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -726,8 +726,10 @@
 		return -EBADF;
 
 	/* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */
-	if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE)))
-		return -EINVAL;
+	if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) {
+		ret = -EINVAL;
+		goto fput_and_out;
+	}
 
 	/* verify that this is indeed an inotify instance */
 	if (unlikely(f.file->f_op != &inotify_fops)) {
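
The inotify fix is a reference-leak pattern: fdget() has already taken a
reference by the time the new IN_MASK_ADD/IN_MASK_CREATE check runs, so the
error must leave through the existing fput_and_out label rather than return
directly. A userspace sketch of the same shape:

#include <errno.h>
#include <stdlib.h>

/* A reference acquired up front must be dropped on *every* exit path, so
 * error checks added later jump to the common cleanup label. */
static int do_mark(unsigned mask)
{
	int ret = 0;
	void *ref = malloc(16);		/* stands in for the fdget() reference */

	if (!ref)
		return -EBADF;

	if ((mask & 0x1) && (mask & 0x2)) {	/* mutually exclusive flags */
		ret = -EINVAL;
		goto fput_and_out;	/* an early "return -EINVAL" leaked ref */
	}
	/* ... actual work ... */
fput_and_out:
	free(ref);			/* stands in for fdput() */
	return ret;
}

int main(void)
{
	return do_mark(0x3) == -EINVAL ? 0 : 1;
}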
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index 7642b67..3020823 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -345,13 +345,18 @@
 	if (num_used
 	    || alloc->id1.bitmap1.i_used
 	    || alloc->id1.bitmap1.i_total
-	    || la->la_bm_off)
-		mlog(ML_ERROR, "Local alloc hasn't been recovered!\n"
+	    || la->la_bm_off) {
+		mlog(ML_ERROR, "inconsistent detected, clean journal with"
+		     " unrecovered local alloc, please run fsck.ocfs2!\n"
 		     "found = %u, set = %u, taken = %u, off = %u\n",
 		     num_used, le32_to_cpu(alloc->id1.bitmap1.i_used),
 		     le32_to_cpu(alloc->id1.bitmap1.i_total),
 		     OCFS2_LOCAL_ALLOC(alloc)->la_bm_off);
 
+		status = -EINVAL;
+		goto bail;
+	}
+
 	osb->local_alloc_bh = alloc_bh;
 	osb->local_alloc_state = OCFS2_LA_ENABLED;
 
diff --git a/fs/pnode.c b/fs/pnode.c
index 56f9a28..681916d 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -608,36 +608,18 @@
 	return 0;
 }
 
-/*
- *  Iterates over all slaves, and slaves of slaves.
- */
-static struct mount *next_descendent(struct mount *root, struct mount *cur)
-{
-	if (!IS_MNT_NEW(cur) && !list_empty(&cur->mnt_slave_list))
-		return first_slave(cur);
-	do {
-		struct mount *master = cur->mnt_master;
-
-		if (!master || cur->mnt_slave.next != &master->mnt_slave_list) {
-			struct mount *next = next_slave(cur);
-
-			return (next == root) ? NULL : next;
-		}
-		cur = master;
-	} while (cur != root);
-	return NULL;
-}
-
 void propagate_remount(struct mount *mnt)
 {
-	struct mount *m = mnt;
+	struct mount *parent = mnt->mnt_parent;
+	struct mount *p = mnt, *m;
 	struct super_block *sb = mnt->mnt.mnt_sb;
 
-	if (sb->s_op->copy_mnt_data) {
-		m = next_descendent(mnt, m);
-		while (m) {
+	if (!sb->s_op->copy_mnt_data)
+		return;
+	for (p = propagation_next(parent, parent); p;
+				p = propagation_next(p, parent)) {
+		m = __lookup_mnt(&p->mnt, mnt->mnt_mountpoint);
+		if (m)
 			sb->s_op->copy_mnt_data(m->mnt.data, mnt->mnt.data);
-			m = next_descendent(mnt, m);
-		}
 	}
 }
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 89921a0..4d598a3 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -464,7 +464,7 @@
 
 	inode = new_inode(sb);
 	if (!inode)
-		goto out;
+		return ERR_PTR(-ENOMEM);
 
 	inode->i_ino = get_next_ino();
 
@@ -474,8 +474,7 @@
 	if (unlikely(head->unregistering)) {
 		spin_unlock(&sysctl_lock);
 		iput(inode);
-		inode = NULL;
-		goto out;
+		return ERR_PTR(-ENOENT);
 	}
 	ei->sysctl = head;
 	ei->sysctl_entry = table;
@@ -500,7 +499,6 @@
 	if (root->set_ownership)
 		root->set_ownership(head, table, &inode->i_uid, &inode->i_gid);
 
-out:
 	return inode;
 }
 
@@ -549,10 +547,11 @@
 			goto out;
 	}
 
-	err = ERR_PTR(-ENOMEM);
 	inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
-	if (!inode)
+	if (IS_ERR(inode)) {
+		err = ERR_CAST(inode);
 		goto out;
+	}
 
 	d_set_d_op(dentry, &proc_sys_dentry_operations);
 	err = d_splice_alias(inode, dentry);
@@ -685,7 +684,7 @@
 		if (d_in_lookup(child)) {
 			struct dentry *res;
 			inode = proc_sys_make_inode(dir->d_sb, head, table);
-			if (!inode) {
+			if (IS_ERR(inode)) {
 				d_lookup_done(child);
 				dput(child);
 				return false;
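
proc_sys_make_inode() now reports why it failed by encoding a negative errno
into the returned pointer, and the callers switch from NULL checks to
IS_ERR()/ERR_CAST(). A minimal userspace re-creation of the ERR_PTR idiom
(modeled on include/linux/err.h):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

/* errnos occupy the last page of the address space, never valid pointers */
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *make_inode(int fail_mode)
{
	if (fail_mode == 1)
		return ERR_PTR(-ENOMEM);	/* allocation failed */
	if (fail_mode == 2)
		return ERR_PTR(-ENOENT);	/* table being unregistered */
	return "inode";
}

int main(void)
{
	void *inode = make_inode(2);

	if (IS_ERR(inode))
		printf("error: %ld\n", PTR_ERR(inode));	/* -2 (ENOENT) */
	return 0;
}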
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 03cd593..eb67bb7 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -713,18 +713,15 @@
 {
 	struct device *dev = &pdev->dev;
 	struct ramoops_platform_data *pdata = dev->platform_data;
+	struct ramoops_platform_data pdata_local;
 	struct ramoops_context *cxt = &oops_cxt;
 	size_t dump_mem_sz;
 	phys_addr_t paddr;
 	int err = -EINVAL;
 
 	if (dev_of_node(dev) && !pdata) {
-		pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
-		if (!pdata) {
-			pr_err("cannot allocate platform data buffer\n");
-			err = -ENOMEM;
-			goto fail_out;
-		}
+		pdata = &pdata_local;
+		memset(pdata, 0, sizeof(*pdata));
 
 		err = ramoops_parse_dt(pdev, pdata);
 		if (err < 0)
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index 0792595..3c777ec 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -496,6 +496,11 @@
 	sig ^= PERSISTENT_RAM_SIG;
 
 	if (prz->buffer->sig == sig) {
+		if (buffer_size(prz) == 0) {
+			pr_debug("found existing empty buffer\n");
+			return 0;
+		}
+
 		if (buffer_size(prz) > prz->buffer_size ||
 		    buffer_start(prz) > buffer_size(prz))
 			pr_info("found existing invalid buffer, size %zu, start %zu\n",
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index f0cbf58..fd5dd80 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -791,7 +791,8 @@
 /* Return true if quotactl command is manipulating quota on/off state */
 static bool quotactl_cmd_onoff(int cmd)
 {
-	return (cmd == Q_QUOTAON) || (cmd == Q_QUOTAOFF);
+	return (cmd == Q_QUOTAON) || (cmd == Q_QUOTAOFF) ||
+		 (cmd == Q_XQUOTAON) || (cmd == Q_XQUOTAOFF);
 }
 
 /*
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index 4844538..c6f9b22 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -210,6 +210,38 @@
 }
 
 /**
+ * inode_still_linked - check whether inode in question will be re-linked.
+ * @c: UBIFS file-system description object
+ * @rino: replay entry to test
+ *
+ * O_TMPFILE files can be re-linked; this means the link count goes from 0
+ * to 1. This case needs special care, otherwise all references to the inode
+ * would be removed as soon as the first replay entry with link count 0 for
+ * that inode is found.
+ */
+static bool inode_still_linked(struct ubifs_info *c, struct replay_entry *rino)
+{
+	struct replay_entry *r;
+
+	ubifs_assert(c, rino->deletion);
+	ubifs_assert(c, key_type(c, &rino->key) == UBIFS_INO_KEY);
+
+	/*
+	 * Find the most recent entry for the inode behind @rino and check
+	 * whether it is a deletion.
+	 */
+	list_for_each_entry_reverse(r, &c->replay_list, list) {
+		ubifs_assert(c, r->sqnum >= rino->sqnum);
+		if (key_inum(c, &r->key) == key_inum(c, &rino->key))
+			return r->deletion == 0;
+	}
+
+	ubifs_assert(c, 0);
+	return false;
+}
+
+/**
  * apply_replay_entry - apply a replay entry to the TNC.
  * @c: UBIFS file-system description object
  * @r: replay entry to apply
@@ -236,6 +268,11 @@
 			{
 				ino_t inum = key_inum(c, &r->key);
 
+				if (inode_still_linked(c, r)) {
+					err = 0;
+					break;
+				}
+
 				err = ubifs_tnc_remove_ino(c, inum);
 				break;
 			}
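
inode_still_linked() relies on replay_list being ordered by sequence number,
so walking it in reverse finds the most recent entry for the inode first;
only if that entry is itself a deletion may the inode's TNC data be removed.
A simplified userspace model of the scan (plain pointers instead of the
kernel list macros):

#include <stdbool.h>
#include <stddef.h>

struct replay_entry {
	unsigned long inum;
	unsigned long long sqnum;
	bool deletion;
	struct replay_entry *prev;	/* toward older entries */
};

/* Walk newest -> oldest; the first hit for this inode is the most recent
 * verdict, and a later re-link (deletion == false) keeps the inode alive. */
static bool inode_still_linked(const struct replay_entry *newest,
			       const struct replay_entry *rino)
{
	const struct replay_entry *r;

	for (r = newest; r; r = r->prev) {
		if (r->inum == rino->inum)
			return !r->deletion;
	}
	return false;
}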
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 6f9ba3b..9beff19 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -736,10 +736,18 @@
 	struct userfaultfd_ctx *ctx;
 
 	ctx = vma->vm_userfaultfd_ctx.ctx;
-	if (ctx && (ctx->features & UFFD_FEATURE_EVENT_REMAP)) {
+
+	if (!ctx)
+		return;
+
+	if (ctx->features & UFFD_FEATURE_EVENT_REMAP) {
 		vm_ctx->ctx = ctx;
 		userfaultfd_ctx_get(ctx);
 		WRITE_ONCE(ctx->mmap_changing, true);
+	} else {
+		/* Drop uffd context if remap feature not enabled */
+		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+		vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
 	}
 }
 
diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
index 89f3b03..e3667c9 100644
--- a/include/asm-generic/4level-fixup.h
+++ b/include/asm-generic/4level-fixup.h
@@ -3,7 +3,7 @@
 #define _4LEVEL_FIXUP_H
 
 #define __ARCH_HAS_4LEVEL_HACK
-#define __PAGETABLE_PUD_FOLDED
+#define __PAGETABLE_PUD_FOLDED 1
 
 #define PUD_SHIFT			PGDIR_SHIFT
 #define PUD_SIZE			PGDIR_SIZE
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
index 9c2e070..73474bb 100644
--- a/include/asm-generic/5level-fixup.h
+++ b/include/asm-generic/5level-fixup.h
@@ -3,7 +3,7 @@
 #define _5LEVEL_FIXUP_H
 
 #define __ARCH_HAS_5LEVEL_HACK
-#define __PAGETABLE_P4D_FOLDED
+#define __PAGETABLE_P4D_FOLDED 1
 
 #define P4D_SHIFT			PGDIR_SHIFT
 #define P4D_SIZE			PGDIR_SIZE
diff --git a/include/asm-generic/pgtable-nop4d-hack.h b/include/asm-generic/pgtable-nop4d-hack.h
index 0c34215..1d6dd38 100644
--- a/include/asm-generic/pgtable-nop4d-hack.h
+++ b/include/asm-generic/pgtable-nop4d-hack.h
@@ -5,7 +5,7 @@
 #ifndef __ASSEMBLY__
 #include <asm-generic/5level-fixup.h>
 
-#define __PAGETABLE_PUD_FOLDED
+#define __PAGETABLE_PUD_FOLDED 1
 
 /*
  * Having the pud type consist of a pgd gets the size right, and allows
diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
index 1a29b2a..04cb913 100644
--- a/include/asm-generic/pgtable-nop4d.h
+++ b/include/asm-generic/pgtable-nop4d.h
@@ -4,7 +4,7 @@
 
 #ifndef __ASSEMBLY__
 
-#define __PAGETABLE_P4D_FOLDED
+#define __PAGETABLE_P4D_FOLDED 1
 
 typedef struct { pgd_t pgd; } p4d_t;
 
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
index f35f6e8..b85b827 100644
--- a/include/asm-generic/pgtable-nopmd.h
+++ b/include/asm-generic/pgtable-nopmd.h
@@ -8,7 +8,7 @@
 
 struct mm_struct;
 
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
 
 /*
  * Having the pmd type consist of a pud gets the size right, and allows
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
index e950b9c..9bef475 100644
--- a/include/asm-generic/pgtable-nopud.h
+++ b/include/asm-generic/pgtable-nopud.h
@@ -9,7 +9,7 @@
 #else
 #include <asm-generic/pgtable-nop4d.h>
 
-#define __PAGETABLE_PUD_FOLDED
+#define __PAGETABLE_PUD_FOLDED 1
 
 /*
  * Having the pud type consist of a p4d gets the size right, and allows
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 88ebc61..15fd027 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1127,4 +1127,20 @@
 #endif
 #endif
 
+/*
+ * On some architectures it depends on the mm if the p4d/pud or pmd
+ * layer of the page table hierarchy is folded or not.
+ */
+#ifndef mm_p4d_folded
+#define mm_p4d_folded(mm)	__is_defined(__PAGETABLE_P4D_FOLDED)
+#endif
+
+#ifndef mm_pud_folded
+#define mm_pud_folded(mm)	__is_defined(__PAGETABLE_PUD_FOLDED)
+#endif
+
+#ifndef mm_pmd_folded
+#define mm_pmd_folded(mm)	__is_defined(__PAGETABLE_PMD_FOLDED)
+#endif
+
 #endif /* _ASM_GENERIC_PGTABLE_H */
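
The reason every __PAGETABLE_*_FOLDED definition gains an explicit 1 is the
__is_defined() probe used by the new mm_*_folded() defaults: like
IS_ENABLED(), it only detects macros that expand to 1, so a bare
"#define __PAGETABLE_PUD_FOLDED" was invisible to it. A minimal standalone
re-creation of the machinery (modeled on include/linux/kconfig.h):

#include <stdio.h>

#define __ARG_PLACEHOLDER_1		0,
#define __take_second_arg(__ignored, val, ...) val

#define __is_defined(x)			___is_defined(x)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)

#define FOLDED_OK	1	/* defined to 1: detected */
#define FOLDED_BAD		/* defined empty: NOT detected */

int main(void)
{
	printf("%d %d %d\n",
	       __is_defined(FOLDED_OK),		/* 1 */
	       __is_defined(FOLDED_BAD),	/* 0 */
	       __is_defined(FOLDED_MISSING));	/* 0: never defined */
	return 0;
}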
diff --git a/include/dt-bindings/clock/qcom,dispcc-kona.h b/include/dt-bindings/clock/qcom,dispcc-kona.h
index e85f00b..f48b27a 100644
--- a/include/dt-bindings/clock/qcom,dispcc-kona.h
+++ b/include/dt-bindings/clock/qcom,dispcc-kona.h
@@ -1,98 +1,80 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
 
 #ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_KONA_H
 #define _DT_BINDINGS_CLK_QCOM_DISP_CC_KONA_H
 
-#define DISP_CC_DEBUG_CLK					0
-#define DISP_CC_MDSS_AHB_CLK					1
-#define DISP_CC_MDSS_AHB_CLK_SRC				2
-#define DISP_CC_MDSS_BYTE0_CLK					3
-#define DISP_CC_MDSS_BYTE0_CLK_SRC				4
-#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC				5
-#define DISP_CC_MDSS_BYTE0_INTF_CLK				6
-#define DISP_CC_MDSS_BYTE1_CLK					7
-#define DISP_CC_MDSS_BYTE1_CLK_SRC				8
-#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC				9
-#define DISP_CC_MDSS_BYTE1_INTF_CLK				10
-#define DISP_CC_MDSS_DP_AUX1_CLK				11
-#define DISP_CC_MDSS_DP_AUX1_CLK_SRC				12
-#define DISP_CC_MDSS_DP_AUX_CLK					13
-#define DISP_CC_MDSS_DP_AUX_CLK_SRC				14
-#define DISP_CC_MDSS_DP_CRYPTO1_CLK				15
-#define DISP_CC_MDSS_DP_CRYPTO1_CLK_SRC				16
-#define DISP_CC_MDSS_DP_CRYPTO_CLK				17
-#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC				18
-#define DISP_CC_MDSS_DP_LINK1_CLK				19
-#define DISP_CC_MDSS_DP_LINK1_CLK_SRC				20
-#define DISP_CC_MDSS_DP_LINK1_DIV_CLK_SRC			21
-#define DISP_CC_MDSS_DP_LINK1_INTF_CLK				22
-#define DISP_CC_MDSS_DP_LINK_CLK				23
-#define DISP_CC_MDSS_DP_LINK_CLK_SRC				24
-#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC			25
-#define DISP_CC_MDSS_DP_LINK_INTF_CLK				26
-#define DISP_CC_MDSS_DP_PIXEL1_CLK				27
-#define DISP_CC_MDSS_DP_PIXEL1_CLK_SRC				28
-#define DISP_CC_MDSS_DP_PIXEL2_CLK				29
-#define DISP_CC_MDSS_DP_PIXEL2_CLK_SRC				30
-#define DISP_CC_MDSS_DP_PIXEL_CLK				31
-#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC				32
-#define DISP_CC_MDSS_EDP_AUX_CLK				33
-#define DISP_CC_MDSS_EDP_AUX_CLK_SRC				34
-#define DISP_CC_MDSS_EDP_GTC_CLK				35
-#define DISP_CC_MDSS_EDP_GTC_CLK_SRC				36
-#define DISP_CC_MDSS_EDP_LINK_CLK				37
-#define DISP_CC_MDSS_EDP_LINK_CLK_SRC				38
-#define DISP_CC_MDSS_EDP_LINK_DIV_CLK_SRC			39
-#define DISP_CC_MDSS_EDP_LINK_INTF_CLK				40
-#define DISP_CC_MDSS_EDP_PIXEL_CLK				41
-#define DISP_CC_MDSS_EDP_PIXEL_CLK_SRC				42
-#define DISP_CC_MDSS_ESC0_CLK					43
-#define DISP_CC_MDSS_ESC0_CLK_SRC				44
-#define DISP_CC_MDSS_ESC1_CLK					45
-#define DISP_CC_MDSS_ESC1_CLK_SRC				46
-#define DISP_CC_MDSS_MDP_CLK					47
-#define DISP_CC_MDSS_MDP_CLK_SRC				48
-#define DISP_CC_MDSS_MDP_LUT_CLK				49
-#define DISP_CC_MDSS_NON_GDSC_AHB_CLK				50
-#define DISP_CC_MDSS_PCLK0_CLK					51
-#define DISP_CC_MDSS_PCLK0_CLK_SRC				52
-#define DISP_CC_MDSS_PCLK1_CLK					53
-#define DISP_CC_MDSS_PCLK1_CLK_SRC				54
-#define DISP_CC_MDSS_ROT_CLK					55
-#define DISP_CC_MDSS_ROT_CLK_SRC				56
-#define DISP_CC_MDSS_RSCC_AHB_CLK				57
-#define DISP_CC_MDSS_RSCC_VSYNC_CLK				58
-#define DISP_CC_MDSS_SPDM_DEBUG_CLK				59
-#define DISP_CC_MDSS_SPDM_DP_CRYPTO_CLK				60
-#define DISP_CC_MDSS_SPDM_DP_CRYPTO_DIV_CLK_SRC			61
-#define DISP_CC_MDSS_SPDM_DP_PIXEL1_CLK				62
-#define DISP_CC_MDSS_SPDM_DP_PIXEL1_DIV_CLK_SRC			63
-#define DISP_CC_MDSS_SPDM_DP_PIXEL_CLK				64
-#define DISP_CC_MDSS_SPDM_DP_PIXEL_DIV_CLK_SRC			65
-#define DISP_CC_MDSS_SPDM_MDP_CLK				66
-#define DISP_CC_MDSS_SPDM_MDP_DIV_CLK_SRC			67
-#define DISP_CC_MDSS_SPDM_PCLK0_CLK				68
-#define DISP_CC_MDSS_SPDM_PCLK0_DIV_CLK_SRC			69
-#define DISP_CC_MDSS_SPDM_PCLK1_CLK				70
-#define DISP_CC_MDSS_SPDM_PCLK1_DIV_CLK_SRC			71
-#define DISP_CC_MDSS_SPDM_ROT_CLK				72
-#define DISP_CC_MDSS_SPDM_ROT_DIV_CLK_SRC			73
-#define DISP_CC_MDSS_VSYNC_CLK					74
-#define DISP_CC_MDSS_VSYNC_CLK_SRC				75
-#define DISP_CC_PLL0						76
-#define DISP_CC_PLL1						77
-#define DISP_CC_PLL_TEST_CLK					78
-#define DISP_CC_PLL_TEST_DIV_CLK_SRC				79
-#define DISP_CC_SLEEP_CLK					80
-#define DISP_CC_SLEEP_CLK_SRC					81
-#define DISP_CC_XO_CLK						82
-#define DISP_CC_XO_CLK_SRC					83
+/* DISP_CC clocks */
+#define DISP_CC_MDSS_AHB_CLK					0
+#define DISP_CC_MDSS_AHB_CLK_SRC				1
+#define DISP_CC_MDSS_BYTE0_CLK					2
+#define DISP_CC_MDSS_BYTE0_CLK_SRC				3
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC				4
+#define DISP_CC_MDSS_BYTE0_INTF_CLK				5
+#define DISP_CC_MDSS_BYTE1_CLK					6
+#define DISP_CC_MDSS_BYTE1_CLK_SRC				7
+#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC				8
+#define DISP_CC_MDSS_BYTE1_INTF_CLK				9
+#define DISP_CC_MDSS_DP_AUX1_CLK				10
+#define DISP_CC_MDSS_DP_AUX1_CLK_SRC				11
+#define DISP_CC_MDSS_DP_AUX_CLK					12
+#define DISP_CC_MDSS_DP_AUX_CLK_SRC				13
+#define DISP_CC_MDSS_DP_CRYPTO1_CLK				14
+#define DISP_CC_MDSS_DP_CRYPTO1_CLK_SRC				15
+#define DISP_CC_MDSS_DP_CRYPTO_CLK				16
+#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC				17
+#define DISP_CC_MDSS_DP_LINK1_CLK				18
+#define DISP_CC_MDSS_DP_LINK1_CLK_SRC				19
+#define DISP_CC_MDSS_DP_LINK1_DIV_CLK_SRC			20
+#define DISP_CC_MDSS_DP_LINK1_INTF_CLK				21
+#define DISP_CC_MDSS_DP_LINK_CLK				22
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC				23
+#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC			24
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK				25
+#define DISP_CC_MDSS_DP_PIXEL1_CLK				26
+#define DISP_CC_MDSS_DP_PIXEL1_CLK_SRC				27
+#define DISP_CC_MDSS_DP_PIXEL2_CLK				28
+#define DISP_CC_MDSS_DP_PIXEL2_CLK_SRC				29
+#define DISP_CC_MDSS_DP_PIXEL_CLK				30
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC				31
+#define DISP_CC_MDSS_EDP_AUX_CLK				32
+#define DISP_CC_MDSS_EDP_AUX_CLK_SRC				33
+#define DISP_CC_MDSS_EDP_GTC_CLK				34
+#define DISP_CC_MDSS_EDP_GTC_CLK_SRC				35
+#define DISP_CC_MDSS_EDP_LINK_CLK				36
+#define DISP_CC_MDSS_EDP_LINK_CLK_SRC				37
+#define DISP_CC_MDSS_EDP_LINK_DIV_CLK_SRC			38
+#define DISP_CC_MDSS_EDP_LINK_INTF_CLK				39
+#define DISP_CC_MDSS_EDP_PIXEL_CLK				40
+#define DISP_CC_MDSS_EDP_PIXEL_CLK_SRC				41
+#define DISP_CC_MDSS_ESC0_CLK					42
+#define DISP_CC_MDSS_ESC0_CLK_SRC				43
+#define DISP_CC_MDSS_ESC1_CLK					44
+#define DISP_CC_MDSS_ESC1_CLK_SRC				45
+#define DISP_CC_MDSS_MDP_CLK					46
+#define DISP_CC_MDSS_MDP_CLK_SRC				47
+#define DISP_CC_MDSS_MDP_LUT_CLK				48
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK				49
+#define DISP_CC_MDSS_PCLK0_CLK					50
+#define DISP_CC_MDSS_PCLK0_CLK_SRC				51
+#define DISP_CC_MDSS_PCLK1_CLK					52
+#define DISP_CC_MDSS_PCLK1_CLK_SRC				53
+#define DISP_CC_MDSS_ROT_CLK					54
+#define DISP_CC_MDSS_ROT_CLK_SRC				55
+#define DISP_CC_MDSS_RSCC_AHB_CLK				56
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK				57
+#define DISP_CC_MDSS_VSYNC_CLK					58
+#define DISP_CC_MDSS_VSYNC_CLK_SRC				59
+#define DISP_CC_PLL0						60
+#define DISP_CC_PLL1						61
+#define DISP_CC_SLEEP_CLK					62
+#define DISP_CC_SLEEP_CLK_SRC					63
+#define DISP_CC_XO_CLK						64
 
-#define MDSS_CORE_GDSC						0
-
+/* DISP_CC resets */
 #define DISP_CC_MDSS_CORE_BCR					0
 #define DISP_CC_MDSS_RSCC_BCR					1
-#define DISP_CC_MDSS_SPDM_BCR					2
 
 #endif
diff --git a/include/dt-bindings/clock/qcom,gcc-lito.h b/include/dt-bindings/clock/qcom,gcc-lito.h
index 25c44e6..a8d86ec 100644
--- a/include/dt-bindings/clock/qcom,gcc-lito.h
+++ b/include/dt-bindings/clock/qcom,gcc-lito.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. */
 
 #ifndef _DT_BINDINGS_CLK_QCOM_GCC_LITO_H
 #define _DT_BINDINGS_CLK_QCOM_GCC_LITO_H
@@ -153,5 +153,8 @@
 #define GCC_UFS_PHY_BCR						12
 #define GCC_USB30_PRIM_BCR					13
 #define GCC_USB_PHY_CFG_AHB2PHY_BCR				14
+#define GCC_QUSB2PHY_PRIM_BCR					15
+#define GCC_USB3_DP_PHY_PRIM_BCR				16
+#define GCC_USB3_PHY_PRIM_BCR					17
 
 #endif
diff --git a/include/dt-bindings/regulator/qcom,rpmh-regulator-levels.h b/include/dt-bindings/regulator/qcom,rpmh-regulator-levels.h
index 65c9644..086c4de 100644
--- a/include/dt-bindings/regulator/qcom,rpmh-regulator-levels.h
+++ b/include/dt-bindings/regulator/qcom,rpmh-regulator-levels.h
@@ -12,6 +12,7 @@
 #define RPMH_REGULATOR_LEVEL_LOW_SVS	64
 #define RPMH_REGULATOR_LEVEL_SVS	128
 #define RPMH_REGULATOR_LEVEL_SVS_L1	192
+#define RPMH_REGULATOR_LEVEL_SVS_L2	224
 #define RPMH_REGULATOR_LEVEL_NOM	256
 #define RPMH_REGULATOR_LEVEL_NOM_L1	320
 #define RPMH_REGULATOR_LEVEL_NOM_L2	336
diff --git a/include/dt-bindings/thermal/thermal.h b/include/dt-bindings/thermal/thermal.h
index b5e6b00..edd2a15 100644
--- a/include/dt-bindings/thermal/thermal.h
+++ b/include/dt-bindings/thermal/thermal.h
@@ -12,6 +12,7 @@
 
 /* On cooling devices upper and lower limits */
 #define THERMAL_NO_LIMIT		(~0)
+#define THERMAL_MAX_LIMIT		(THERMAL_NO_LIMIT - 1)
 
 #endif
 
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 9a6bc09..c311571 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -258,6 +258,14 @@
  */
 static inline void wb_put(struct bdi_writeback *wb)
 {
+	if (WARN_ON_ONCE(!wb->bdi)) {
+		/*
+		 * A driver bug might cause a file to be removed before bdi was
+		 * initialized.
+		 */
+		return;
+	}
+
 	if (wb != &wb->bdi->wb)
 		percpu_ref_put(&wb->refcnt);
 }
diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h
index 7cca5f8..f3c4351 100644
--- a/include/linux/bcma/bcma_soc.h
+++ b/include/linux/bcma/bcma_soc.h
@@ -6,6 +6,7 @@
 
 struct bcma_soc {
 	struct bcma_bus bus;
+	struct device *dev;
 };
 
 int __init bcma_host_soc_register(struct bcma_soc *soc);
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 1fd6fa8..9139372 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -134,6 +134,7 @@
 	struct bpf_func_state *frame[MAX_CALL_FRAMES];
 	struct bpf_verifier_state *parent;
 	u32 curframe;
+	bool speculative;
 };
 
 /* linked list of verifier states used to prune search */
@@ -142,15 +143,25 @@
 	struct bpf_verifier_state_list *next;
 };
 
+/* Possible states for alu_state member. */
+#define BPF_ALU_SANITIZE_SRC		1U
+#define BPF_ALU_SANITIZE_DST		2U
+#define BPF_ALU_NEG_VALUE		(1U << 2)
+#define BPF_ALU_NON_POINTER		(1U << 3)
+#define BPF_ALU_SANITIZE		(BPF_ALU_SANITIZE_SRC | \
+					 BPF_ALU_SANITIZE_DST)
+
 struct bpf_insn_aux_data {
 	union {
 		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
 		unsigned long map_state;	/* pointer/poison value for maps */
 		s32 call_imm;			/* saved imm field of call insn */
+		u32 alu_limit;			/* limit for add/sub register with pointer */
 	};
 	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
 	int sanitize_stack_off; /* stack slot to be cleared */
 	bool seen; /* this insn was processed by the verifier */
+	u8 alu_state; /* used in combination with alu_limit */
 };
 
 #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
@@ -186,6 +197,8 @@
  * one verifier_env per bpf_check() call
  */
 struct bpf_verifier_env {
+	u32 insn_idx;
+	u32 prev_insn_idx;
 	struct bpf_prog *prog;		/* eBPF program being verified */
 	const struct bpf_verifier_ops *ops;
 	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
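
The new alu_state byte is a small flag set: SRC/DST record which operand of a
pointer arithmetic instruction needs sanitizing against alu_limit, NEG_VALUE
and NON_POINTER are independent modifier bits, and BPF_ALU_SANITIZE masks the
first two. A standalone sketch of how such flags compose and test (values
copied from the hunk; the interpretation is a hedged reading of the field
comments):

#include <stdio.h>

#define BPF_ALU_SANITIZE_SRC	1U
#define BPF_ALU_SANITIZE_DST	2U
#define BPF_ALU_NEG_VALUE	(1U << 2)
#define BPF_ALU_NON_POINTER	(1U << 3)
#define BPF_ALU_SANITIZE	(BPF_ALU_SANITIZE_SRC | \
				 BPF_ALU_SANITIZE_DST)

int main(void)
{
	unsigned char alu_state = BPF_ALU_SANITIZE_SRC | BPF_ALU_NEG_VALUE;

	if (alu_state & BPF_ALU_SANITIZE)	/* either operand flagged */
		printf("sanitize, negative direction: %d\n",
		       !!(alu_state & BPF_ALU_NEG_VALUE));
	return 0;
}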
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 4d36b27..0242f6e 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -75,7 +75,7 @@
 #define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
 #endif
 
-#ifdef RETPOLINE
+#ifdef CONFIG_RETPOLINE
 #define __noretpoline __attribute__((indirect_branch("keep")))
 #endif
 
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index f39488c3..454a6ce 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -90,6 +90,20 @@
 };
 
 /**
+ * struct coresight_reg_clk - regulators and clocks need by coresight
+ * @nr_reg:	number of regulators
+ * @nr_clk:	number of clocks
+ * @reg:	regulator list
+ * @clk:	clock list
+ */
+struct coresight_reg_clk {
+	int nr_reg;
+	int nr_clk;
+	struct regulator **reg;
+	struct clk **clk;
+};
+
+/**
  * struct coresight_platform_data - data harvested from the DT specification
  * @cpu:	the CPU a source belongs to. Only applicable for ETM/PTMs.
  * @name:	name of the component as shown under sysfs.
@@ -100,6 +114,8 @@
  * @child_ports:child component port number the current component is
 		connected  to.
  * @nr_outport:	number of output ports for this component.
+ * @clk:	The clock this component is associated to.
+ * @reg_clk:	as defined by @coresight_reg_clk.
  */
 struct coresight_platform_data {
 	int cpu;
@@ -110,6 +126,8 @@
 	const char **child_names;
 	int *child_ports;
 	int nr_outport;
+	struct clk *clk;
+	struct coresight_reg_clk *reg_clk;
 };
 
 /**
@@ -165,6 +183,8 @@
  * @activated:	'true' only if a _sink_ has been activated.  A sink can be
 		activated but not yet enabled.  Enabling for a _sink_
 		happens when a source has been selected for that it.
+ * @abort:	captures sink trace on abort.
+ * @reg_clk:	as defined by @coresight_reg_clk.
  */
 struct coresight_device {
 	struct coresight_connection *conns;
@@ -179,6 +199,7 @@
 	bool orphan;
 	bool enable;	/* true only if configured as part of a path */
 	bool activated;	/* true only if a sink is part of a path */
+	struct coresight_reg_clk *reg_clk;
 };
 
 #define to_coresight_device(d) container_of(d, struct coresight_device, dev)
@@ -275,6 +296,9 @@
 extern void coresight_disable(struct coresight_device *csdev);
 extern int coresight_timeout(void __iomem *addr, u32 offset,
 			     int position, int value);
+extern void coresight_abort(void);
+extern void coresight_disable_reg_clk(struct coresight_device *csdev);
+extern void coresight_enable_reg_clk(struct coresight_device *csdev);
 #else
 static inline struct coresight_device *
 coresight_register(struct coresight_desc *desc) { return NULL; }
@@ -284,6 +308,9 @@
 static inline void coresight_disable(struct coresight_device *csdev) {}
 static inline int coresight_timeout(void __iomem *addr, u32 offset,
 				     int position, int value) { return 1; }
+static inline void coresight_abort(void) {}
+static inline void coresight_disable_reg_clk(struct coresight_device *csdev) {}
+static inline void coresight_enable_reg_clk(struct coresight_device *csdev) {}
 #endif
 
 #ifdef CONFIG_OF
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index 7775dd7..65c92e6 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -30,12 +30,6 @@
 
 struct cpufreq_policy;
 
-typedef int (*plat_mitig_t)(int cpu, u32 clip_freq);
-
-struct cpu_cooling_ops {
-	plat_mitig_t ceil_limit, floor_limit;
-};
-
 #ifdef CONFIG_CPU_THERMAL
 /**
  * cpufreq_cooling_register - function to create cpufreq cooling device.
@@ -44,10 +38,6 @@
 struct thermal_cooling_device *
 cpufreq_cooling_register(struct cpufreq_policy *policy);
 
-struct thermal_cooling_device *
-cpufreq_platform_cooling_register(struct cpufreq_policy *policy,
-					struct cpu_cooling_ops *ops);
-
 /**
  * cpufreq_cooling_unregister - function to remove cpufreq cooling device.
  * @cdev: thermal cooling device pointer.
@@ -81,13 +71,6 @@
 {
 	return NULL;
 }
-
-static inline struct thermal_cooling_device *
-cpufreq_platform_cooling_register(struct cpufreq_policy *policy,
-					struct cpu_cooling_ops *ops)
-{
-	return NULL;
-}
 #endif /* defined(CONFIG_THERMAL_OF) && defined(CONFIG_CPU_THERMAL) */
 
 #ifdef CONFIG_QTI_CPU_ISOLATE_COOLING_DEVICE
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index eb7b26f..6881973 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -950,6 +950,14 @@
 }
 #endif
 
+#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
+			struct cpufreq_governor *old_gov);
+#else
+static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
+			struct cpufreq_governor *old_gov) { }
+#endif
+
 extern void arch_freq_prepare_all(void);
 extern unsigned int arch_freq_get_on_cpu(int cpu);
 
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 30491e4..0dd5ffb 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -138,9 +138,9 @@
 	/* Must be the last timer callback */
 	CPUHP_AP_DUMMY_TIMER_STARTING,
 	CPUHP_AP_ARM_XEN_STARTING,
-	CPUHP_AP_ARM_CORESIGHT_STARTING,
 	CPUHP_AP_ARM_SAVE_RESTORE_CORESIGHT4_STARTING,
 	CPUHP_AP_ARM_MM_CORESIGHT4_STARTING,
+	CPUHP_AP_ARM_CORESIGHT_STARTING,
 	CPUHP_AP_ARM64_ISNDEP_STARTING,
 	CPUHP_AP_SMPCFD_DYING,
 	CPUHP_AP_X86_TBOOT_DYING,
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index dc2a6c8..9e15527 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -823,6 +823,10 @@
 void dma_release_declared_memory(struct device *dev);
 void *dma_mark_declared_memory_occupied(struct device *dev,
 					dma_addr_t device_addr, size_t size);
+dma_addr_t dma_get_device_base(struct device *dev,
+			       struct dma_coherent_mem *mem);
+unsigned long dma_get_size(struct dma_coherent_mem *mem);
+
 #else
 static inline int
 dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
@@ -842,6 +846,17 @@
 {
 	return ERR_PTR(-EBUSY);
 }
+static inline dma_addr_t
+dma_get_device_base(struct device *dev, struct dma_coherent_mem *mem)
+{
+	return 0;
+}
+
+static inline unsigned long dma_get_size(struct dma_coherent_mem *mem)
+{
+	return 0;
+}
+
 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
 
 #ifdef CONFIG_HAS_DMA
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
index 55deab2..aa027f7 100644
--- a/include/linux/energy_model.h
+++ b/include/linux/energy_model.h
@@ -27,7 +27,6 @@
  * em_perf_domain - Performance domain
  * @table:		List of capacity states, in ascending order
  * @nr_cap_states:	Number of capacity states
- * @kobj:		Kobject used to expose the domain in sysfs
  * @cpus:		Cpumask covering the CPUs of the domain
  *
  * A "performance domain" represents a group of CPUs whose performance is
@@ -38,7 +37,6 @@
 struct em_perf_domain {
 	struct em_cap_state *table;
 	int nr_cap_states;
-	struct kobject kobj;
 	unsigned long cpus[0];
 };
 
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 6791a0a..1a39d57 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -53,14 +53,10 @@
 #define BPF_REG_D	BPF_REG_8	/* data, callee-saved */
 #define BPF_REG_H	BPF_REG_9	/* hlen, callee-saved */
 
-/* Kernel hidden auxiliary/helper register for hardening step.
- * Only used by eBPF JITs. It's nothing more than a temporary
- * register that JITs use internally, only that here it's part
- * of eBPF instructions that have been rewritten for blinding
- * constants. See JIT pre-step in bpf_jit_blind_constants().
- */
+/* Kernel hidden auxiliary/helper register. */
 #define BPF_REG_AX		MAX_BPF_REG
-#define MAX_BPF_JIT_REG		(MAX_BPF_REG + 1)
+#define MAX_BPF_EXT_REG		(MAX_BPF_REG + 1)
+#define MAX_BPF_JIT_REG		MAX_BPF_EXT_REG
 
 /* unused opcode to mark special call to bpf_tail_call() helper */
 #define BPF_TAIL_CALL	0xf0
@@ -665,24 +661,10 @@
 	return size;
 }
 
-static inline bool bpf_ctx_narrow_align_ok(u32 off, u32 size_access,
-					   u32 size_default)
-{
-	size_default = bpf_ctx_off_adjust_machine(size_default);
-	size_access  = bpf_ctx_off_adjust_machine(size_access);
-
-#ifdef __LITTLE_ENDIAN
-	return (off & (size_default - 1)) == 0;
-#else
-	return (off & (size_default - 1)) + size_access == size_default;
-#endif
-}
-
 static inline bool
 bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
 {
-	return bpf_ctx_narrow_align_ok(off, size, size_default) &&
-	       size <= size_default && (size & (size - 1)) == 0;
+	return size <= size_default && (size & (size - 1)) == 0;
 }
 
 #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 25c08c6..f767293 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -129,7 +129,7 @@
 	struct disk_stats dkstats;
 #endif
 	struct percpu_ref ref;
-	struct rcu_head rcu_head;
+	struct rcu_work rcu_work;
 };
 
 #define GENHD_FL_REMOVABLE			1
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 4c92e3b..5ec8635 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -499,8 +499,7 @@
  * enough and allocate struct page for it.
  *
  * The device driver can wrap the hmm_devmem struct inside a private device
- * driver struct. The device driver must call hmm_devmem_remove() before the
- * device goes away and before freeing the hmm_devmem struct memory.
+ * driver struct.
  */
 struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
 				  struct device *device,
@@ -508,7 +507,6 @@
 struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
 					   struct device *device,
 					   struct resource *res);
-void hmm_devmem_remove(struct hmm_devmem *devmem);
 
 /*
  * hmm_devmem_page_set_drvdata - set per-page driver data field
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 5185a16..bbde887 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1166,8 +1166,9 @@
 	u32 bytes_avail_towrite;
 };
 
-void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
-			    struct hv_ring_buffer_debug_info *debug_info);
+int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
+				struct hv_ring_buffer_debug_info *debug_info);
 
 /* Vmbus interface */
 #define vmbus_driver_register(driver)	\
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index 6756fea..e44746d 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -54,6 +54,7 @@
 	case ARPHRD_IPGRE:
 	case ARPHRD_VOID:
 	case ARPHRD_NONE:
+	case ARPHRD_RAWIP:
 		return false;
 	default:
 		return true;
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 837f2f2..bb2c84a 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -281,4 +281,7 @@
 }
 #endif /* mul_u64_u32_div */
 
+#define DIV64_U64_ROUND_UP(ll, d)	\
+	({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
+
 #endif /* _LINUX_MATH64_H */
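
DIV64_U64_ROUND_UP() is ceiling division; the statement-expression temporary
guarantees the divisor is evaluated once even when the caller passes an
expression with side effects. A userspace analogue (div64_u64 replaced by
plain 64-bit division, which is what it is on 64-bit hosts):

#include <inttypes.h>
#include <stdio.h>

/* divisor bound to _tmp once, then classic (n + d - 1) / d round-up */
#define DIV64_U64_ROUND_UP(ll, d)	\
	({ uint64_t _tmp = (d); ((ll) + _tmp - 1) / _tmp; })

int main(void)
{
	printf("%" PRIu64 "\n", DIV64_U64_ROUND_UP(10ull, 4ull));	/* 3 */
	printf("%" PRIu64 "\n", DIV64_U64_ROUND_UP(8ull, 4ull));	/* 2 */
	return 0;
}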
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index f91f9e7..a84572c 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -106,6 +106,7 @@
  * @altmap: pre-allocated/reserved memory for vmemmap allocations
  * @res: physical address range covered by @ref
  * @ref: reference count that pins the devm_memremap_pages() mapping
+ * @kill: callback to transition @ref to the dead state
  * @dev: host device of the mapping for debug
  * @data: private data pointer for page_free()
  * @type: memory type: see MEMORY_* in memory_hotplug.h
@@ -117,6 +118,7 @@
 	bool altmap_valid;
 	struct resource res;
 	struct percpu_ref *ref;
+	void (*kill)(struct percpu_ref *ref);
 	struct device *dev;
 	void *data;
 	enum memory_type type;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 689fe9b..7fb6028 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1725,11 +1725,15 @@
 
 static inline void mm_inc_nr_puds(struct mm_struct *mm)
 {
+	if (mm_pud_folded(mm))
+		return;
 	atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
 }
 
 static inline void mm_dec_nr_puds(struct mm_struct *mm)
 {
+	if (mm_pud_folded(mm))
+		return;
 	atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
 }
 #endif
@@ -1749,11 +1753,15 @@
 
 static inline void mm_inc_nr_pmds(struct mm_struct *mm)
 {
+	if (mm_pmd_folded(mm))
+		return;
 	atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
 }
 
 static inline void mm_dec_nr_pmds(struct mm_struct *mm)
 {
+	if (mm_pmd_folded(mm))
+		return;
 	atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
 }
 #endif
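
On configurations where the PUD or PMD level is folded into the level above,
no page-table page actually backs it, so charging PTRS_PER_PUD/PTRS_PER_PMD
bytes to mm->pgtables_bytes would overstate the footprint and can trip the
"non-zero pgtables_bytes on freeing mm" warning at process exit; the
mm_pud_folded()/mm_pmd_folded() guards turn the inc/dec pair into a no-op
there.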
diff --git a/include/linux/module.h b/include/linux/module.h
index 6b21060..2755a57 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -823,7 +823,7 @@
 static inline void module_bug_cleanup(struct module *mod) {}
 #endif	/* CONFIG_GENERIC_BUG */
 
-#ifdef RETPOLINE
+#ifdef CONFIG_RETPOLINE
 extern bool retpoline_module_ok(bool has_retpoline);
 #else
 static inline bool retpoline_module_ok(bool has_retpoline)
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 5839d80..be8ec81 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -116,6 +116,8 @@
 	list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
 #define for_each_msi_entry(desc, dev)	\
 	list_for_each_entry((desc), dev_to_msi_list((dev)), list)
+#define for_each_msi_entry_safe(desc, tmp, dev)	\
+	list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
 
 #ifdef CONFIG_PCI_MSI
 #define first_pci_msi_entry(pdev)	first_msi_entry(&(pdev)->dev)
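
for_each_msi_entry_safe() mirrors list_for_each_entry_safe(): the extra
cursor caches the next descriptor, so the current one can be unlinked and
freed inside the loop without derailing the walk. A minimal sketch, where
free_my_msi_desc() is a hypothetical teardown helper:

	struct msi_desc *desc, *tmp;

	for_each_msi_entry_safe(desc, tmp, dev) {
		list_del(&desc->list);	/* safe: 'tmp' already points past it */
		free_my_msi_desc(desc);
	}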
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e86a358..c765496 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1455,6 +1455,7 @@
  * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
  * @IFF_FAILOVER: device is a failover master device
  * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
+ * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
  */
 enum netdev_priv_flags {
 	IFF_802_1Q_VLAN			= 1<<0,
@@ -1486,6 +1487,7 @@
 	IFF_NO_RX_HANDLER		= 1<<26,
 	IFF_FAILOVER			= 1<<27,
 	IFF_FAILOVER_SLAVE		= 1<<28,
+	IFF_L3MDEV_RX_HANDLER		= 1<<29,
 };
 
 #define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN
@@ -1516,6 +1518,7 @@
 #define IFF_NO_RX_HANDLER		IFF_NO_RX_HANDLER
 #define IFF_FAILOVER			IFF_FAILOVER
 #define IFF_FAILOVER_SLAVE		IFF_FAILOVER_SLAVE
+#define IFF_L3MDEV_RX_HANDLER		IFF_L3MDEV_RX_HANDLER
 
 /**
  *	struct net_device - The DEVICE structure.
@@ -4465,6 +4468,11 @@
 	return dev->priv_flags & IFF_SUPP_NOFCS;
 }
 
+static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
+{
+	return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
+}
+
 static inline bool netif_is_l3_master(const struct net_device *dev)
 {
 	return dev->priv_flags & IFF_L3MDEV_MASTER;
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 4a520d3..cf09ab3 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -62,18 +62,6 @@
 }
 #endif /* CONFIG_PROVE_LOCKING */
 
-/*
- * nfnl_dereference - fetch RCU pointer when updates are prevented by subsys mutex
- *
- * @p: The pointer to read, prior to dereferencing
- * @ss: The nfnetlink subsystem ID
- *
- * Return the value of the specified RCU-protected pointer, but omit
- * the READ_ONCE(), because caller holds the NFNL subsystem mutex.
- */
-#define nfnl_dereference(p, ss)					\
-	rcu_dereference_protected(p, lockdep_nfnl_is_held(ss))
-
 #define MODULE_ALIAS_NFNL_SUBSYS(subsys) \
 	MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys))
 
diff --git a/include/linux/of.h b/include/linux/of.h
index 99b0ebf..40e58b0e 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -138,11 +138,16 @@
 extern struct device_node *of_stdout;
 extern raw_spinlock_t devtree_lock;
 
-/* flag descriptions (need to be visible even when !CONFIG_OF) */
-#define OF_DYNAMIC	1 /* node and properties were allocated via kmalloc */
-#define OF_DETACHED	2 /* node has been detached from the device tree */
-#define OF_POPULATED	3 /* device already created for the node */
-#define OF_POPULATED_BUS	4 /* of_platform_populate recursed to children of this node */
+/*
+ * struct device_node flag descriptions
+ * (need to be visible even when !CONFIG_OF)
+ */
+#define OF_DYNAMIC		1 /* (and properties) allocated via kmalloc */
+#define OF_DETACHED		2 /* detached from the device tree */
+#define OF_POPULATED		3 /* device already created */
+#define OF_POPULATED_BUS	4 /* platform bus created for children */
+#define OF_OVERLAY		5 /* allocated for an overlay */
+#define OF_OVERLAY_FREE_CSET	6 /* in overlay cset being freed */
 
 #define OF_BAD_ADDR	((u64)-1)
 
diff --git a/include/linux/pinctrl/qcom-pinctrl.h b/include/linux/pinctrl/qcom-pinctrl.h
new file mode 100644
index 0000000..1ea9a87
--- /dev/null
+++ b/include/linux/pinctrl/qcom-pinctrl.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __LINUX_PINCTRL_MSM_H__
+#define __LINUX_PINCTRL_MSM_H__
+
+/* APIs to access qup_i3c registers */
+int msm_qup_write(u32 mode, u32 val);
+int msm_qup_read(u32 mode);
+
+#endif /* __LINUX_PINCTRL_MSM_H__ */
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 6894976..186cd8e 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -573,6 +573,8 @@
 		else if (destroy)
 			destroy(ptr);
 
+	if (producer >= size)
+		producer = 0;
 	__ptr_ring_set_size(r, size);
 	r->producer = producer;
 	r->consumer_head = 0;
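
The two added lines close a resize corner case: after entries are copied
from the old ring, 'producer' equals the number of entries copied, which can
be equal to the new, smaller 'size'. For example, copying 4 entries into a
ring resized to 4 leaves producer == 4, so without the wrap to 0 the next
produce would write r->queue[4], one slot past the end of the new array.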
diff --git a/include/linux/rq_stats.h b/include/linux/rq_stats.h
new file mode 100644
index 0000000..a0bccf1
--- /dev/null
+++ b/include/linux/rq_stats.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2011,2013-2014,2019, The Linux Foundation. All rights reserved.
+ *
+ */
+
+struct rq_data {
+	unsigned int rq_avg;
+	unsigned long rq_poll_jiffies;
+	unsigned long def_timer_jiffies;
+	unsigned long rq_poll_last_jiffy;
+	unsigned long rq_poll_total_jiffies;
+	unsigned long def_timer_last_jiffy;
+	unsigned int hotplug_disabled;
+	int64_t def_start_time;
+	struct attribute_group *attr_group;
+	struct kobject *kobj;
+	struct work_struct def_timer_work;
+	int init;
+};
+
+extern spinlock_t rq_lock;
+extern struct rq_data rq_info;
+extern struct workqueue_struct *rq_wq;
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
index ec912d0..ecdc654 100644
--- a/include/linux/sched/coredump.h
+++ b/include/linux/sched/coredump.h
@@ -71,6 +71,7 @@
 #define MMF_HUGE_ZERO_PAGE	23      /* mm has ever used the global huge zero page */
 #define MMF_DISABLE_THP		24	/* disable THP for all VMAs */
 #define MMF_OOM_VICTIM		25	/* mm is the oom victim */
+#define MMF_OOM_REAP_QUEUED	26	/* mm was queued for oom_reaper */
 #define MMF_DISABLE_THP_MASK	(1 << MMF_DISABLE_THP)
 
 #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h
index 2ffcb31..9615657 100644
--- a/include/linux/sched/cpufreq.h
+++ b/include/linux/sched/cpufreq.h
@@ -2,7 +2,6 @@
 #ifndef _LINUX_SCHED_CPUFREQ_H
 #define _LINUX_SCHED_CPUFREQ_H
 
-#include <linux/cpufreq.h>
 #include <linux/types.h>
 
 /*
@@ -35,12 +34,4 @@
 }
 #endif /* CONFIG_CPU_FREQ */
 
-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
-void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
-			struct cpufreq_governor *old_gov);
-#else
-static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
-			struct cpufreq_governor *old_gov) { }
-#endif
-
 #endif /* _LINUX_SCHED_CPUFREQ_H */
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 1a356250..5981923 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -129,13 +129,6 @@
 					size_t *lenp, loff_t *ppos);
 #endif
 
-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
-extern unsigned int sysctl_sched_energy_aware;
-extern int sched_energy_aware_handler(struct ctl_table *table, int write,
-				 void __user *buffer, size_t *lenp,
-				 loff_t *ppos);
-#endif
-
 #define LIB_PATH_LENGTH 512
 extern char sched_lib_name[LIB_PATH_LENGTH];
 extern unsigned int sched_lib_mask_check;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 60a2e76..5d69e20 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3178,6 +3178,7 @@
  *
  *	This is exactly the same as pskb_trim except that it ensures the
  *	checksum of received packets are still valid after the operation.
+ *	It can change skb pointers.
  */
 
 static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 73e130a..fdb6b31 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -295,9 +295,12 @@
 	struct svc_cacherep *	rq_cacherep;	/* cache info */
 	struct task_struct	*rq_task;	/* service thread */
 	spinlock_t		rq_lock;	/* per-request lock */
+	struct net		*rq_bc_net;	/* pointer to backchannel's
+						 * net namespace
+						 */
 };
 
-#define SVC_NET(svc_rqst)	(svc_rqst->rq_xprt->xpt_net)
+#define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)
 
 /*
  * Rigorous type checking on sockaddr type conversions
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 4750036..f5ec806 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -233,7 +233,6 @@
 	unsigned long	flags;		/* SWP_USED etc: see above */
 	signed short	prio;		/* swap priority of this type */
 	struct plist_node list;		/* entry in swap_active_head */
-	struct plist_node avail_lists[MAX_NUMNODES];/* entry in swap_avail_heads */
 	signed char	type;		/* strange name for an index */
 	unsigned int	max;		/* extent of the swap_map */
 	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
@@ -276,6 +275,16 @@
 	struct swap_cluster_list discard_clusters; /* discard clusters list */
 	unsigned int write_pending;
 	unsigned int max_writes;
+	struct plist_node avail_lists[0]; /*
+					   * entries in swap_avail_heads, one
+					   * entry per node.
+					   * Must be last as the number of the
+					   * array is nr_node_ids, which is not
+					   * a fixed value so have to allocate
+					   * dynamically.
+					   * And it has to be an array so that
+					   * plist_for_each_* can work.
+					   */
 };
 
 #ifdef CONFIG_64BIT
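
Turning avail_lists[] into a zero-length trailing array means the per-node
entries are sized at allocation time from nr_node_ids instead of the
compile-time MAX_NUMNODES, shrinking swap_info_struct on kernels built with
a large CONFIG_NODES_SHIFT. A hedged sketch of the allocation pattern this
implies (mm/swapfile.c differs in detail):

	struct swap_info_struct *p;

	p = kvzalloc(sizeof(*p) +
		     nr_node_ids * sizeof(p->avail_lists[0]),
		     GFP_KERNEL);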
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
index b9626aa..3e2a80c 100644
--- a/include/linux/t10-pi.h
+++ b/include/linux/t10-pi.h
@@ -39,12 +39,13 @@
 
 static inline u32 t10_pi_ref_tag(struct request *rq)
 {
+	unsigned int shift = ilog2(queue_logical_block_size(rq->q));
+
 #ifdef CONFIG_BLK_DEV_INTEGRITY
-	return blk_rq_pos(rq) >>
-		(rq->q->integrity.interval_exp - 9) & 0xffffffff;
-#else
-	return -1U;
+	if (rq->q->integrity.interval_exp)
+		shift = rq->q->integrity.interval_exp;
 #endif
+	return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff;
 }
 
 extern const struct blk_integrity_profile t10_pi_type1_crc;
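
The rewrite makes the reference tag fall back to the logical block size when
no integrity profile is registered, instead of unconditionally reading
rq->q->integrity.interval_exp (which is 0 in that case and produced a bogus
shift). Worked example, assuming 4096-byte logical blocks and no integrity
profile: shift = ilog2(4096) = 12, so a request starting at 512-byte sector
8 yields 8 >> (12 - SECTOR_SHIFT) = 8 >> 3 = 1, i.e. the second 4K block.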
diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
index 7e7fbfb..50c74a7 100644
--- a/include/linux/usb/tcpm.h
+++ b/include/linux/usb/tcpm.h
@@ -89,6 +89,7 @@
 	enum typec_port_data data;
 	enum typec_role default_role;
 	bool try_role_hw;	/* try.{src,snk} implemented in hardware */
+	bool self_powered;	/* port belongs to a self powered device */
 
 	const struct typec_altmode_desc *alt_modes;
 };
diff --git a/include/linux/usb/usbpd.h b/include/linux/usb/usbpd.h
index 2c7ff09..286c566 100644
--- a/include/linux/usb/usbpd.h
+++ b/include/linux/usb/usbpd.h
@@ -99,6 +99,8 @@
  *         otherwise ORIENTATION_NONE if not attached
  */
 enum plug_orientation usbpd_get_plug_orientation(struct usbpd *pd);
+
+void usbpd_vdm_in_suspend(struct usbpd *pd, bool in_suspend);
 #else
 static inline struct usbpd *devm_usbpd_get_by_phandle(struct device *dev,
 		const char *phandle)
@@ -134,6 +136,8 @@
 {
 	return ORIENTATION_NONE;
 }
+
+static inline void usbpd_vdm_in_suspend(struct usbpd *pd, bool in_suspend) { }
 #endif /* IS_ENABLED(CONFIG_USB_PD_POLICY) */
 
 /*
diff --git a/include/media/cec.h b/include/media/cec.h
index 9b7394a..dc4b412 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -155,6 +155,7 @@
 	unsigned int transmit_queue_sz;
 	struct list_head wait_queue;
 	struct cec_data *transmitting;
+	bool transmit_in_progress;
 
 	struct task_struct *kthread_config;
 	struct completion config_completion;
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 6a6b7b6..80e2183 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -37,6 +37,9 @@
 /* Indicate backport support for external authentication*/
 #define CFG80211_EXTERNAL_AUTH_SUPPORT 1
 
+/* Indicate backport support for external authentication in AP mode */
+#define CFG80211_EXTERNAL_AUTH_AP_SUPPORT 1
+
 /**
  * DOC: Introduction
  *
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index c9b7b13..95eed32 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -230,7 +230,7 @@
 		     struct netlink_ext_ack *extack);
 int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
 		   struct netlink_callback *cb);
-int fib_table_flush(struct net *net, struct fib_table *table);
+int fib_table_flush(struct net *net, struct fib_table *table, bool flush_all);
 struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
 void fib_table_flush_external(struct fib_table *table);
 void fib_free_table(struct fib_table *tb);
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index b0d022f..e114235 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -326,6 +326,26 @@
 int ip_tunnel_encap_setup(struct ip_tunnel *t,
 			  struct ip_tunnel_encap *ipencap);
 
+static inline bool pskb_inet_may_pull(struct sk_buff *skb)
+{
+	int nhlen;
+
+	switch (skb->protocol) {
+#if IS_ENABLED(CONFIG_IPV6)
+	case htons(ETH_P_IPV6):
+		nhlen = sizeof(struct ipv6hdr);
+		break;
+#endif
+	case htons(ETH_P_IP):
+		nhlen = sizeof(struct iphdr);
+		break;
+	default:
+		nhlen = 0;
+	}
+
+	return pskb_network_may_pull(skb, nhlen);
+}
+
 static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
 {
 	const struct ip_tunnel_encap_ops *ops;
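
pskb_inet_may_pull() centralizes the check that the inner IP/IPv6 header is
actually present in the linear area before a tunnel touches ip_hdr() or
ipv6_hdr(). A hedged call-site sketch; my_tunnel_xmit() and the build/send
helper are illustrative:

	static netdev_tx_t my_tunnel_xmit(struct sk_buff *skb,
					  struct net_device *dev)
	{
		if (!pskb_inet_may_pull(skb)) {
			dev->stats.tx_errors++;	/* truncated inner header */
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		/* ip_hdr(skb) / ipv6_hdr(skb) are now safe to dereference */
		return my_tunnel_build_and_send(skb, dev);
	}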
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
index 3832099..1284876 100644
--- a/include/net/l3mdev.h
+++ b/include/net/l3mdev.h
@@ -142,7 +142,8 @@
 
 	if (netif_is_l3_slave(skb->dev))
 		master = netdev_master_upper_dev_get_rcu(skb->dev);
-	else if (netif_is_l3_master(skb->dev))
+	else if (netif_is_l3_master(skb->dev) ||
+		 netif_has_l3_rx_handler(skb->dev))
 		master = skb->dev;
 
 	if (master && master->l3mdev_ops->l3mdev_l3_rcv)
diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h
index 4b2b2ba..f32fc82 100644
--- a/include/net/netfilter/nf_conntrack_count.h
+++ b/include/net/netfilter/nf_conntrack_count.h
@@ -5,17 +5,10 @@
 
 struct nf_conncount_data;
 
-enum nf_conncount_list_add {
-	NF_CONNCOUNT_ADDED, 	/* list add was ok */
-	NF_CONNCOUNT_ERR,	/* -ENOMEM, must drop skb */
-	NF_CONNCOUNT_SKIP,	/* list is already reclaimed by gc */
-};
-
 struct nf_conncount_list {
 	spinlock_t list_lock;
 	struct list_head head;	/* connections with the same filtering key */
 	unsigned int count;	/* length of list */
-	bool dead;
 };
 
 struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
@@ -29,18 +22,12 @@
 				const struct nf_conntrack_tuple *tuple,
 				const struct nf_conntrack_zone *zone);
 
-void nf_conncount_lookup(struct net *net, struct nf_conncount_list *list,
-			 const struct nf_conntrack_tuple *tuple,
-			 const struct nf_conntrack_zone *zone,
-			 bool *addit);
+int nf_conncount_add(struct net *net, struct nf_conncount_list *list,
+		     const struct nf_conntrack_tuple *tuple,
+		     const struct nf_conntrack_zone *zone);
 
 void nf_conncount_list_init(struct nf_conncount_list *list);
 
-enum nf_conncount_list_add
-nf_conncount_add(struct nf_conncount_list *list,
-		 const struct nf_conntrack_tuple *tuple,
-		 const struct nf_conntrack_zone *zone);
-
 bool nf_conncount_gc_list(struct net *net,
 			  struct nf_conncount_list *list);
 
diff --git a/include/net/sock.h b/include/net/sock.h
index f18dbd6..8f44733 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -298,6 +298,7 @@
   *	@sk_filter: socket filtering instructions
   *	@sk_timer: sock cleanup timer
   *	@sk_stamp: time stamp of last packet received
+  *	@sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only
   *	@sk_tsflags: SO_TIMESTAMPING socket options
   *	@sk_tskey: counter to disambiguate concurrent tstamp requests
   *	@sk_zckey: counter to order MSG_ZEROCOPY notifications
@@ -474,6 +475,9 @@
 	const struct cred	*sk_peer_cred;
 	long			sk_rcvtimeo;
 	ktime_t			sk_stamp;
+#if BITS_PER_LONG==32
+	seqlock_t		sk_stamp_seq;
+#endif
 	u16			sk_tsflags;
 	u8			sk_shutdown;
 	u32			sk_tskey;
@@ -2290,6 +2294,34 @@
 	atomic_add(segs, &sk->sk_drops);
 }
 
+static inline ktime_t sock_read_timestamp(struct sock *sk)
+{
+#if BITS_PER_LONG==32
+	unsigned int seq;
+	ktime_t kt;
+
+	do {
+		seq = read_seqbegin(&sk->sk_stamp_seq);
+		kt = sk->sk_stamp;
+	} while (read_seqretry(&sk->sk_stamp_seq, seq));
+
+	return kt;
+#else
+	return sk->sk_stamp;
+#endif
+}
+
+static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
+{
+#if BITS_PER_LONG==32
+	write_seqlock(&sk->sk_stamp_seq);
+	sk->sk_stamp = kt;
+	write_sequnlock(&sk->sk_stamp_seq);
+#else
+	sk->sk_stamp = kt;
+#endif
+}
+
 void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
 			   struct sk_buff *skb);
 void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
@@ -2314,7 +2346,7 @@
 	     (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
 		__sock_recv_timestamp(msg, sk, skb);
 	else
-		sk->sk_stamp = kt;
+		sock_write_timestamp(sk, kt);
 
 	if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
 		__sock_recv_wifi_status(msg, sk, skb);
@@ -2335,9 +2367,9 @@
 	if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
 		__sock_recv_ts_and_drops(msg, sk, skb);
 	else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
-		sk->sk_stamp = skb->tstamp;
+		sock_write_timestamp(sk, skb->tstamp);
 	else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
-		sk->sk_stamp = 0;
+		sock_write_timestamp(sk, 0);
 }
 
 void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
@@ -2489,6 +2521,16 @@
 		return;
 	sk->sk_pacing_shift = val;
 }
+/* SOCKEV Notifier Events */
+#define SOCKEV_SOCKET   0x00
+#define SOCKEV_BIND     0x01
+#define SOCKEV_LISTEN   0x02
+#define SOCKEV_ACCEPT   0x03
+#define SOCKEV_CONNECT  0x04
+#define SOCKEV_SHUTDOWN 0x05
+
+int sockev_register_notify(struct notifier_block *nb);
+int sockev_unregister_notify(struct notifier_block *nb);
 
 /* if a socket is bound to a device, check that the given device
  * index is either the same or that the socket is bound to an L3
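
sockev_register_notify()/sockev_unregister_notify() expose the SOCKEV_*
events above through an ordinary notifier chain. A hedged consumer sketch;
that the notifier payload is the struct socket involved is an assumption,
not something this header guarantees:

	static int my_sockev_cb(struct notifier_block *nb,
				unsigned long event, void *data)
	{
		struct socket *sock = data;	/* assumption: payload is the socket */

		if (event == SOCKEV_LISTEN && sock->sk)
			pr_info("sockev: family %u entered LISTEN\n",
				sock->sk->sk_family);
		return NOTIFY_DONE;
	}

	static struct notifier_block my_sockev_nb = {
		.notifier_call = my_sockev_cb,
	};

	/* at init/exit: */
	sockev_register_notify(&my_sockev_nb);
	sockev_unregister_notify(&my_sockev_nb);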
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 0eb390c..da588de 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1552,6 +1552,7 @@
 		    int (*func)(struct xfrm_state *, int, void*), void *);
 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
 struct xfrm_state *xfrm_state_alloc(struct net *net);
+void xfrm_state_free(struct xfrm_state *x);
 struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
 				   const xfrm_address_t *saddr,
 				   const struct flowi *fl,
diff --git a/include/soc/qcom/qtee_shmbridge.h b/include/soc/qcom/qtee_shmbridge.h
index c023653..4cc332d 100644
--- a/include/soc/qcom/qtee_shmbridge.h
+++ b/include/soc/qcom/qtee_shmbridge.h
@@ -6,6 +6,9 @@
 #ifndef __QTEE_SHMBRIDGE_H__
 #define __QTEE_SHMBRIDGE_H__
 
+/* VMID and permission definitions */
+#include <soc/qcom/secure_buffer.h>
+
 /**
  * struct qtee_shm - info of shared memory allocated from the default bridge
  * @ paddr: physical address of the shm allocated from the default bridge
@@ -28,27 +31,29 @@
 /**
  * Register paddr & size as a bridge, get bridge handle
  *
- * @ paddr: paddr of buffer to be turned into bridge
- * @ size: size of the bridge
- * @ ns_vmid: non-secure vmid, like VMID_HLOS
- * @ ns_vm_perm: NS VM permission, like PERM_READ, PERM_WRITE
- * @ tz_perm: TZ permission
- * @ *handle: output shmbridge handle
+ * @ [IN] paddr: paddr of buffer to be turned into bridge
+ * @ [IN] size: size of the bridge
+ * @ [IN] ns_vmid_list: non-secure vmids array
+ * @ [IN] ns_vm_perm_list: NS VM permission array
+ * @ [IN] ns_vmid_num: number of NS VMIDs (at most 4)
+ * @ [IN] tz_perm: TZ permission
+ * @ [OUT] *handle: output shmbridge handle
  *
  * return success or error
  */
 int32_t qtee_shmbridge_register(
 		phys_addr_t paddr,
 		size_t size,
-		uint32_t ns_vmid,
-		uint32_t ns_vm_perm,
+		uint32_t *ns_vmid_list,
+		uint32_t *ns_vm_perm_list,
+		uint32_t ns_vmid_num,
 		uint32_t tz_perm,
 		uint64_t *handle);
 
 /**
  * Deregister bridge
  *
- * @ handle: shmbridge handle
+ * @ [IN] handle: shmbridge handle
  *
  * return success or error
  */
@@ -57,8 +62,8 @@
 /**
  * Sub-allocate from default kernel bridge created by shmb driver
  *
- * @ size: size of the buffer to be sub-allocated from the bridge
- * @ *shm: output qtee_shm structure with buffer paddr, vaddr and
+ * @ [IN] size: size of the buffer to be sub-allocated from the bridge
+ * @ [OUT] *shm: output qtee_shm structure with buffer paddr, vaddr and
  *         size; returns ERR_PTR or NULL otherwise
  *
  * return success or error
@@ -68,7 +73,7 @@
 /*
  * Free buffer that is sub-allocated from default kernel bridge
  *
- * @ shm: qtee_shm structure to be freed
+ * @ [IN] shm: qtee_shm structure to be freed
  *
  */
 void qtee_shmbridge_free_shm(struct qtee_shm *shm);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 7a4ee78..2cfd3b4 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -602,6 +602,7 @@
 	struct se_node_acl	*se_node_acl;
 	struct se_portal_group *se_tpg;
 	void			*fabric_sess_ptr;
+	struct percpu_ref	cmd_count;
 	struct list_head	sess_list;
 	struct list_head	sess_acl_list;
 	struct list_head	sess_cmd_list;
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index f4147b3..eb9d092 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -116,7 +116,7 @@
 				struct se_session *, void *));
 void target_remove_session(struct se_session *);
 
-void transport_init_session(struct se_session *);
+int transport_init_session(struct se_session *se_sess);
 struct se_session *transport_alloc_session(enum target_prot_op);
 int transport_alloc_session_tags(struct se_session *, unsigned int,
 		unsigned int);
diff --git a/include/trace/events/cma.h b/include/trace/events/cma.h
index 5017a88..a0773a5 100644
--- a/include/trace/events/cma.h
+++ b/include/trace/events/cma.h
@@ -8,7 +8,7 @@
 #include <linux/types.h>
 #include <linux/tracepoint.h>
 
-TRACE_EVENT(cma_alloc,
+DECLARE_EVENT_CLASS(cma_alloc_class,
 
 	TP_PROTO(unsigned long pfn, const struct page *page,
 		 unsigned int count, unsigned int align),
@@ -61,6 +61,44 @@
 		  __entry->count)
 );
 
+TRACE_EVENT(cma_alloc_start,
+
+	TP_PROTO(unsigned int count, unsigned int align),
+
+	TP_ARGS(count, align),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, count)
+		__field(unsigned int, align)
+	),
+
+	TP_fast_assign(
+		__entry->count = count;
+		__entry->align = align;
+	),
+
+	TP_printk("count=%u align=%u",
+		  __entry->count,
+		  __entry->align)
+);
+
+DEFINE_EVENT(cma_alloc_class, cma_alloc,
+
+	TP_PROTO(unsigned long pfn, const struct page *page,
+		 unsigned int count, unsigned int align),
+
+	TP_ARGS(pfn, page, count, align)
+);
+
+DEFINE_EVENT(cma_alloc_class, cma_alloc_busy_retry,
+
+	TP_PROTO(unsigned long pfn, const struct page *page,
+		 unsigned int count, unsigned int align),
+
+	TP_ARGS(pfn, page, count, align)
+);
+
 #endif /* _TRACE_CMA_H */
 
 /* This part must be outside protection */
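
Folding cma_alloc into DECLARE_EVENT_CLASS lets it share one
TP_STRUCT/TP_printk definition with the new cma_alloc_busy_retry event,
while cma_alloc_start keeps its own TRACE_EVENT because its prototype
differs. A hedged sketch of the intended call sites in the allocator
(placement in mm/cma.c is assumed):

	trace_cma_alloc_start(count, align);
	for (;;) {
		/* ...attempt a pfn range; on -EBUSY, log and retry... */
		trace_cma_alloc_busy_retry(pfn, page, count, align);
	}
	trace_cma_alloc(pfn, page, count, align);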
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 0e31eb1..0dfb174 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -225,6 +225,26 @@
 		  (unsigned long) __entry->ino, __entry->drop)
 );
 
+TRACE_EVENT(ext4_nfs_commit_metadata,
+	TP_PROTO(struct inode *inode),
+
+	TP_ARGS(inode),
+
+	TP_STRUCT__entry(
+		__field(	dev_t,	dev			)
+		__field(	ino_t,	ino			)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= inode->i_sb->s_dev;
+		__entry->ino	= inode->i_ino;
+	),
+
+	TP_printk("dev %d,%d ino %lu",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  (unsigned long) __entry->ino)
+);
+
 TRACE_EVENT(ext4_mark_inode_dirty,
 	TP_PROTO(struct inode *inode, unsigned long IP),
 
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
index 705b33d..ff2d62a 100644
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -70,6 +70,54 @@
 		__print_symbolic(__entry->mode, MIGRATE_MODE),
 		__print_symbolic(__entry->reason, MIGRATE_REASON))
 );
+
+TRACE_EVENT(mm_numa_migrate_ratelimit,
+
+	TP_PROTO(struct task_struct *p, int dst_nid, unsigned long nr_pages),
+
+	TP_ARGS(p, dst_nid, nr_pages),
+
+	TP_STRUCT__entry(
+		__array(char,		comm,	TASK_COMM_LEN)
+		__field(pid_t,		pid)
+		__field(int,		dst_nid)
+		__field(unsigned long,	nr_pages)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+		__entry->pid		= p->pid;
+		__entry->dst_nid	= dst_nid;
+		__entry->nr_pages	= nr_pages;
+	),
+
+	TP_printk("comm=%s pid=%d dst_nid=%d nr_pages=%lu",
+		__entry->comm,
+		__entry->pid,
+		__entry->dst_nid,
+		__entry->nr_pages)
+);
+
+TRACE_EVENT(mm_migrate_pages_start,
+
+	TP_PROTO(enum migrate_mode mode, int reason),
+
+	TP_ARGS(mode, reason),
+
+	TP_STRUCT__entry(
+		__field(enum migrate_mode, mode)
+		__field(int, reason)
+	),
+
+	TP_fast_assign(
+		__entry->mode	= mode;
+		__entry->reason	= reason;
+	),
+
+	TP_printk("mode=%s reason=%s",
+		__print_symbolic(__entry->mode, MIGRATE_MODE),
+		__print_symbolic(__entry->reason, MIGRATE_REASON))
+);
 #endif /* _TRACE_MIGRATE_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index bbb08a3..a2644c4 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -582,7 +582,8 @@
 		__field(u32, vers)
 		__field(u32, proc)
 		__string(service, name)
-		__string(addr, rqst->rq_xprt->xpt_remotebuf)
+		__string(addr, rqst->rq_xprt ?
+			 rqst->rq_xprt->xpt_remotebuf : "(null)")
 	),
 
 	TP_fast_assign(
@@ -590,7 +591,8 @@
 		__entry->vers = rqst->rq_vers;
 		__entry->proc = rqst->rq_proc;
 		__assign_str(service, name);
-		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
+		__assign_str(addr, rqst->rq_xprt ?
+			     rqst->rq_xprt->xpt_remotebuf : "(null)");
 	),
 
 	TP_printk("addr=%s xid=0x%08x service=%s vers=%u proc=%u",
diff --git a/include/uapi/drm/msm_drm_pp.h b/include/uapi/drm/msm_drm_pp.h
index a68420d..2645523 100644
--- a/include/uapi/drm/msm_drm_pp.h
+++ b/include/uapi/drm/msm_drm_pp.h
@@ -468,12 +468,14 @@
 	__u32 factor_out;
 };
 
+#define LTM_FEATURE_DEF 1
 #define LTM_DATA_SIZE_0 32
 #define LTM_DATA_SIZE_1 128
 #define LTM_DATA_SIZE_2 256
 #define LTM_DATA_SIZE_3 33
 #define LTM_BUFFER_SIZE 5
 #define LTM_GUARD_BYTES 255
+#define LTM_BLOCK_SIZE 2
 
 #define LTM_STATS_SAT (1 << 1)
 #define LTM_STATS_MERGE_SAT (1 << 2)
@@ -488,6 +490,18 @@
 	__u32 stats_04[LTM_DATA_SIZE_0];
 	__u32 stats_05[LTM_DATA_SIZE_0];
 	__u32 status_flag;
+	__u32 display_h;
+	__u32 display_v;
+	__u32 init_h[LTM_BLOCK_SIZE];
+	__u32 init_v;
+	__u32 inc_h;
+	__u32 inc_v;
+	__u32 portrait_en;
+	__u32 merge_en;
+	__u32 cfg_param_01;
+	__u32 cfg_param_02;
+	__u32 cfg_param_03;
+	__u32 cfg_param_04;
 };
 
 /*
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 8045240..ada1bce 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -18,3 +18,4 @@
 include include/linux/Kbuild.vservices
 endif
 header-y += okl4-link-shbuf.h
+header-y += sockev.h
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 6b634e2..7a03c15 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -744,6 +744,15 @@
 
 #define ABS_MISC		0x28
 
+/*
+ * 0x2e is reserved and should not be used in input drivers.
+ * It was used by HID as ABS_MISC+6 and userspace needs to detect if
+ * the next ABS_* event is correct or is just ABS_MISC + n.
+ * We define here ABS_RESERVED so userspace can rely on it and detect
+ * the situation described above.
+ */
+#define ABS_RESERVED		0x2e
+
 #define ABS_MT_SLOT		0x2f	/* MT slot being modified */
 #define ABS_MT_TOUCH_MAJOR	0x30	/* Major axis of touching ellipse */
 #define ABS_MT_TOUCH_MINOR	0x31	/* Minor axis (omit if circular) */
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index fb78f6f..f056b2a 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -26,13 +26,17 @@
  */
 
 struct input_event {
-#if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL)
+#if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL__)
 	struct timeval time;
 #define input_event_sec time.tv_sec
 #define input_event_usec time.tv_usec
 #else
 	__kernel_ulong_t __sec;
+#if defined(__sparc__) && defined(__arch64__)
+	unsigned int __usec;
+#else
 	__kernel_ulong_t __usec;
+#endif
 #define input_event_sec  __sec
 #define input_event_usec __usec
 #endif
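
Because only __KERNEL__ is ever defined, the misspelled !defined(__KERNEL)
test could never be false, so 64-bit kernel builds wrongly took the timeval
branch; with the spelling fixed they use the __sec/__usec pair again. The
sparc64 special case matches that ABI, where suseconds_t is a 32-bit int, so
the layout stays identical to the historical struct timeval one. A hedged
userspace sketch using the accessor macros, which work on both sides of the
#if:

	struct input_event ev;

	if (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("%lld.%06lld: type %u code %u value %d\n",
		       (long long)ev.input_event_sec,
		       (long long)ev.input_event_usec,
		       ev.type, ev.code, ev.value);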
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index 97ff3c1..e5b3972 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -155,8 +155,8 @@
 };
 
 struct sock_txtime {
-	clockid_t	clockid;	/* reference clockid */
-	__u32		flags;		/* as defined by enum txtime_flags */
+	__kernel_clockid_t	clockid;/* reference clockid */
+	__u32			flags;	/* as defined by enum txtime_flags */
 };
 
 #endif /* _NET_TIMESTAMPING_H */
diff --git a/include/uapi/linux/netfilter/xt_HARDIDLETIMER.h b/include/uapi/linux/netfilter/xt_HARDIDLETIMER.h
new file mode 100644
index 0000000..aa6a5ac
--- /dev/null
+++ b/include/uapi/linux/netfilter/xt_HARDIDLETIMER.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/* linux/include/linux/netfilter/xt_HARDIDLETIMER.h
+ *
+ * Header file for Xtables timer target module.
+ *
+ * Copyright (c) 2014, 2017, 2019 The Linux Foundation. All rights reserved.
+ *
+ * Copyright (C) 2004, 2010 Nokia Corporation
+ *
+ * Written by Timo Teras <ext-timo.teras@nokia.com>
+ *
+ * Converted to x_tables and forward-ported to 2.6.34
+ * by Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ */
+
+#ifndef _XT_HARDIDLETIMER_H
+#define _XT_HARDIDLETIMER_H
+
+#include <linux/types.h>
+
+#define MAX_HARDIDLETIMER_LABEL_SIZE 28
+#define NLMSG_MAX_SIZE 64
+
+#define NL_EVENT_TYPE_INACTIVE 0
+#define NL_EVENT_TYPE_ACTIVE 1
+
+struct hardidletimer_tg_info {
+	__u32 timeout;
+
+	char label[MAX_HARDIDLETIMER_LABEL_SIZE];
+
+	/* Use netlink messages for notification in addition to sysfs */
+	__u8 send_nl_msg;
+
+	/* for kernel module internal use only */
+	struct hardidletimer_tg *timer __attribute__((aligned(8)));
+};
+
+#endif
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index 776bc92..5fa3fcc 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -29,7 +29,7 @@
 #define NETLINK_RDMA		20
 #define NETLINK_CRYPTO		21	/* Crypto layer */
 #define NETLINK_SMC		22	/* SMC monitoring */
-
+#define NETLINK_SOCKEV		23	/* Socket Administrative Events */
 #define NETLINK_INET_DIAG	NETLINK_SOCK_DIAG
 
 #define MAX_LINKS 32		
diff --git a/include/uapi/linux/qrng.h b/include/uapi/linux/qrng.h
new file mode 100644
index 0000000..b999eee
--- /dev/null
+++ b/include/uapi/linux/qrng.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+#ifndef _UAPI_QRNG_H_
+#define _UAPI_QRNG_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define QRNG_IOC_MAGIC    0x100
+
+#define QRNG_IOCTL_RESET_BUS_BANDWIDTH\
+	_IO(QRNG_IOC_MAGIC, 1)
+
+#endif /* _UAPI_QRNG_H_ */
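
QRNG_IOCTL_RESET_BUS_BANDWIDTH is a bare _IO command and carries no payload.
A hedged userspace sketch; the device node name is an assumption about the
msm-rng driver, not something this header defines:

	int fd = open("/dev/msm-rng", O_RDWR);	/* assumed node name */

	if (fd >= 0) {
		if (ioctl(fd, QRNG_IOCTL_RESET_BUS_BANDWIDTH) < 0)
			perror("QRNG_IOCTL_RESET_BUS_BANDWIDTH");
		close(fd);
	}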
diff --git a/include/uapi/linux/sockev.h b/include/uapi/linux/sockev.h
new file mode 100644
index 0000000..df42986
--- /dev/null
+++ b/include/uapi/linux/sockev.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+
+#ifndef _SOCKEV_H_
+#define _SOCKEV_H_
+
+#include <linux/types.h>
+#include <linux/netlink.h>
+#include <linux/socket.h>
+
+enum sknetlink_groups {
+	SKNLGRP_UNICAST,
+	SKNLGRP_SOCKEV,
+	__SKNLGRP_MAX
+};
+
+#define SOCKEV_STR_MAX 32
+
+/********************************************************************
+ *		Socket operation messages
+ ********************************************************************/
+
+struct sknlsockevmsg {
+	__u8 event[SOCKEV_STR_MAX];
+	__u32 pid; /* (struct task_struct*)->pid */
+	__u16 skfamily; /* (struct socket*)->sk->sk_family */
+	__u8 skstate; /* (struct socket*)->sk->sk_state */
+	__u8 skprotocol; /* (struct socket*)->sk->sk_protocol */
+	__u16 sktype; /* (struct socket*)->sk->sk_type */
+	__u64 skflags; /* (struct socket*)->sk->sk_flags */
+};
+
+#endif /* _SOCKEV_H_ */
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index f121783..a3179ac 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -821,6 +821,7 @@
 	EXTRADATA_ADVANCED = 2,
 	EXTRADATA_ENC_INPUT_ROI = 4,
 	EXTRADATA_ENC_INPUT_HDR10PLUS = 8,
+	EXTRADATA_ENC_INPUT_CVP = 16,
 };
 enum v4l2_mpeg_cvp_extradata {
 	V4L2_MPEG_CVP_EXTRADATA_NONE = 0,
@@ -973,6 +974,11 @@
 	V4L2_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER_6 = 6,
 };
 
+#define V4L2_CID_MPEG_VIDC_VENC_CVP_DISABLE \
+	(V4L2_CID_MPEG_MSM_VIDC_BASE + 121)
+#define V4L2_CID_MPEG_VIDC_VENC_NATIVE_RECORDER \
+	(V4L2_CID_MPEG_MSM_VIDC_BASE + 122)
+
 /*  Camera class control IDs */
 
 #define V4L2_CID_CAMERA_CLASS_BASE	(V4L2_CTRL_CLASS_CAMERA | 0x900)
diff --git a/include/uapi/linux/wil6210_uapi.h b/include/uapi/linux/wil6210_uapi.h
new file mode 100644
index 0000000..ecc9599
--- /dev/null
+++ b/include/uapi/linux/wil6210_uapi.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __WIL6210_UAPI_H__
+#define __WIL6210_UAPI_H__
+
+#if !defined(__KERNEL__)
+#define __user
+#endif
+
+#include <linux/sockios.h>
+
+/* Numbers SIOCDEVPRIVATE and SIOCDEVPRIVATE + 1
+ * are used by Android devices to implement PNO (preferred network offload).
+ * Although this is a temporary solution, use different numbers to avoid conflicts
+ */
+
+/**
+ * Perform 32-bit I/O operation to the card memory
+ *
+ * User code should arrange data in memory like this:
+ *
+ *	struct wil_memio io;
+ *	struct ifreq ifr = {
+ *		.ifr_data = &io,
+ *	};
+ */
+#define WIL_IOCTL_MEMIO (SIOCDEVPRIVATE + 2)
+
+/**
+ * Perform block I/O operation to the card memory
+ *
+ * User code should arrange data in memory like this:
+ *
+ *	void *buf;
+ *	struct wil_memio_block io = {
+ *		.block = buf,
+ *	};
+ *	struct ifreq ifr = {
+ *		.ifr_data = &io,
+ *	};
+ */
+#define WIL_IOCTL_MEMIO_BLOCK (SIOCDEVPRIVATE + 3)
+
+/** operation to perform */
+#define WIL_MMIO_READ 0
+#define WIL_MMIO_WRITE 1
+#define WIL_MMIO_OP_MASK 0xff
+
+/** addressing mode to use */
+#define WIL_MMIO_ADDR_LINKER (0 << 8)
+#define WIL_MMIO_ADDR_AHB (1 << 8)
+#define WIL_MMIO_ADDR_BAR (2 << 8)
+#define WIL_MMIO_ADDR_MASK 0xff00
+
+struct wil_memio {
+	uint32_t op; /* enum wil_memio_op */
+	uint32_t addr; /* should be 32-bit aligned */
+	uint32_t val;
+};
+
+struct wil_memio_block {
+	uint32_t op; /* enum wil_memio_op */
+	uint32_t addr; /* should be 32-bit aligned */
+	uint32_t size; /* should be multiple of 4 */
+	uint64_t __user block; /* block address */
+};
+
+#endif /* __WIL6210_UAPI_H__ */
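
Completing the header's own wil_memio sketch, a hedged userspace example of
a single 32-bit AHB read; the interface name and register address are
placeholders, and sock_fd is assumed to be any open AF_INET socket used to
carry the private ioctl:

	struct wil_memio io = {
		.op = WIL_MMIO_READ | WIL_MMIO_ADDR_AHB,
		.addr = 0x880000,	/* placeholder card address */
	};
	struct ifreq ifr = {
		.ifr_data = (void *)&io,
	};

	strncpy(ifr.ifr_name, "wlan0", IFNAMSIZ - 1);	/* placeholder ifname */
	if (ioctl(sock_fd, WIL_IOCTL_MEMIO, &ifr) == 0)
		printf("0x%08x -> 0x%08x\n", io.addr, io.val);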
diff --git a/include/uapi/media/msm_cvp_private.h b/include/uapi/media/msm_cvp_private.h
index 2be53e6..82f69c2 100644
--- a/include/uapi/media/msm_cvp_private.h
+++ b/include/uapi/media/msm_cvp_private.h
@@ -8,6 +8,7 @@
 #include <linux/videodev2.h>
 
 #define MAX_DFS_HFI_PARAMS 20
+#define HFI_MAX_PLANES 4
 
 /* VIDIOC private cvp command */
 #define VIDIOC_CVP_CMD \
@@ -58,6 +59,16 @@
 
 #define MSM_CVP_HFI_DFS_FRAME_CMD_RESPONSE  (MSM_CVP_CMD_START + 8)
 
+#define MSM_CVP_HFI_DME_CONFIG_CMD  (MSM_CVP_CMD_START + 9)
+
+#define MSM_CVP_HFI_DME_FRAME_CMD  (MSM_CVP_CMD_START + 10)
+
+#define MSM_CVP_HFI_DME_FRAME_CMD_RESPONSE  (MSM_CVP_CMD_START + 11)
+
+#define MSM_CVP_HFI_PERSIST_CMD  (MSM_CVP_CMD_START + 12)
+
+#define MSM_CVP_HFI_PERSIST_CMD_RESPONSE  (MSM_CVP_CMD_START + 13)
+
 /* flags */
 #define MSM_CVP_FLAG_UNSECURE			0x00000000
 #define MSM_CVP_FLAG_SECURE			0x00000001
@@ -126,15 +137,6 @@
 };
 
 /**
- * enum HFI_COLOR_PLANE_TYPE - define the type of plane
- */
-enum HFI_COLOR_PLANE_TYPE {
-	HFI_COLOR_PLANE_METADATA,
-	HFI_COLOR_PLANE_PICDATA,
-	HFI_MAX_PLANES
-};
-
-/**
  * struct msm_cvp_color_plane_info - color plane info
  * @stride:      stride of plane
  * @buf_size:    size of plane
@@ -157,77 +159,40 @@
 	unsigned int client_data2;
 };
 
-/**
- * struct msm_cvp_dfsconfig - dfs config packet
- * @cmd_size:               command size in bytes
- * @cmd_address:            command address
- * @size:                   packet size in bytes
- * @packet_type:            HFI_CMD_SESSION_CVP_DFS
- * @session_id:             id value associated with a session
- * @srcbuffer_format:       buffer format of source imagesize
- * @left_plane_info:        left view buffer plane info
- * @right_plane_info:       right view buffer plane info
- * @width:                  image width
- * @height:                 image height
- * @occlusionmask_enable:   0: disable, 1: enable
- * @occlusioncost:          occlusion cost threshold
- * @occlusionbound:         occlusion bound
- * @occlusionshift:         occlusion shift
- * @maxdisparity:           max disparitymap in integer precision
- * @disparityoffset:        disparity offset
- * @medianfilter_enable:    enable median filter on disparity map
- * @occlusionfilling_enable:0: disable, 1: enable
- * @occlusionmaskdump:      0: disable, 1: enable
- * @clientdata:             client data for mapping command
- *                          and message pairs
- */
-struct msm_cvp_dfsconfig {
-	unsigned int cmd_size;
-	unsigned int cmd_address;
-	unsigned int size;
-	unsigned int packet_type;
-	unsigned int session_id;
-	unsigned int srcbuffer_format;
-	struct msm_cvp_color_plane_info left_plane_info;
-	struct msm_cvp_color_plane_info right_plane_info;
-	unsigned int width;
-	unsigned int height;
-	unsigned int occlusionmask_enable;
-	unsigned int occlusioncost;
-	unsigned int occlusionbound;
-	unsigned int occlusionshift;
-	unsigned int maxdisparity;
-	unsigned int disparityoffset;
-	unsigned int medianfilter_enable;
-	unsigned int occlusionfilling_enable;
-	unsigned int occlusionmaskdump;
-	struct msm_cvp_client_data clientdata;
-	unsigned int reserved[MAX_DFS_HFI_PARAMS];
+#define CVP_COLOR_PLANE_INFO_SIZE \
+	sizeof(struct msm_cvp_color_plane_info)
+#define CVP_CLIENT_DATA_SIZE	sizeof(struct msm_cvp_client_data)
+#define CVP_DFS_CONFIG_CMD_SIZE   38
+#define CVP_DFS_FRAME_CMD_SIZE 16
+#define CVP_DFS_FRAME_BUFFERS_OFFSET 8
+
+#define CVP_DME_CONFIG_CMD_SIZE   181
+#define CVP_DME_FRAME_CMD_SIZE 28
+#define CVP_DME_FRAME_BUFFERS_OFFSET 12
+#define CVP_DME_BUF_NUM	8
+
+#define CVP_PERSIST_CMD_SIZE 11
+#define CVP_PERSIST_BUFFERS_OFFSET 7
+#define CVP_PSRSIST_BUF_NUM	2
+
+struct msm_cvp_dfs_config {
+	unsigned int cvp_dfs_config[CVP_DFS_CONFIG_CMD_SIZE];
 };
 
-/**
- * struct msm_cvp_dfsframe - dfs frame packet
- * @cmd_size:                command size in bytes
- * @cmd_address:             command address
- * @size:                    packet size in bytes
- * @packet_type:             HFI_CMD_SESSION_CVP_DFS
- * @session_id:              id value associated with a session
- * @left_buffer_index:       left buffer index
- * @right_buffer_index:      right buffer index
- * @disparitymap_buffer_idx: disparity map buffer index
- * @occlusionmask_buffer_idx:occlusion mask buffer index
- */
-struct msm_cvp_dfsframe {
-	unsigned int cmd_size;
-	unsigned int cmd_address;
-	unsigned int size;
-	unsigned int packet_type;
-	unsigned int session_id;
-	unsigned int left_buffer_index;
-	unsigned int right_buffer_index;
-	unsigned int disparitymap_buffer_idx;
-	unsigned int occlusionmask_buffer_idx;
-	struct msm_cvp_client_data clientdata;
+struct msm_cvp_dfs_frame {
+	unsigned int frame_data[CVP_DFS_FRAME_CMD_SIZE];
+};
+
+struct msm_cvp_dme_config {
+	unsigned int cvp_dme_config[CVP_DME_CONFIG_CMD_SIZE];
+};
+
+struct msm_cvp_dme_frame {
+	unsigned int frame_data[CVP_DME_FRAME_CMD_SIZE];
+};
+
+struct msm_cvp_persist_buf {
+	unsigned int persist_data[CVP_PERSIST_CMD_SIZE];
 };
 
 /**
@@ -238,8 +203,8 @@
  * @regbuf:        buffer to be registered
  * @unregbuf:      buffer to be unregistered
  * @send_cmd:      sending generic HFI command
- * @dfsconfig:     sending DFS config command
- * @dfsframe:      sending DFS frame command
+ * @dfs_config:    sending DFS config command
+ * @dfs_frame:     sending DFS frame command
  */
 struct msm_cvp_arg {
 	unsigned int type;
@@ -249,8 +214,11 @@
 		struct msm_cvp_buffer regbuf;
 		struct msm_cvp_buffer unregbuf;
 		struct msm_cvp_send_cmd send_cmd;
-		struct msm_cvp_dfsconfig dfsconfig;
-		struct msm_cvp_dfsframe dfsframe;
+		struct msm_cvp_dfs_config dfs_config;
+		struct msm_cvp_dfs_frame dfs_frame;
+		struct msm_cvp_dme_config dme_config;
+		struct msm_cvp_dme_frame dme_frame;
+		struct msm_cvp_persist_buf pbuf_cmd;
 	} data;
 	unsigned int reserved[12];
 };
diff --git a/include/uapi/media/msm_vidc_utils.h b/include/uapi/media/msm_vidc_utils.h
index 6316cb0..14ee584 100644
--- a/include/uapi/media/msm_vidc_utils.h
+++ b/include/uapi/media/msm_vidc_utils.h
@@ -268,6 +268,11 @@
 	__u32 chroma_sample_loc_type_bottom_field;
 };
 
+#define  MSM_VIDC_EXTRADATA_HDR_HIST 0x7F100008
+struct msm_vidc_extradata_hdr_hist_payload {
+	__u32 value_count[1024];
+};
+
 #define MSM_VIDC_EXTRADATA_MPEG2_SEQDISP 0x0000000D
 struct msm_vidc_mpeg2_seqdisp_payload {
 	__u32 video_format;
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
index d13fd49..6e73f02 100644
--- a/include/uapi/rdma/vmw_pvrdma-abi.h
+++ b/include/uapi/rdma/vmw_pvrdma-abi.h
@@ -78,6 +78,7 @@
 	PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD,
 	PVRDMA_WR_BIND_MW,
 	PVRDMA_WR_REG_SIG_MR,
+	PVRDMA_WR_ERROR,
 };
 
 enum pvrdma_wc_status {
diff --git a/init/Kconfig b/init/Kconfig
index 6e3059a..e6d0841 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1159,6 +1159,7 @@
 	bool "Dead code and data elimination (EXPERIMENTAL)"
 	depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION
 	depends on EXPERT
+	depends on !(FUNCTION_TRACER && CC_IS_GCC && GCC_VERSION < 40800)
 	depends on $(cc-option,-ffunction-sections -fdata-sections)
 	depends on $(ld-option,--gc-sections)
 	help
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 3f5bf1a..474525e 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -52,6 +52,7 @@
 #define DST	regs[insn->dst_reg]
 #define SRC	regs[insn->src_reg]
 #define FP	regs[BPF_REG_FP]
+#define AX	regs[BPF_REG_AX]
 #define ARG1	regs[BPF_REG_ARG1]
 #define CTX	regs[BPF_REG_CTX]
 #define IMM	insn->imm
@@ -642,6 +643,26 @@
 	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
 	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
 
+	/* Constraints on AX register:
+	 *
+	 * AX register is inaccessible from user space. It is mapped in
+	 * all JITs, and used here for constant blinding rewrites. It is
+	 * typically "stateless" meaning its contents are only valid within
+	 * the executed instruction, but not across several instructions.
+	 * There are a few exceptions however which are further detailed
+	 * below.
+	 *
+	 * Constant blinding is only used by JITs, not in the interpreter.
+	 * The interpreter uses AX in some occasions as a local temporary
+	 * register e.g. in DIV or MOD instructions.
+	 *
+	 * In restricted circumstances, the verifier can also use the AX
+	 * register for rewrites as long as they do not interfere with
+	 * the above cases!
+	 */
+	if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
+		goto out;
+
 	if (from->imm == 0 &&
 	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
 	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
@@ -971,7 +992,6 @@
  */
 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
 {
-	u64 tmp;
 #define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
 	static const void *jumptable[256] = {
@@ -1045,36 +1065,36 @@
 		(*(s64 *) &DST) >>= IMM;
 		CONT;
 	ALU64_MOD_X:
-		div64_u64_rem(DST, SRC, &tmp);
-		DST = tmp;
+		div64_u64_rem(DST, SRC, &AX);
+		DST = AX;
 		CONT;
 	ALU_MOD_X:
-		tmp = (u32) DST;
-		DST = do_div(tmp, (u32) SRC);
+		AX = (u32) DST;
+		DST = do_div(AX, (u32) SRC);
 		CONT;
 	ALU64_MOD_K:
-		div64_u64_rem(DST, IMM, &tmp);
-		DST = tmp;
+		div64_u64_rem(DST, IMM, &AX);
+		DST = AX;
 		CONT;
 	ALU_MOD_K:
-		tmp = (u32) DST;
-		DST = do_div(tmp, (u32) IMM);
+		AX = (u32) DST;
+		DST = do_div(AX, (u32) IMM);
 		CONT;
 	ALU64_DIV_X:
 		DST = div64_u64(DST, SRC);
 		CONT;
 	ALU_DIV_X:
-		tmp = (u32) DST;
-		do_div(tmp, (u32) SRC);
-		DST = (u32) tmp;
+		AX = (u32) DST;
+		do_div(AX, (u32) SRC);
+		DST = (u32) AX;
 		CONT;
 	ALU64_DIV_K:
 		DST = div64_u64(DST, IMM);
 		CONT;
 	ALU_DIV_K:
-		tmp = (u32) DST;
-		do_div(tmp, (u32) IMM);
-		DST = (u32) tmp;
+		AX = (u32) DST;
+		do_div(AX, (u32) IMM);
+		DST = (u32) AX;
 		CONT;
 	ALU_END_TO_BE:
 		switch (IMM) {
@@ -1330,7 +1350,7 @@
 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
 { \
 	u64 stack[stack_size / sizeof(u64)]; \
-	u64 regs[MAX_BPF_REG]; \
+	u64 regs[MAX_BPF_EXT_REG]; \
 \
 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
 	ARG1 = (u64) (unsigned long) ctx; \
@@ -1343,7 +1363,7 @@
 				      const struct bpf_insn *insn) \
 { \
 	u64 stack[stack_size / sizeof(u64)]; \
-	u64 regs[MAX_BPF_REG]; \
+	u64 regs[MAX_BPF_EXT_REG]; \
 \
 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
 	BPF_R1 = r1; \
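
For context on why AX doubles as the interpreter's DIV/MOD scratch and the
blinding register: constant blinding rewrites an immediate-carrying
instruction into a short sequence that reconstructs the constant at run
time, roughly (hedged reconstruction of the scheme):

	MOV  AX, imm ^ rnd	/* emit only the blinded constant */
	XOR  AX, rnd		/* recover the original imm in AX */
	OP   DST, AX		/* original operation, now via AX */

Such a rewrite clobbers AX, which is why the new check skips any instruction
whose src or dst is already BPF_REG_AX; and replacing the on-stack 'tmp'
with AX is safe because the interpreter itself is never blinded.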
diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
index 3bfbf44..9670ee5 100644
--- a/kernel/bpf/map_in_map.c
+++ b/kernel/bpf/map_in_map.c
@@ -12,6 +12,7 @@
 struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
 {
 	struct bpf_map *inner_map, *inner_map_meta;
+	u32 inner_map_meta_size;
 	struct fd f;
 
 	f = fdget(inner_map_ufd);
@@ -35,7 +36,12 @@
 		return ERR_PTR(-EINVAL);
 	}
 
-	inner_map_meta = kzalloc(sizeof(*inner_map_meta), GFP_USER);
+	inner_map_meta_size = sizeof(*inner_map_meta);
+	/* In some cases verifier needs to access beyond just base map. */
+	if (inner_map->ops == &array_map_ops)
+		inner_map_meta_size = sizeof(struct bpf_array);
+
+	inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER);
 	if (!inner_map_meta) {
 		fdput(f);
 		return ERR_PTR(-ENOMEM);
@@ -45,9 +51,16 @@
 	inner_map_meta->key_size = inner_map->key_size;
 	inner_map_meta->value_size = inner_map->value_size;
 	inner_map_meta->map_flags = inner_map->map_flags;
-	inner_map_meta->ops = inner_map->ops;
 	inner_map_meta->max_entries = inner_map->max_entries;
 
+	/* Misc members not needed in bpf_map_meta_equal() check. */
+	inner_map_meta->ops = inner_map->ops;
+	if (inner_map->ops == &array_map_ops) {
+		inner_map_meta->unpriv_array = inner_map->unpriv_array;
+		container_of(inner_map_meta, struct bpf_array, map)->index_mask =
+		     container_of(inner_map, struct bpf_array, map)->index_mask;
+	}
+
 	fdput(f);
 	return inner_map_meta;
 }
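
index_mask is the array map's Spectre-v1 hardening: for unprivileged
programs every array lookup clamps the index with a power-of-two mask so a
mispredicted bounds check cannot speculatively read out of range,
conceptually:

	index &= array->index_mask;	/* e.g. max_entries 8 -> mask 0x7 */

Copying unpriv_array and index_mask into the inner-map template (and sizing
the template as a full struct bpf_array) ensures lookups that go through a
map-in-map inherit the same clamping instead of a zeroed mask.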
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 2954e4b..4d81be2 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -156,6 +156,7 @@
 
 #define BPF_COMPLEXITY_LIMIT_INSNS	131072
 #define BPF_COMPLEXITY_LIMIT_STACK	1024
+#define BPF_COMPLEXITY_LIMIT_STATES	64
 
 #define BPF_MAP_PTR_UNPRIV	1UL
 #define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
@@ -465,6 +466,7 @@
 		free_func_state(dst_state->frame[i]);
 		dst_state->frame[i] = NULL;
 	}
+	dst_state->speculative = src->speculative;
 	dst_state->curframe = src->curframe;
 	dst_state->parent = src->parent;
 	for (i = 0; i <= src->curframe; i++) {
@@ -510,7 +512,8 @@
 }
 
 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
-					     int insn_idx, int prev_insn_idx)
+					     int insn_idx, int prev_insn_idx,
+					     bool speculative)
 {
 	struct bpf_verifier_state *cur = env->cur_state;
 	struct bpf_verifier_stack_elem *elem;
@@ -528,6 +531,7 @@
 	err = copy_verifier_state(&elem->st, cur);
 	if (err)
 		goto err;
+	elem->st.speculative |= speculative;
 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
 		verbose(env, "BPF program is too complex\n");
 		goto err;
@@ -1237,6 +1241,31 @@
 	}
 }
 
+static int check_stack_access(struct bpf_verifier_env *env,
+			      const struct bpf_reg_state *reg,
+			      int off, int size)
+{
+	/* Stack accesses must be at a fixed offset, so that we
+	 * can determine what type of data were returned. See
+	 * check_stack_read().
+	 */
+	if (!tnum_is_const(reg->var_off)) {
+		char tn_buf[48];
+
+		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
+		verbose(env, "variable stack access var_off=%s off=%d size=%d",
+			tn_buf, off, size);
+		return -EACCES;
+	}
+
+	if (off >= 0 || off < -MAX_BPF_STACK) {
+		verbose(env, "invalid stack off=%d size=%d\n", off, size);
+		return -EACCES;
+	}
+
+	return 0;
+}
+
 /* check read/write into map element returned by bpf_map_lookup_elem() */
 static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
 			      int size, bool zero_size_allowed)
@@ -1268,13 +1297,17 @@
 	 */
 	if (env->log.level)
 		print_verifier_state(env, state);
+
 	/* The minimum value is only important with signed
 	 * comparisons where we can't assume the floor of a
 	 * value is 0.  If we are using signed variables for our
 	 * index'es we need to make sure that whatever we use
 	 * will have a set floor within our range.
 	 */
-	if (reg->smin_value < 0) {
+	if (reg->smin_value < 0 &&
+	    (reg->smin_value == S64_MIN ||
+	     (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
+	      reg->smin_value + off < 0)) {
 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
 			regno);
 		return -EACCES;
@@ -1735,24 +1768,10 @@
 		}
 
 	} else if (reg->type == PTR_TO_STACK) {
-		/* stack accesses must be at a fixed offset, so that we can
-		 * determine what type of data were returned.
-		 * See check_stack_read().
-		 */
-		if (!tnum_is_const(reg->var_off)) {
-			char tn_buf[48];
-
-			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
-			verbose(env, "variable stack access var_off=%s off=%d size=%d",
-				tn_buf, off, size);
-			return -EACCES;
-		}
 		off += reg->var_off.value;
-		if (off >= 0 || off < -MAX_BPF_STACK) {
-			verbose(env, "invalid stack off=%d size=%d\n", off,
-				size);
-			return -EACCES;
-		}
+		err = check_stack_access(env, reg, off, size);
+		if (err)
+			return err;
 
 		state = func(env, reg);
 		err = update_stack_depth(env, state, off);
@@ -2682,6 +2701,125 @@
 	return true;
 }
 
+static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
+{
+	return &env->insn_aux_data[env->insn_idx];
+}
+
+static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
+			      u32 *ptr_limit, u8 opcode, bool off_is_neg)
+{
+	bool mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
+			    (opcode == BPF_SUB && !off_is_neg);
+	u32 off;
+
+	switch (ptr_reg->type) {
+	case PTR_TO_STACK:
+		off = ptr_reg->off + ptr_reg->var_off.value;
+		if (mask_to_left)
+			*ptr_limit = MAX_BPF_STACK + off;
+		else
+			*ptr_limit = -off;
+		return 0;
+	case PTR_TO_MAP_VALUE:
+		if (mask_to_left) {
+			*ptr_limit = ptr_reg->umax_value + ptr_reg->off;
+		} else {
+			off = ptr_reg->smin_value + ptr_reg->off;
+			*ptr_limit = ptr_reg->map_ptr->value_size - off;
+		}
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
+				    const struct bpf_insn *insn)
+{
+	return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K;
+}
+
+static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
+				       u32 alu_state, u32 alu_limit)
+{
+	/* If we arrived here from different branches with different
+	 * state or limits to sanitize, then this won't work.
+	 */
+	if (aux->alu_state &&
+	    (aux->alu_state != alu_state ||
+	     aux->alu_limit != alu_limit))
+		return -EACCES;
+
+	/* Corresponding fixup done in fixup_bpf_calls(). */
+	aux->alu_state = alu_state;
+	aux->alu_limit = alu_limit;
+	return 0;
+}
+
+static int sanitize_val_alu(struct bpf_verifier_env *env,
+			    struct bpf_insn *insn)
+{
+	struct bpf_insn_aux_data *aux = cur_aux(env);
+
+	if (can_skip_alu_sanitation(env, insn))
+		return 0;
+
+	return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
+}
+
+static int sanitize_ptr_alu(struct bpf_verifier_env *env,
+			    struct bpf_insn *insn,
+			    const struct bpf_reg_state *ptr_reg,
+			    struct bpf_reg_state *dst_reg,
+			    bool off_is_neg)
+{
+	struct bpf_verifier_state *vstate = env->cur_state;
+	struct bpf_insn_aux_data *aux = cur_aux(env);
+	bool ptr_is_dst_reg = ptr_reg == dst_reg;
+	u8 opcode = BPF_OP(insn->code);
+	u32 alu_state, alu_limit;
+	struct bpf_reg_state tmp;
+	bool ret;
+
+	if (can_skip_alu_sanitation(env, insn))
+		return 0;
+
+	/* We already marked aux for masking from non-speculative
+	 * paths, thus we got here in the first place. We only care
+	 * to explore bad access from here.
+	 */
+	if (vstate->speculative)
+		goto do_sim;
+
+	alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
+	alu_state |= ptr_is_dst_reg ?
+		     BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
+
+	if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
+		return 0;
+	if (update_alu_sanitation_state(aux, alu_state, alu_limit))
+		return -EACCES;
+do_sim:
+	/* Simulate and find potential out-of-bounds access under
+	 * speculative execution from truncation as a result of
+	 * masking when off was not within expected range. If off
+	 * sits in dst, then we temporarily need to move ptr there
+	 * to simulate dst (== 0) +/-= ptr. Needed, for example,
+	 * for cases where we use K-based arithmetic in one direction
+	 * and truncated reg-based in the other in order to explore
+	 * bad access.
+	 */
+	if (!ptr_is_dst_reg) {
+		tmp = *dst_reg;
+		*dst_reg = *ptr_reg;
+	}
+	ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
+	if (!ptr_is_dst_reg)
+		*dst_reg = tmp;
+	return !ret ? -EFAULT : 0;
+}
+
 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
  * Caller should also handle BPF_MOV case separately.
  * If we return -EACCES, caller may want to try again treating pointer as a
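
The recorded alu_state/alu_limit pair is consumed later in
fixup_bpf_calls(), which rewrites the pointer ALU instruction with a masking
sequence; its effect, sketched in C-like pseudocode (hedged reconstruction):

	/* off is the scalar added to / subtracted from the pointer */
	AX = alu_limit - off;
	AX |= off;	/* sign bit set iff off < 0 or off > alu_limit */
	AX = -AX;
	AX s>>= 63;	/* all-ones when off was in range, zero otherwise */
	off &= AX;	/* out-of-range offsets collapse to 0 */

push_stack(..., true) additionally queues a speculative copy of the state,
so the verifier also walks the path a mispredicting CPU could take and
proves that the masked value keeps every access in bounds.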
@@ -2700,8 +2838,9 @@
 	    smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
 	u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
 	    umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
+	u32 dst = insn->dst_reg, src = insn->src_reg;
 	u8 opcode = BPF_OP(insn->code);
-	u32 dst = insn->dst_reg;
+	int ret;
 
 	dst_reg = &regs[dst];
 
@@ -2737,6 +2876,12 @@
 			dst);
 		return -EACCES;
 	}
+	if (ptr_reg->type == PTR_TO_MAP_VALUE &&
+	    !env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
+		verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
+			off_reg == dst_reg ? dst : src);
+		return -EACCES;
+	}
 
 	/* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
 	 * The id may be overwritten later if we create a new variable offset.
@@ -2750,6 +2895,11 @@
 
 	switch (opcode) {
 	case BPF_ADD:
+		ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
+		if (ret < 0) {
+			verbose(env, "R%d tried to add from different maps or paths\n", dst);
+			return ret;
+		}
 		/* We can take a fixed offset as long as it doesn't overflow
 		 * the s32 'off' field
 		 */
@@ -2800,6 +2950,11 @@
 		}
 		break;
 	case BPF_SUB:
+		ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
+		if (ret < 0) {
+			verbose(env, "R%d tried to sub from different maps or paths\n", dst);
+			return ret;
+		}
 		if (dst_reg == off_reg) {
 			/* scalar -= pointer.  Creates an unknown scalar */
 			verbose(env, "R%d tried to subtract pointer from scalar\n",
@@ -2879,6 +3034,25 @@
 	__update_reg_bounds(dst_reg);
 	__reg_deduce_bounds(dst_reg);
 	__reg_bound_offset(dst_reg);
+
+	/* For unprivileged we require that resulting offset must be in bounds
+	 * in order to be able to sanitize access later on.
+	 */
+	if (!env->allow_ptr_leaks) {
+		if (dst_reg->type == PTR_TO_MAP_VALUE &&
+		    check_map_access(env, dst, dst_reg->off, 1, false)) {
+			verbose(env, "R%d pointer arithmetic of map value goes out of range, "
+				"prohibited for !root\n", dst);
+			return -EACCES;
+		} else if (dst_reg->type == PTR_TO_STACK &&
+			   check_stack_access(env, dst_reg, dst_reg->off +
+					      dst_reg->var_off.value, 1)) {
+			verbose(env, "R%d stack pointer arithmetic goes out of range, "
+				"prohibited for !root\n", dst);
+			return -EACCES;
+		}
+	}
+
 	return 0;
 }
 
@@ -2897,6 +3071,8 @@
 	s64 smin_val, smax_val;
 	u64 umin_val, umax_val;
 	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
+	u32 dst = insn->dst_reg;
+	int ret;
 
 	if (insn_bitness == 32) {
 		/* Relevant for 32-bit RSH: Information can propagate towards
@@ -2931,6 +3107,11 @@
 
 	switch (opcode) {
 	case BPF_ADD:
+		ret = sanitize_val_alu(env, insn);
+		if (ret < 0) {
+			verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
+			return ret;
+		}
 		if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
 		    signed_add_overflows(dst_reg->smax_value, smax_val)) {
 			dst_reg->smin_value = S64_MIN;
@@ -2950,6 +3131,11 @@
 		dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
 		break;
 	case BPF_SUB:
+		ret = sanitize_val_alu(env, insn);
+		if (ret < 0) {
+			verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
+			return ret;
+		}
 		if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
 		    signed_sub_overflows(dst_reg->smax_value, smin_val)) {
 			/* Overflow possible, we know nothing */
@@ -3285,12 +3471,15 @@
 			return err;
 
 		if (BPF_SRC(insn->code) == BPF_X) {
+			struct bpf_reg_state *src_reg = regs + insn->src_reg;
+			struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
+
 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
 				/* case: R1 = R2
 				 * copy register state to dest reg
 				 */
-				regs[insn->dst_reg] = regs[insn->src_reg];
-				regs[insn->dst_reg].live |= REG_LIVE_WRITTEN;
+				*dst_reg = *src_reg;
+				dst_reg->live |= REG_LIVE_WRITTEN;
 			} else {
 				/* R1 = (u32) R2 */
 				if (is_pointer_value(env, insn->src_reg)) {
@@ -3298,9 +3487,14 @@
 						"R%d partial copy of pointer\n",
 						insn->src_reg);
 					return -EACCES;
+				} else if (src_reg->type == SCALAR_VALUE) {
+					*dst_reg = *src_reg;
+					dst_reg->live |= REG_LIVE_WRITTEN;
+				} else {
+					mark_reg_unknown(env, regs,
+							 insn->dst_reg);
 				}
-				mark_reg_unknown(env, regs, insn->dst_reg);
-				coerce_reg_to_size(&regs[insn->dst_reg], 4);
+				coerce_reg_to_size(dst_reg, 4);
 			}
 		} else {
 			/* case: R = imm
@@ -3467,6 +3661,79 @@
 	}
 }
 
+/* compute branch direction of the expression "if (reg opcode val) goto target;"
+ * and return:
+ *  1 - branch will be taken and "goto target" will be executed
+ *  0 - branch will not be taken and fall-through to next insn
+ * -1 - unknown. Example: "if (reg < 5)" is unknown when the register's value range is [0,10]
+ */
+static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
+{
+	if (__is_pointer_value(false, reg))
+		return -1;
+
+	switch (opcode) {
+	case BPF_JEQ:
+		if (tnum_is_const(reg->var_off))
+			return !!tnum_equals_const(reg->var_off, val);
+		break;
+	case BPF_JNE:
+		if (tnum_is_const(reg->var_off))
+			return !tnum_equals_const(reg->var_off, val);
+		break;
+	case BPF_JGT:
+		if (reg->umin_value > val)
+			return 1;
+		else if (reg->umax_value <= val)
+			return 0;
+		break;
+	case BPF_JSGT:
+		if (reg->smin_value > (s64)val)
+			return 1;
+		else if (reg->smax_value < (s64)val)
+			return 0;
+		break;
+	case BPF_JLT:
+		if (reg->umax_value < val)
+			return 1;
+		else if (reg->umin_value >= val)
+			return 0;
+		break;
+	case BPF_JSLT:
+		if (reg->smax_value < (s64)val)
+			return 1;
+		else if (reg->smin_value >= (s64)val)
+			return 0;
+		break;
+	case BPF_JGE:
+		if (reg->umin_value >= val)
+			return 1;
+		else if (reg->umax_value < val)
+			return 0;
+		break;
+	case BPF_JSGE:
+		if (reg->smin_value >= (s64)val)
+			return 1;
+		else if (reg->smax_value < (s64)val)
+			return 0;
+		break;
+	case BPF_JLE:
+		if (reg->umax_value <= val)
+			return 1;
+		else if (reg->umin_value > val)
+			return 0;
+		break;
+	case BPF_JSLE:
+		if (reg->smax_value <= (s64)val)
+			return 1;
+		else if (reg->smin_value > (s64)val)
+			return 0;
+		break;
+	}
+
+	return -1;
+}
+
 /* Adjusts the register min/max values in the case that the dst_reg is the
  * variable register that we are working on, and src_reg is a constant or we're
  * simply doing a BPF_K check.
@@ -3860,28 +4127,23 @@
 
 	dst_reg = &regs[insn->dst_reg];
 
-	/* detect if R == 0 where R was initialized to zero earlier */
-	if (BPF_SRC(insn->code) == BPF_K &&
-	    (opcode == BPF_JEQ || opcode == BPF_JNE) &&
-	    dst_reg->type == SCALAR_VALUE &&
-	    tnum_is_const(dst_reg->var_off)) {
-		if ((opcode == BPF_JEQ && dst_reg->var_off.value == insn->imm) ||
-		    (opcode == BPF_JNE && dst_reg->var_off.value != insn->imm)) {
-			/* if (imm == imm) goto pc+off;
-			 * only follow the goto, ignore fall-through
-			 */
+	if (BPF_SRC(insn->code) == BPF_K) {
+		int pred = is_branch_taken(dst_reg, insn->imm, opcode);
+
+		if (pred == 1) {
+			/* only follow the goto, ignore fall-through */
 			*insn_idx += insn->off;
 			return 0;
-		} else {
-			/* if (imm != imm) goto pc+off;
-			 * only follow fall-through branch, since
+		} else if (pred == 0) {
+			/* only follow fall-through branch, since
 			 * that's where the program will go
 			 */
 			return 0;
 		}
 	}
 
-	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
+	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
+				  false);
 	if (!other_branch)
 		return -EFAULT;
 	other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
@@ -4596,6 +4858,12 @@
 	if (old->curframe != cur->curframe)
 		return false;
 
+	/* Verification state from speculative execution simulation
+	 * must never prune a non-speculative execution one.
+	 */
+	if (old->speculative && !cur->speculative)
+		return false;
+
 	/* for states to be equal callsites have to be the same
 	 * and all frame states need to be equivalent
 	 */
@@ -4660,7 +4928,7 @@
 	struct bpf_verifier_state_list *new_sl;
 	struct bpf_verifier_state_list *sl;
 	struct bpf_verifier_state *cur = env->cur_state;
-	int i, j, err;
+	int i, j, err, states_cnt = 0;
 
 	sl = env->explored_states[insn_idx];
 	if (!sl)
@@ -4687,8 +4955,12 @@
 			return 1;
 		}
 		sl = sl->next;
+		states_cnt++;
 	}
 
+	if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
+		return 0;
+
 	/* there were no equivalent states, remember current one.
 	 * technically the current state is not proven to be safe yet,
 	 * but it will either reach outer most bpf_exit (which means it's safe)
@@ -4736,7 +5008,6 @@
 	struct bpf_insn *insns = env->prog->insnsi;
 	struct bpf_reg_state *regs;
 	int insn_cnt = env->prog->len, i;
-	int insn_idx, prev_insn_idx = 0;
 	int insn_processed = 0;
 	bool do_print_state = false;
 
@@ -4744,7 +5015,7 @@
 	if (!state)
 		return -ENOMEM;
 	state->curframe = 0;
-	state->parent = NULL;
+	state->speculative = false;
 	state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
 	if (!state->frame[0]) {
 		kfree(state);
@@ -4755,19 +5026,19 @@
 			BPF_MAIN_FUNC /* callsite */,
 			0 /* frameno */,
 			0 /* subprogno, zero == main subprog */);
-	insn_idx = 0;
+
 	for (;;) {
 		struct bpf_insn *insn;
 		u8 class;
 		int err;
 
-		if (insn_idx >= insn_cnt) {
+		if (env->insn_idx >= insn_cnt) {
 			verbose(env, "invalid insn idx %d insn_cnt %d\n",
-				insn_idx, insn_cnt);
+				env->insn_idx, insn_cnt);
 			return -EFAULT;
 		}
 
-		insn = &insns[insn_idx];
+		insn = &insns[env->insn_idx];
 		class = BPF_CLASS(insn->code);
 
 		if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
@@ -4777,17 +5048,19 @@
 			return -E2BIG;
 		}
 
-		err = is_state_visited(env, insn_idx);
+		err = is_state_visited(env, env->insn_idx);
 		if (err < 0)
 			return err;
 		if (err == 1) {
 			/* found equivalent state, can prune the search */
 			if (env->log.level) {
 				if (do_print_state)
-					verbose(env, "\nfrom %d to %d: safe\n",
-						prev_insn_idx, insn_idx);
+					verbose(env, "\nfrom %d to %d%s: safe\n",
+						env->prev_insn_idx, env->insn_idx,
+						env->cur_state->speculative ?
+						" (speculative execution)" : "");
 				else
-					verbose(env, "%d: safe\n", insn_idx);
+					verbose(env, "%d: safe\n", env->insn_idx);
 			}
 			goto process_bpf_exit;
 		}
@@ -4800,10 +5073,12 @@
 
 		if (env->log.level > 1 || (env->log.level && do_print_state)) {
 			if (env->log.level > 1)
-				verbose(env, "%d:", insn_idx);
+				verbose(env, "%d:", env->insn_idx);
 			else
-				verbose(env, "\nfrom %d to %d:",
-					prev_insn_idx, insn_idx);
+				verbose(env, "\nfrom %d to %d%s:",
+					env->prev_insn_idx, env->insn_idx,
+					env->cur_state->speculative ?
+					" (speculative execution)" : "");
 			print_verifier_state(env, state->frame[state->curframe]);
 			do_print_state = false;
 		}
@@ -4814,19 +5089,20 @@
 				.private_data	= env,
 			};
 
-			verbose(env, "%d: ", insn_idx);
+			verbose(env, "%d: ", env->insn_idx);
 			print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
 		}
 
 		if (bpf_prog_is_dev_bound(env->prog->aux)) {
-			err = bpf_prog_offload_verify_insn(env, insn_idx,
-							   prev_insn_idx);
+			err = bpf_prog_offload_verify_insn(env, env->insn_idx,
+							   env->prev_insn_idx);
 			if (err)
 				return err;
 		}
 
 		regs = cur_regs(env);
-		env->insn_aux_data[insn_idx].seen = true;
+		env->insn_aux_data[env->insn_idx].seen = true;
+
 		if (class == BPF_ALU || class == BPF_ALU64) {
 			err = check_alu_op(env, insn);
 			if (err)
@@ -4851,13 +5127,13 @@
 			/* check that memory (src_reg + off) is readable,
 			 * the state of dst_reg will be updated by this func
 			 */
-			err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
-					       BPF_SIZE(insn->code), BPF_READ,
-					       insn->dst_reg, false);
+			err = check_mem_access(env, env->insn_idx, insn->src_reg,
+					       insn->off, BPF_SIZE(insn->code),
+					       BPF_READ, insn->dst_reg, false);
 			if (err)
 				return err;
 
-			prev_src_type = &env->insn_aux_data[insn_idx].ptr_type;
+			prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
 
 			if (*prev_src_type == NOT_INIT) {
 				/* saw a valid insn
@@ -4884,10 +5160,10 @@
 			enum bpf_reg_type *prev_dst_type, dst_reg_type;
 
 			if (BPF_MODE(insn->code) == BPF_XADD) {
-				err = check_xadd(env, insn_idx, insn);
+				err = check_xadd(env, env->insn_idx, insn);
 				if (err)
 					return err;
-				insn_idx++;
+				env->insn_idx++;
 				continue;
 			}
 
@@ -4903,13 +5179,13 @@
 			dst_reg_type = regs[insn->dst_reg].type;
 
 			/* check that memory (dst_reg + off) is writeable */
-			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
-					       BPF_SIZE(insn->code), BPF_WRITE,
-					       insn->src_reg, false);
+			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
+					       insn->off, BPF_SIZE(insn->code),
+					       BPF_WRITE, insn->src_reg, false);
 			if (err)
 				return err;
 
-			prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type;
+			prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
 
 			if (*prev_dst_type == NOT_INIT) {
 				*prev_dst_type = dst_reg_type;
@@ -4938,9 +5214,9 @@
 			}
 
 			/* check that memory (dst_reg + off) is writeable */
-			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
-					       BPF_SIZE(insn->code), BPF_WRITE,
-					       -1, false);
+			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
+					       insn->off, BPF_SIZE(insn->code),
+					       BPF_WRITE, -1, false);
 			if (err)
 				return err;
 
@@ -4958,9 +5234,9 @@
 				}
 
 				if (insn->src_reg == BPF_PSEUDO_CALL)
-					err = check_func_call(env, insn, &insn_idx);
+					err = check_func_call(env, insn, &env->insn_idx);
 				else
-					err = check_helper_call(env, insn->imm, insn_idx);
+					err = check_helper_call(env, insn->imm, env->insn_idx);
 				if (err)
 					return err;
 
@@ -4973,7 +5249,7 @@
 					return -EINVAL;
 				}
 
-				insn_idx += insn->off + 1;
+				env->insn_idx += insn->off + 1;
 				continue;
 
 			} else if (opcode == BPF_EXIT) {
@@ -4987,8 +5263,8 @@
 
 				if (state->curframe) {
 					/* exit from nested function */
-					prev_insn_idx = insn_idx;
-					err = prepare_func_exit(env, &insn_idx);
+					env->prev_insn_idx = env->insn_idx;
+					err = prepare_func_exit(env, &env->insn_idx);
 					if (err)
 						return err;
 					do_print_state = true;
@@ -5014,7 +5290,8 @@
 				if (err)
 					return err;
 process_bpf_exit:
-				err = pop_stack(env, &prev_insn_idx, &insn_idx);
+				err = pop_stack(env, &env->prev_insn_idx,
+						&env->insn_idx);
 				if (err < 0) {
 					if (err != -ENOENT)
 						return err;
@@ -5024,7 +5301,7 @@
 					continue;
 				}
 			} else {
-				err = check_cond_jmp_op(env, insn, &insn_idx);
+				err = check_cond_jmp_op(env, insn, &env->insn_idx);
 				if (err)
 					return err;
 			}
@@ -5041,8 +5318,8 @@
 				if (err)
 					return err;
 
-				insn_idx++;
-				env->insn_aux_data[insn_idx].seen = true;
+				env->insn_idx++;
+				env->insn_aux_data[env->insn_idx].seen = true;
 			} else {
 				verbose(env, "invalid BPF_LD mode\n");
 				return -EINVAL;
@@ -5052,7 +5329,7 @@
 			return -EINVAL;
 		}
 
-		insn_idx++;
+		env->insn_idx++;
 	}
 
 	verbose(env, "processed %d insns (limit %d), stack depth ",
@@ -5341,10 +5618,10 @@
 	int i, cnt, size, ctx_field_size, delta = 0;
 	const int insn_cnt = env->prog->len;
 	struct bpf_insn insn_buf[16], *insn;
+	u32 target_size, size_default, off;
 	struct bpf_prog *new_prog;
 	enum bpf_access_type type;
 	bool is_narrower_load;
-	u32 target_size;
 
 	if (ops->gen_prologue) {
 		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
@@ -5421,9 +5698,9 @@
 		 * we will apply proper mask to the result.
 		 */
 		is_narrower_load = size < ctx_field_size;
+		size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
+		off = insn->off;
 		if (is_narrower_load) {
-			u32 size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
-			u32 off = insn->off;
 			u8 size_code;
 
 			if (type == BPF_WRITE) {
@@ -5451,12 +5728,23 @@
 		}
 
 		if (is_narrower_load && size < target_size) {
-			if (ctx_field_size <= 4)
+			u8 shift = (off & (size_default - 1)) * 8;
+
+			if (ctx_field_size <= 4) {
+				if (shift)
+					insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
+									insn->dst_reg,
+									shift);
 				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
 								(1 << size * 8) - 1);
-			else
+			} else {
+				if (shift)
+					insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
+									insn->dst_reg,
+									shift);
 				insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
 								(1 << size * 8) - 1);
+			}
 		}
 
 		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
@@ -5737,6 +6025,57 @@
 			continue;
 		}
 
+		if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
+		    insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
+			const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
+			const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
+			struct bpf_insn insn_buf[16];
+			struct bpf_insn *patch = &insn_buf[0];
+			bool issrc, isneg;
+			u32 off_reg;
+
+			aux = &env->insn_aux_data[i + delta];
+			if (!aux->alu_state)
+				continue;
+
+			isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
+			issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
+				BPF_ALU_SANITIZE_SRC;
+
+			off_reg = issrc ? insn->src_reg : insn->dst_reg;
+			if (isneg)
+				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
+			*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
+			*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
+			*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
+			*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
+			*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
+			if (issrc) {
+				*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
+							 off_reg);
+				insn->src_reg = BPF_REG_AX;
+			} else {
+				*patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
+							 BPF_REG_AX);
+			}
+			if (isneg)
+				insn->code = insn->code == code_add ?
+					     code_sub : code_add;
+			*patch++ = *insn;
+			if (issrc && isneg)
+				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
+			cnt = patch - insn_buf;
+
+			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+			if (!new_prog)
+				return -ENOMEM;
+
+			delta    += cnt - 1;
+			env->prog = prog = new_prog;
+			insn      = new_prog->insnsi + i + delta;
+			continue;
+		}
+
 		if (insn->code != (BPF_JMP | BPF_CALL))
 			continue;
 		if (insn->src_reg == BPF_PSEUDO_CALL)
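
The rewrite pass above emits a short branch-free clamp (AX = limit - 1; AX -= off; AX |= off; AX = -AX; AX >>= 63; off &= AX) so that a speculatively out-of-range offset collapses to zero before the pointer arithmetic executes. A minimal userspace sketch of the same arithmetic, with limit standing in for aux->alu_limit:

	#include <stdint.h>
	#include <stdio.h>

	/* Userspace model of the branch-free clamp emitted above.  'limit'
	 * stands in for aux->alu_limit; offsets outside [0, limit - 1] are
	 * forced to 0 so they cannot be used under speculation.
	 */
	static uint64_t sanitize_off(uint64_t off, uint32_t limit)
	{
		uint64_t ax = (uint64_t)limit - 1;

		ax -= off;		/* sign bit set iff off > limit - 1     */
		ax |= off;		/* sign bit also set iff off "negative" */
		ax = -ax;		/* flip: in-range offsets set the sign  */
		ax = (uint64_t)((int64_t)ax >> 63);	/* all-ones mask, or 0 */

		return off & ax;
	}

	int main(void)
	{
		printf("%llu\n", (unsigned long long)sanitize_off(3, 8));	/* 3 */
		printf("%llu\n", (unsigned long long)sanitize_off(9, 8));	/* 0 */
		printf("%llu\n", (unsigned long long)sanitize_off(-1ULL, 8));	/* 0 */
		return 0;
	}
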
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 4a3dae2..1aa5179 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -4186,20 +4186,25 @@
 
 	lockdep_assert_held(&css_set_lock);
 repeat:
-	/*
-	 * Advance iterator to find next entry.  cset->tasks is consumed
-	 * first and then ->mg_tasks.  After ->mg_tasks, we move onto the
-	 * next cset.
-	 */
-	next = it->task_pos->next;
+	if (it->task_pos) {
+		/*
+		 * Advance iterator to find next entry.  cset->tasks is
+		 * consumed first and then ->mg_tasks.  After ->mg_tasks,
+		 * we move onto the next cset.
+		 */
+		next = it->task_pos->next;
 
-	if (next == it->tasks_head)
-		next = it->mg_tasks_head->next;
+		if (next == it->tasks_head)
+			next = it->mg_tasks_head->next;
 
-	if (next == it->mg_tasks_head)
+		if (next == it->mg_tasks_head)
+			css_task_iter_advance_css_set(it);
+		else
+			it->task_pos = next;
+	} else {
+		/* called from start, proceed to the first cset */
 		css_task_iter_advance_css_set(it);
-	else
-		it->task_pos = next;
+	}
 
 	/* if PROCS, skip over tasks which aren't group leaders */
 	if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos &&
@@ -4239,7 +4244,7 @@
 
 	it->cset_head = it->cset_pos;
 
-	css_task_iter_advance_css_set(it);
+	css_task_iter_advance(it);
 
 	spin_unlock_irq(&css_set_lock);
 }
diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
index 67b02e1..da3b611 100644
--- a/kernel/cpu_pm.c
+++ b/kernel/cpu_pm.c
@@ -22,6 +22,8 @@
 #include <linux/spinlock.h>
 #include <linux/syscore_ops.h>
 
+bool from_suspend;
+
 static ATOMIC_NOTIFIER_HEAD(cpu_pm_notifier_chain);
 
 static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
@@ -181,6 +183,7 @@
 {
 	int ret;
 
+	from_suspend = true;
 	ret = cpu_pm_enter();
 	if (ret)
 		return ret;
@@ -191,6 +194,7 @@
 
 static void cpu_pm_resume(void)
 {
+	from_suspend = false;
 	cpu_cluster_pm_exit();
 	cpu_pm_exit();
 }
diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
index 597d408..b75667d 100644
--- a/kernel/dma/coherent.c
+++ b/kernel/dma/coherent.c
@@ -29,14 +29,21 @@
 	return NULL;
 }
 
-static inline dma_addr_t dma_get_device_base(struct device *dev,
-					     struct dma_coherent_mem * mem)
+dma_addr_t dma_get_device_base(struct device *dev,
+			       struct dma_coherent_mem *mem)
 {
 	if (mem->use_dev_dma_pfn_offset)
 		return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
 	else
 		return mem->device_base;
 }
+EXPORT_SYMBOL(dma_get_device_base);
+
+unsigned long dma_get_size(struct dma_coherent_mem *mem)
+{
+	return mem->size << PAGE_SHIFT;
+}
+EXPORT_SYMBOL(dma_get_size);
 
 static int dma_init_coherent_memory(
 	phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index de87b02..1d2f147 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -168,7 +168,12 @@
 int dma_direct_supported(struct device *dev, u64 mask)
 {
 #ifdef CONFIG_ZONE_DMA
-	if (mask < phys_to_dma(dev, DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)))
+	/*
+	 * This check needs to be against the actual bit mask value, so
+	 * use __phys_to_dma() here so that the SME encryption mask isn't
+	 * part of the check.
+	 */
+	if (mask < __phys_to_dma(dev, DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)))
 		return 0;
 #else
 	/*
@@ -176,8 +181,12 @@
 	 * to be able to satisfy them - either by not supporting more physical
 	 * memory, or by providing a ZONE_DMA32.  If neither is the case, the
 	 * architecture needs to use an IOMMU instead of the direct mapping.
+	 *
+	 * This check needs to be against the actual bit mask value, so
+	 * use __phys_to_dma() here so that the SME encryption mask isn't
+	 * part of the check.
 	 */
-	if (mask < phys_to_dma(dev, DMA_BIT_MASK(32)))
+	if (mask < __phys_to_dma(dev, DMA_BIT_MASK(32)))
 		return 0;
 #endif
 	/*
diff --git a/kernel/dma/removed.c b/kernel/dma/removed.c
index fe3fd70..6a1c87f 100644
--- a/kernel/dma/removed.c
+++ b/kernel/dma/removed.c
@@ -75,7 +75,7 @@
 	bool skip_zeroing = attrs & DMA_ATTR_SKIP_ZEROING;
 	int pageno;
 	unsigned long order;
-	void *addr = NULL;
+	void __iomem *addr = NULL;
 	struct removed_region *dma_mem = dev->removed_mem;
 	int nbits;
 	unsigned int align;
@@ -108,7 +108,7 @@
 			goto out;
 		}
 
-		addr = ioremap(base, size);
+		addr = ioremap_wc(base, size);
 		if (WARN_ON(!addr)) {
 			bitmap_clear(dma_mem->bitmap, pageno, nbits);
 		} else {
@@ -202,10 +202,10 @@
 {
 }
 
-void *removed_remap(struct device *dev, void *cpu_addr, dma_addr_t handle,
-			size_t size, unsigned long attrs)
+static void __iomem *removed_remap(struct device *dev, void *cpu_addr,
+			dma_addr_t handle, size_t size, unsigned long attrs)
 {
-	return ioremap(handle, size);
+	return ioremap_wc(handle, size);
 }
 
 void removed_unremap(struct device *dev, void *remapped_address, size_t size)
diff --git a/kernel/exit.c b/kernel/exit.c
index 8dd63b6..7b5be76 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -558,12 +558,14 @@
 	return NULL;
 }
 
-static struct task_struct *find_child_reaper(struct task_struct *father)
+static struct task_struct *find_child_reaper(struct task_struct *father,
+						struct list_head *dead)
 	__releases(&tasklist_lock)
 	__acquires(&tasklist_lock)
 {
 	struct pid_namespace *pid_ns = task_active_pid_ns(father);
 	struct task_struct *reaper = pid_ns->child_reaper;
+	struct task_struct *p, *n;
 
 	if (likely(reaper != father))
 		return reaper;
@@ -579,6 +581,12 @@
 		panic("Attempted to kill init! exitcode=0x%08x\n",
 			father->signal->group_exit_code ?: father->exit_code);
 	}
+
+	list_for_each_entry_safe(p, n, dead, ptrace_entry) {
+		list_del_init(&p->ptrace_entry);
+		release_task(p);
+	}
+
 	zap_pid_ns_processes(pid_ns);
 	write_lock_irq(&tasklist_lock);
 
@@ -668,7 +676,7 @@
 		exit_ptrace(father, dead);
 
 	/* Can drop and reacquire tasklist_lock */
-	reaper = find_child_reaper(father);
+	reaper = find_child_reaper(father, dead);
 	if (list_empty(&father->children))
 		return;
 
diff --git a/kernel/fork.c b/kernel/fork.c
index b1e82e3..ee2159a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1789,8 +1789,6 @@
 
 	posix_cpu_timers_init(p);
 
-	p->start_time = ktime_get_ns();
-	p->real_start_time = ktime_get_boot_ns();
 	p->io_context = NULL;
 	audit_set_context(p, NULL);
 	cgroup_fork(p);
@@ -1955,6 +1953,17 @@
 		goto bad_fork_free_pid;
 
 	/*
+	 * From this point on we must avoid any synchronous user-space
+	 * communication until we take the tasklist-lock. In particular, we do
+	 * not want user-space to be able to predict the process start-time by
+	 * stalling fork(2) after we recorded the start_time but before it is
+	 * visible to the system.
+	 */
+
+	p->start_time = ktime_get_ns();
+	p->real_start_time = ktime_get_boot_ns();
+
+	/*
 	 * Make it visible to the rest of the system, but don't wake it up yet.
 	 * Need tasklist lock for parent etc handling!
 	 */
diff --git a/kernel/futex.c b/kernel/futex.c
index 11fc3bb..f89abca 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1148,11 +1148,65 @@
 	return ret;
 }
 
+static int handle_exit_race(u32 __user *uaddr, u32 uval,
+			    struct task_struct *tsk)
+{
+	u32 uval2;
+
+	/*
+	 * If PF_EXITPIDONE is not yet set, then try again.
+	 */
+	if (tsk && !(tsk->flags & PF_EXITPIDONE))
+		return -EAGAIN;
+
+	/*
+	 * Reread the user space value to handle the following situation:
+	 *
+	 * CPU0				CPU1
+	 *
+	 * sys_exit()			sys_futex()
+	 *  do_exit()			 futex_lock_pi()
+	 *                                futex_lock_pi_atomic()
+	 *   exit_signals(tsk)		    No waiters:
+	 *    tsk->flags |= PF_EXITING;	    *uaddr == 0x00000PID
+	 *  mm_release(tsk)		    Set waiter bit
+	 *   exit_robust_list(tsk) {	    *uaddr = 0x80000PID;
+	 *      Set owner died		    attach_to_pi_owner() {
+	 *    *uaddr = 0xC0000000;	     tsk = get_task(PID);
+	 *   }				     if (!tsk->flags & PF_EXITING) {
+	 *  ...				       attach();
+	 *  tsk->flags |= PF_EXITPIDONE;     } else {
+	 *				       if (!(tsk->flags & PF_EXITPIDONE))
+	 *				         return -EAGAIN;
+	 *				       return -ESRCH; <--- FAIL
+	 *				     }
+	 *
+	 * Returning ESRCH unconditionally is wrong here because the
+	 * user space value has been changed by the exiting task.
+	 *
+	 * The same logic applies to the case where the exiting task is
+	 * already gone.
+	 */
+	if (get_futex_value_locked(&uval2, uaddr))
+		return -EFAULT;
+
+	/* If the user space value has changed, try again. */
+	if (uval2 != uval)
+		return -EAGAIN;
+
+	/*
+	 * The exiting task did not have a robust list, the robust list was
+	 * corrupted or the user space value in *uaddr is simply bogus.
+	 * Give up and tell user space.
+	 */
+	return -ESRCH;
+}
+
 /*
  * Lookup the task for the TID provided from user space and attach to
  * it after doing proper sanity checks.
  */
-static int attach_to_pi_owner(u32 uval, union futex_key *key,
+static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
 			      struct futex_pi_state **ps)
 {
 	pid_t pid = uval & FUTEX_TID_MASK;
@@ -1162,12 +1216,15 @@
 	/*
 	 * We are the first waiter - try to look up the real owner and attach
 	 * the new pi_state to it, but bail out when TID = 0 [1]
+	 *
+	 * The !pid check is paranoid. None of the call sites should end up
+	 * with pid == 0, but better safe than sorry. Let the caller retry.
 	 */
 	if (!pid)
-		return -ESRCH;
+		return -EAGAIN;
 	p = find_get_task_by_vpid(pid);
 	if (!p)
-		return -ESRCH;
+		return handle_exit_race(uaddr, uval, NULL);
 
 	if (unlikely(p->flags & PF_KTHREAD)) {
 		put_task_struct(p);
@@ -1187,7 +1244,7 @@
 		 * set, we know that the task has finished the
 		 * cleanup:
 		 */
-		int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
+		int ret = handle_exit_race(uaddr, uval, p);
 
 		raw_spin_unlock_irq(&p->pi_lock);
 		put_task_struct(p);
@@ -1244,7 +1301,7 @@
 	 * We are the first waiter - try to look up the owner based on
 	 * @uval and attach to it.
 	 */
-	return attach_to_pi_owner(uval, key, ps);
+	return attach_to_pi_owner(uaddr, uval, key, ps);
 }
 
 static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
@@ -1352,7 +1409,7 @@
 	 * attach to the owner. If that fails, no harm done, we only
 	 * set the FUTEX_WAITERS bit in the user space variable.
 	 */
-	return attach_to_pi_owner(uval, key, ps);
+	return attach_to_pi_owner(uaddr, newval, key, ps);
 }
 
 /**
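
handle_exit_race() reduces to a three-way decision: retry while the owner is still exiting, retry if the futex word changed under us, and only then report ESRCH. A hedged userspace model, with a plain pointer read standing in for get_futex_value_locked() and exit_done for PF_EXITPIDONE:

	#include <errno.h>
	#include <stdint.h>

	/* task_present: 0 = owner already gone, 1 = found but maybe exiting */
	static int exit_race_demo(const uint32_t *uaddr, uint32_t uval,
				  int task_present, int exit_done)
	{
		if (task_present && !exit_done)
			return -EAGAIN;	/* exit still in progress: retry   */
		if (*uaddr != uval)
			return -EAGAIN;	/* word changed under us: retry    */
		return -ESRCH;		/* unchanged and owner gone: fail  */
	}

	int main(void)
	{
		uint32_t word = 0x80000001;

		/* owner exited and the word is untouched -> -ESRCH */
		return exit_race_demo(&word, 0x80000001, 0, 1) == -ESRCH ? 0 : 1;
	}
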
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 5b8600d..7c5fb8a 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -122,23 +122,25 @@
 	resource_size_t align_start, align_size;
 	unsigned long pfn;
 
+	pgmap->kill(pgmap->ref);
 	for_each_device_pfn(pfn, pgmap)
 		put_page(pfn_to_page(pfn));
 
-	if (percpu_ref_tryget_live(pgmap->ref)) {
-		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
-		percpu_ref_put(pgmap->ref);
-	}
-
 	/* pages are dead and unused, undo the arch mapping */
 	align_start = res->start & ~(SECTION_SIZE - 1);
 	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
 		- align_start;
 
 	mem_hotplug_begin();
-	arch_remove_memory(align_start, align_size, pgmap->altmap_valid ?
-			&pgmap->altmap : NULL);
-	kasan_remove_zero_shadow(__va(align_start), align_size);
+	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
+		pfn = align_start >> PAGE_SHIFT;
+		__remove_pages(page_zone(pfn_to_page(pfn)), pfn,
+				align_size >> PAGE_SHIFT, NULL);
+	} else {
+		arch_remove_memory(align_start, align_size,
+				pgmap->altmap_valid ? &pgmap->altmap : NULL);
+		kasan_remove_zero_shadow(__va(align_start), align_size);
+	}
 	mem_hotplug_done();
 
 	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
@@ -150,7 +152,7 @@
 /**
  * devm_memremap_pages - remap and provide memmap backing for the given resource
  * @dev: hosting device for @res
- * @pgmap: pointer to a struct dev_pgmap
+ * @pgmap: pointer to a struct dev_pagemap
  *
  * Notes:
  * 1/ At a minimum the res, ref and type members of @pgmap must be initialized
@@ -159,11 +161,8 @@
  * 2/ The altmap field may optionally be initialized, in which case altmap_valid
  *    must be set to true
  *
- * 3/ pgmap.ref must be 'live' on entry and 'dead' before devm_memunmap_pages()
- *    time (or devm release event). The expected order of events is that ref has
- *    been through percpu_ref_kill() before devm_memremap_pages_release(). The
- *    wait for the completion of all references being dropped and
- *    percpu_ref_exit() must occur after devm_memremap_pages_release().
+ * 3/ pgmap->ref must be 'live' on entry and will be killed at
+ *    devm_memremap_pages_release() time, or if this routine fails.
  *
  * 4/ res is expected to be a host memory range that could feasibly be
  *    treated as a "System RAM" range, i.e. not a device mmio range, but
@@ -180,6 +179,9 @@
 	int error, nid, is_ram;
 	struct dev_pagemap *conflict_pgmap;
 
+	if (!pgmap->ref || !pgmap->kill)
+		return ERR_PTR(-EINVAL);
+
 	align_start = res->start & ~(SECTION_SIZE - 1);
 	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
 		- align_start;
@@ -202,18 +204,13 @@
 	is_ram = region_intersects(align_start, align_size,
 		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
 
-	if (is_ram == REGION_MIXED) {
-		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
-				__func__, res);
-		return ERR_PTR(-ENXIO);
+	if (is_ram != REGION_DISJOINT) {
+		WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
+				is_ram == REGION_MIXED ? "mixed" : "ram", res);
+		error = -ENXIO;
+		goto err_array;
 	}
 
-	if (is_ram == REGION_INTERSECTS)
-		return __va(res->start);
-
-	if (!pgmap->ref)
-		return ERR_PTR(-EINVAL);
-
 	pgmap->dev = dev;
 
 	mutex_lock(&pgmap_lock);
@@ -241,17 +238,40 @@
 		goto err_pfn_remap;
 
 	mem_hotplug_begin();
-	error = kasan_add_zero_shadow(__va(align_start), align_size);
-	if (error) {
-		mem_hotplug_done();
-		goto err_kasan;
+
+	/*
+	 * For device private memory we call add_pages() as we only need to
+	 * allocate and initialize struct page for the device memory.
+	 * Moreover, the device memory is inaccessible, thus we do not want to
+	 * create a linear mapping for the memory like arch_add_memory()
+	 * would do.
+	 *
+	 * For all other device memory types, which are accessible by
+	 * the CPU, we do want the linear mapping and thus use
+	 * arch_add_memory().
+	 */
+	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
+		error = add_pages(nid, align_start >> PAGE_SHIFT,
+				align_size >> PAGE_SHIFT, NULL, false);
+	} else {
+		error = kasan_add_zero_shadow(__va(align_start), align_size);
+		if (error) {
+			mem_hotplug_done();
+			goto err_kasan;
+		}
+
+		error = arch_add_memory(nid, align_start, align_size, altmap,
+				false);
 	}
 
-	error = arch_add_memory(nid, align_start, align_size, altmap, false);
-	if (!error)
-		move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
-					align_start >> PAGE_SHIFT,
-					align_size >> PAGE_SHIFT, altmap);
+	if (!error) {
+		struct zone *zone;
+
+		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
+		move_pfn_range_to_zone(zone, align_start >> PAGE_SHIFT,
+				align_size >> PAGE_SHIFT, altmap);
+	}
+
 	mem_hotplug_done();
 	if (error)
 		goto err_add_memory;
@@ -270,7 +290,10 @@
 		percpu_ref_get(pgmap->ref);
 	}
 
-	devm_add_action(dev, devm_memremap_pages_release, pgmap);
+	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
+			pgmap);
+	if (error)
+		return ERR_PTR(error);
 
 	return __va(res->start);
 
@@ -281,9 +304,11 @@
  err_pfn_remap:
  err_radix:
 	pgmap_radix_release(res, pgoff);
+ err_array:
+	pgmap->kill(pgmap->ref);
 	return ERR_PTR(error);
 }
-EXPORT_SYMBOL(devm_memremap_pages);
+EXPORT_SYMBOL_GPL(devm_memremap_pages);
 
 unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
 {
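
The move from devm_add_action() to devm_add_action_or_reset() above matters on the error path: the latter runs the release action immediately when registration fails, so the caller never leaks the mapped pages. A sketch of the pattern; my_release() and res are hypothetical names:

	#include <linux/device.h>

	static void my_release(void *res)
	{
		/* undo whatever the probe path set up for 'res' */
	}

	static int my_probe_step(struct device *dev, void *res)
	{
		int error;

		/* On failure, my_release(res) has already been invoked,
		 * so a plain 'return error' needs no manual unwinding.
		 */
		error = devm_add_action_or_reset(dev, my_release, res);
		if (error)
			return error;

		return 0;
	}
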
diff --git a/kernel/panic.c b/kernel/panic.c
index 21d1336..11d71ac 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -14,6 +14,7 @@
 #include <linux/kmsg_dump.h>
 #include <linux/kallsyms.h>
 #include <linux/notifier.h>
+#include <linux/vt_kern.h>
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/ftrace.h>
@@ -235,7 +236,10 @@
 	if (_crash_kexec_post_notifiers)
 		__crash_kexec(NULL);
 
-	bust_spinlocks(0);
+#ifdef CONFIG_VT
+	unblank_screen();
+#endif
+	console_unblank();
 
 	/*
 	 * We may have ended up stopping the CPU holding the lock (in
diff --git a/kernel/pid.c b/kernel/pid.c
index cdf63e5..b88fe5e 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -233,8 +233,10 @@
 
 out_free:
 	spin_lock_irq(&pidmap_lock);
-	while (++i <= ns->level)
-		idr_remove(&ns->idr, (pid->numbers + i)->nr);
+	while (++i <= ns->level) {
+		upid = pid->numbers + i;
+		idr_remove(&upid->ns->idr, upid->nr);
+	}
 
 	/* On failure to allocate the first pid, reset the state */
 	if (ns->pid_allocated == PIDNS_ADDING)
diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c
index 5ec376d..7d66ee6 100644
--- a/kernel/power/energy_model.c
+++ b/kernel/power/energy_model.c
@@ -10,6 +10,7 @@
 
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
+#include <linux/debugfs.h>
 #include <linux/energy_model.h>
 #include <linux/sched/topology.h>
 #include <linux/slab.h>
@@ -23,82 +24,60 @@
  */
 static DEFINE_MUTEX(em_pd_mutex);
 
-static struct kobject *em_kobject;
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *rootdir;
 
-/* Getters for the attributes of em_perf_domain objects */
-struct em_pd_attr {
-	struct attribute attr;
-	ssize_t (*show)(struct em_perf_domain *pd, char *buf);
-	ssize_t (*store)(struct em_perf_domain *pd, const char *buf, size_t s);
-};
-
-#define EM_ATTR_LEN 13
-#define show_table_attr(_attr) \
-static ssize_t show_##_attr(struct em_perf_domain *pd, char *buf) \
-{ \
-	ssize_t cnt = 0; \
-	int i; \
-	for (i = 0; i < pd->nr_cap_states; i++) { \
-		if (cnt >= (ssize_t) (PAGE_SIZE / sizeof(char) \
-				      - (EM_ATTR_LEN + 2))) \
-			goto out; \
-		cnt += scnprintf(&buf[cnt], EM_ATTR_LEN + 1, "%lu ", \
-				 pd->table[i]._attr); \
-	} \
-out: \
-	cnt += sprintf(&buf[cnt], "\n"); \
-	return cnt; \
-}
-
-show_table_attr(power);
-show_table_attr(frequency);
-show_table_attr(cost);
-
-static ssize_t show_cpus(struct em_perf_domain *pd, char *buf)
+static void em_debug_create_cs(struct em_cap_state *cs, struct dentry *pd)
 {
-	return sprintf(buf, "%*pbl\n", cpumask_pr_args(to_cpumask(pd->cpus)));
+	struct dentry *d;
+	char name[24];
+
+	snprintf(name, sizeof(name), "cs:%lu", cs->frequency);
+
+	/* Create per-cs directory */
+	d = debugfs_create_dir(name, pd);
+	debugfs_create_ulong("frequency", 0444, d, &cs->frequency);
+	debugfs_create_ulong("power", 0444, d, &cs->power);
+	debugfs_create_ulong("cost", 0444, d, &cs->cost);
 }
 
-#define pd_attr(_name) em_pd_##_name##_attr
-#define define_pd_attr(_name) static struct em_pd_attr pd_attr(_name) = \
-		__ATTR(_name, 0444, show_##_name, NULL)
-
-define_pd_attr(power);
-define_pd_attr(frequency);
-define_pd_attr(cost);
-define_pd_attr(cpus);
-
-static struct attribute *em_pd_default_attrs[] = {
-	&pd_attr(power).attr,
-	&pd_attr(frequency).attr,
-	&pd_attr(cost).attr,
-	&pd_attr(cpus).attr,
-	NULL
-};
-
-#define to_pd(k) container_of(k, struct em_perf_domain, kobj)
-#define to_pd_attr(a) container_of(a, struct em_pd_attr, attr)
-
-static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
+static int em_debug_cpus_show(struct seq_file *s, void *unused)
 {
-	struct em_perf_domain *pd = to_pd(kobj);
-	struct em_pd_attr *pd_attr = to_pd_attr(attr);
-	ssize_t ret;
+	seq_printf(s, "%*pbl\n", cpumask_pr_args(to_cpumask(s->private)));
 
-	ret = pd_attr->show(pd, buf);
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(em_debug_cpus);
 
-	return ret;
+static void em_debug_create_pd(struct em_perf_domain *pd, int cpu)
+{
+	struct dentry *d;
+	char name[8];
+	int i;
+
+	snprintf(name, sizeof(name), "pd%d", cpu);
+
+	/* Create the directory of the performance domain */
+	d = debugfs_create_dir(name, rootdir);
+
+	debugfs_create_file("cpus", 0444, d, pd->cpus, &em_debug_cpus_fops);
+
+	/* Create a sub-directory for each capacity state */
+	for (i = 0; i < pd->nr_cap_states; i++)
+		em_debug_create_cs(&pd->table[i], d);
 }
 
-static const struct sysfs_ops em_pd_sysfs_ops = {
-	.show	= show,
-};
+static int __init em_debug_init(void)
+{
+	/* Create /sys/kernel/debug/energy_model directory */
+	rootdir = debugfs_create_dir("energy_model", NULL);
 
-static struct kobj_type ktype_em_pd = {
-	.sysfs_ops	= &em_pd_sysfs_ops,
-	.default_attrs	= em_pd_default_attrs,
-};
-
+	return 0;
+}
+core_initcall(em_debug_init);
+#else /* CONFIG_DEBUG_FS */
+static void em_debug_create_pd(struct em_perf_domain *pd, int cpu) {}
+#endif
 static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states,
 						struct em_data_callback *cb)
 {
@@ -178,10 +157,7 @@
 	pd->nr_cap_states = nr_states;
 	cpumask_copy(to_cpumask(pd->cpus), span);
 
-	ret = kobject_init_and_add(&pd->kobj, &ktype_em_pd, em_kobject,
-				   "pd%u", cpu);
-	if (ret)
-		pr_err("pd%d: failed kobject_init_and_add(): %d\n", cpu, ret);
+	em_debug_create_pd(pd, cpu);
 
 	return pd;
 
@@ -236,15 +212,6 @@
 	 */
 	mutex_lock(&em_pd_mutex);
 
-	if (!em_kobject) {
-		em_kobject = kobject_create_and_add("energy_model",
-						&cpu_subsys.dev_root->kobj);
-		if (!em_kobject) {
-			ret = -ENODEV;
-			goto unlock;
-		}
-	}
-
 	for_each_cpu(cpu, span) {
 		/* Make sure we don't register again an existing domain. */
 		if (READ_ONCE(per_cpu(em_data, cpu))) {
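
The debugfs conversion leans on DEFINE_SHOW_ATTRIBUTE(), which pairs em_debug_cpus_show() with a generated em_debug_cpus_fops via single_open(). A sketch of that convention using a hypothetical foo attribute:

	#include <linux/debugfs.h>
	#include <linux/seq_file.h>

	static int foo_show(struct seq_file *s, void *unused)
	{
		seq_puts(s, "hello\n");
		return 0;
	}
	/* Generates foo_open() via single_open() plus a matching foo_fops */
	DEFINE_SHOW_ATTRIBUTE(foo);

	/* usage: debugfs_create_file("foo", 0444, parent, NULL, &foo_fops); */
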
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 3d913f4..5e2e212 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -728,12 +728,22 @@
 		/* silent return to keep pcm code cleaner */
 
 	if (!pm_qos_request_active(req)) {
-		WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n");
+		WARN(1, "%s called for unknown object\n", __func__);
 		return;
 	}
 
 	cancel_delayed_work_sync(&req->work);
 
+#ifdef CONFIG_SMP
+	if (req->type == PM_QOS_REQ_AFFINE_IRQ) {
+		int ret = 0;
+		/* Get the current affinity */
+		ret = irq_set_affinity_notifier(req->irq, NULL);
+		if (ret)
+			WARN(1, "IRQ affinity notify set failed\n");
+	}
+#endif
+
 	trace_pm_qos_remove_request(req->pm_qos_class, PM_QOS_DEFAULT_VALUE);
 	pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
 			     &req->node, PM_QOS_REMOVE_REQ,
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 6c9866a..1ff17e2 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -448,10 +448,12 @@
 
 	lockdep_assert_held(&ACCESS_PRIVATE(sp, lock));
 	WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
+	spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
 	rcu_segcblist_advance(&sdp->srcu_cblist,
 			      rcu_seq_current(&sp->srcu_gp_seq));
 	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
 				       rcu_seq_snap(&sp->srcu_gp_seq));
+	spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
 	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
 	rcu_seq_start(&sp->srcu_gp_seq);
 	state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 550bb21..414316b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5983,8 +5983,6 @@
 	struct rq *rq = cpu_rq(cpu);
 	struct rq_flags rf;
 
-	watchdog_disable(cpu);
-
 	local_irq_disable();
 
 	irq_migrate_all_off_this_cpu();
@@ -6131,6 +6129,7 @@
 	smp_call_function_any(&avail_cpus, hrtimer_quiesce_cpu, &cpu, 1);
 	smp_call_function_any(&avail_cpus, timer_quiesce_cpu, &cpu, 1);
 
+	watchdog_disable(cpu);
 	stop_cpus(cpumask_of(cpu), do_isolation_work_cpu_stop, 0);
 
 	calc_load_migrate(rq);
@@ -6174,7 +6173,7 @@
 		stop_cpus(cpumask_of(cpu), do_unisolation_work_cpu_stop, 0);
 
 		/* Kick CPU to immediately do load balancing */
-		if (!atomic_fetch_or(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
+		if (!atomic_fetch_or(NOHZ_KICK_MASK, nohz_flags(cpu)))
 			smp_send_reschedule(cpu);
 	}
 
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index d9e7f8d..a12940b 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -236,7 +236,7 @@
 
 	policy->cur = next_freq;
 	for_each_cpu(cpu, policy->cpus)
-		trace_cpu_frequency(next_freq, smp_processor_id());
+		trace_cpu_frequency(next_freq, cpu);
 }
 
 static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
@@ -317,8 +317,8 @@
 unsigned long schedutil_freq_util(int cpu, unsigned long util,
 				  unsigned long max, enum schedutil_type type)
 {
+	unsigned long dl_util, irq;
 	struct rq *rq = cpu_rq(cpu);
-	unsigned long irq;
 
 	if (sched_feat(SUGOV_RT_MAX_FREQ) && type == FREQUENCY_UTIL &&
 						rt_rq_is_runnable(&rq->rt))
@@ -339,29 +339,26 @@
 	 * to be dealt with. The exact way of doing things depends on the calling
 	 * context.
 	 */
-	if (type == FREQUENCY_UTIL) {
-		/*
-		 * For frequency selection we do not make cpu_util_dl() a
-		 * permanent part of this sum because we want to use
-		 * cpu_bw_dl() later on, but we need to check if the
-		 * CFS+RT+DL sum is saturated (ie. no idle time) such
-		 * that we select f_max when there is no idle time.
-		 *
-		 * NOTE: numerical errors or stop class might cause us
-		 * to not quite hit saturation when we should --
-		 * something for later.
-		 */
-		if ((util + cpu_util_dl(rq)) >= max)
-			return max;
-	} else {
-		/*
-		 * OTOH, for energy computation we need the estimated
-		 * running time, so include util_dl and ignore dl_bw.
-		 */
-		util += cpu_util_dl(rq);
-		if (util >= max)
-			return max;
-	}
+	dl_util = cpu_util_dl(rq);
+
+	/*
+	 * For frequency selection we do not make cpu_util_dl() a permanent part
+	 * of this sum because we want to use cpu_bw_dl() later on, but we need
+	 * to check if the CFS+RT+DL sum is saturated (ie. no idle time) such
+	 * that we select f_max when there is no idle time.
+	 *
+	 * NOTE: numerical errors or stop class might cause us to not quite hit
+	 * saturation when we should -- something for later.
+	 */
+	if (util + dl_util >= max)
+		return max;
+
+	/*
+	 * OTOH, for energy computation we need the estimated running time, so
+	 * include util_dl and ignore dl_bw.
+	 */
+	if (type == ENERGY_UTIL)
+		util += dl_util;
 
 	/*
 	 * There is still idle time; further improve the number by using the
@@ -375,21 +372,18 @@
 	util = scale_irq_capacity(util, irq, max);
 	util += irq;
 
-	if (type == FREQUENCY_UTIL) {
-		/*
-		 * Bandwidth required by DEADLINE must always be granted
-		 * while, for FAIR and RT, we use blocked utilization of
-		 * IDLE CPUs as a mechanism to gracefully reduce the
-		 * frequency when no tasks show up for longer periods of
-		 * time.
-		 *
-		 * Ideally we would like to set bw_dl as min/guaranteed
-		 * freq and util + bw_dl as requested freq. However,
-		 * cpufreq is not yet ready for such an interface. So,
-		 * we only do the latter for now.
-		 */
+	/*
+	 * Bandwidth required by DEADLINE must always be granted while, for
+	 * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
+	 * to gracefully reduce the frequency when no tasks show up for longer
+	 * periods of time.
+	 *
+	 * Ideally we would like to set bw_dl as min/guaranteed freq and util +
+	 * bw_dl as requested freq. However, cpufreq is not yet ready for such
+	 * an interface. So, we only do the latter for now.
+	 */
+	if (type == FREQUENCY_UTIL)
 		util += cpu_bw_dl(rq);
-	}
 
 	return min(max, util);
 }
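
After this refactor, the FREQUENCY_UTIL/ENERGY_UTIL split collapses into one shared saturation check plus a per-type term. A minimal userspace model of the flow (irq scaling omitted; the cpu_util_dl()/cpu_bw_dl() reads become plain parameters):

	#include <stdio.h>

	enum util_type { FREQUENCY_UTIL, ENERGY_UTIL };

	static unsigned long demo_util(unsigned long util, unsigned long dl_util,
				       unsigned long bw_dl, unsigned long max,
				       enum util_type type)
	{
		/* Shared saturation check: no idle time means f_max either way */
		if (util + dl_util >= max)
			return max;

		/* Energy estimates need the actual DL running time... */
		if (type == ENERGY_UTIL)
			util += dl_util;

		/* ...while frequency selection grants the DL bandwidth */
		if (type == FREQUENCY_UTIL)
			util += bw_dl;

		return util < max ? util : max;
	}

	int main(void)
	{
		printf("%lu\n", demo_util(300, 100, 150, 1024, FREQUENCY_UTIL)); /* 450 */
		printf("%lu\n", demo_util(300, 100, 150, 1024, ENERGY_UTIL));    /* 400 */
		return 0;
	}
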
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index fdf7436..7ab94c0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -424,10 +424,9 @@
 	}
 }
 
-/* Iterate thr' all leaf cfs_rq's on a runqueue */
-#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)			\
-	list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list,	\
-				 leaf_cfs_rq_list)
+/* Iterate through all leaf cfs_rq's on a runqueue: */
+#define for_each_leaf_cfs_rq(rq, cfs_rq) \
+	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
 
 /* Do the two (enqueued) entities belong to the same group ? */
 static inline struct cfs_rq *
@@ -519,8 +518,8 @@
 {
 }
 
-#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)	\
-		for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
+#define for_each_leaf_cfs_rq(rq, cfs_rq)	\
+		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
 
 static inline struct sched_entity *parent_entity(struct sched_entity *se)
 {
@@ -3846,8 +3845,9 @@
 	if (capacity == max_capacity)
 		return true;
 
-	if (task_boost_policy(p) == SCHED_BOOST_ON_BIG
-				&& is_min_capacity_cpu(cpu))
+	if ((task_boost_policy(p) == SCHED_BOOST_ON_BIG ||
+			schedtune_task_boost(p) > 0) &&
+			is_min_capacity_cpu(cpu))
 		return false;
 
 	return task_fits_capacity(p, capacity, cpu);
@@ -8400,11 +8400,13 @@
 
 	p->on_rq = TASK_ON_RQ_MIGRATING;
 	deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
+	lockdep_off();
 	double_lock_balance(env->src_rq, env->dst_rq);
 	if (!(env->src_rq->clock_update_flags & RQCF_UPDATED))
 		update_rq_clock(env->src_rq);
 	set_task_cpu(p, env->dst_cpu);
 	double_unlock_balance(env->src_rq, env->dst_rq);
+	lockdep_on();
 }
 
 /*
@@ -8638,27 +8640,10 @@
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
-{
-	if (cfs_rq->load.weight)
-		return false;
-
-	if (cfs_rq->avg.load_sum)
-		return false;
-
-	if (cfs_rq->avg.util_sum)
-		return false;
-
-	if (cfs_rq->avg.runnable_load_sum)
-		return false;
-
-	return true;
-}
-
 static void update_blocked_averages(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	struct cfs_rq *cfs_rq, *pos;
+	struct cfs_rq *cfs_rq;
 	const struct sched_class *curr_class;
 	struct rq_flags rf;
 	bool done = true;
@@ -8670,7 +8655,7 @@
 	 * Iterates the task_group tree in a bottom up fashion, see
 	 * list_add_leaf_cfs_rq() for details.
 	 */
-	for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
+	for_each_leaf_cfs_rq(rq, cfs_rq) {
 		struct sched_entity *se;
 
 		/* throttled entities do not contribute to load */
@@ -8685,13 +8670,6 @@
 		if (se && !skip_blocked_update(se))
 			update_load_avg(cfs_rq_of(se), se, 0);
 
-		/*
-		 * There can be a lot of idle CPU cgroups.  Don't let fully
-		 * decayed cfs_rqs linger on the list.
-		 */
-		if (cfs_rq_is_decayed(cfs_rq))
-			list_del_leaf_cfs_rq(cfs_rq);
-
 		/* Don't need periodic decay once load/util_avg are null */
 		if (cfs_rq_has_blocked(cfs_rq))
 			done = false;
@@ -11899,10 +11877,10 @@
 #ifdef CONFIG_SCHED_DEBUG
 void print_cfs_stats(struct seq_file *m, int cpu)
 {
-	struct cfs_rq *cfs_rq, *pos;
+	struct cfs_rq *cfs_rq;
 
 	rcu_read_lock();
-	for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
+	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
 		print_cfs_rq(m, cpu, cfs_rq);
 	rcu_read_unlock();
 }
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 74a2e9c..06dbb45 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -203,35 +203,9 @@
 
 DEFINE_STATIC_KEY_FALSE(sched_energy_present);
 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
-unsigned int sysctl_sched_energy_aware = 1;
 DEFINE_MUTEX(sched_energy_mutex);
 bool sched_energy_update;
 
-#ifdef CONFIG_PROC_SYSCTL
-int sched_energy_aware_handler(struct ctl_table *table, int write,
-			 void __user *buffer, size_t *lenp, loff_t *ppos)
-{
-	int ret, state;
-
-	if (write && !capable(CAP_SYS_ADMIN))
-		return -EPERM;
-
-	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-	if (!ret && write) {
-		state = static_branch_unlikely(&sched_energy_present);
-		if (state != sysctl_sched_energy_aware) {
-			mutex_lock(&sched_energy_mutex);
-			sched_energy_update = 1;
-			rebuild_sched_domains();
-			sched_energy_update = 0;
-			mutex_unlock(&sched_energy_mutex);
-		}
-	}
-
-	return ret;
-}
-#endif
-
 static void free_pd(struct perf_domain *pd)
 {
 	struct perf_domain *tmp;
@@ -279,7 +253,7 @@
 	if (!sched_debug() || !pd)
 		return;
 
-	printk(KERN_DEBUG "root_domain %*pbl: ", cpumask_pr_args(cpu_map));
+	printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map));
 
 	while (pd) {
 		printk(KERN_CONT " pd%d:{ cpus=%*pbl nr_cstate=%d }",
@@ -300,29 +274,13 @@
 	free_pd(pd);
 }
 
-static void sched_energy_start(int ndoms_new, cpumask_var_t doms_new[])
+static void sched_energy_set(bool has_eas)
 {
-	/*
-	 * The conditions for EAS to start are checked during the creation of
-	 * root domains. If one of them meets all conditions, it will have a
-	 * non-null list of performance domains.
-	 */
-	while (ndoms_new) {
-		if (cpu_rq(cpumask_first(doms_new[ndoms_new - 1]))->rd->pd)
-			goto enable;
-		ndoms_new--;
-	}
-
-	if (static_branch_unlikely(&sched_energy_present)) {
+	if (!has_eas && static_branch_unlikely(&sched_energy_present)) {
 		if (sched_debug())
 			pr_info("%s: stopping EAS\n", __func__);
 		static_branch_disable_cpuslocked(&sched_energy_present);
-	}
-
-	return;
-
-enable:
-	if (!static_branch_unlikely(&sched_energy_present)) {
+	} else if (has_eas && !static_branch_unlikely(&sched_energy_present)) {
 		if (sched_debug())
 			pr_info("%s: starting EAS\n", __func__);
 		static_branch_enable_cpuslocked(&sched_energy_present);
@@ -355,7 +313,7 @@
 #define EM_MAX_COMPLEXITY 2048
 
 extern struct cpufreq_governor schedutil_gov;
-static void build_perf_domains(const struct cpumask *cpu_map)
+static bool build_perf_domains(const struct cpumask *cpu_map)
 {
 	int i, nr_pd = 0, nr_cs = 0, nr_cpus = cpumask_weight(cpu_map);
 	struct perf_domain *pd = NULL, *tmp;
@@ -364,9 +322,6 @@
 	struct cpufreq_policy *policy;
 	struct cpufreq_governor *gov;
 
-	if (!sysctl_sched_energy_aware)
-		goto free;
-
 	/* EAS is enabled for asymmetric CPU capacity topologies. */
 	if (!per_cpu(sd_asym_cpucapacity, cpu)) {
 		if (sched_debug()) {
@@ -424,7 +379,7 @@
 	if (tmp)
 		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);
 
-	return;
+	return !!pd;
 
 free:
 	free_pd(pd);
@@ -432,6 +387,8 @@
 	rcu_assign_pointer(rd->pd, NULL);
 	if (tmp)
 		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);
+
+	return false;
 }
 #else
 static void free_pd(struct perf_domain *pd) { }
@@ -2198,6 +2155,7 @@
 void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 			     struct sched_domain_attr *dattr_new)
 {
+	bool __maybe_unused has_eas = false;
 	int i, j, n;
 	int new_topology;
 
@@ -2261,15 +2219,17 @@
 	for (i = 0; i < ndoms_new; i++) {
 		for (j = 0; j < n && !sched_energy_update; j++) {
 			if (cpumask_equal(doms_new[i], doms_cur[j]) &&
-			    cpu_rq(cpumask_first(doms_cur[j]))->rd->pd)
+			    cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) {
+				has_eas = true;
 				goto match3;
+			}
 		}
 		/* No match - add perf. domains for a new rd */
-		build_perf_domains(doms_new[i]);
+		has_eas |= build_perf_domains(doms_new[i]);
 match3:
 		;
 	}
-	sched_energy_start(ndoms_new, doms_new);
+	sched_energy_set(has_eas);
 #endif
 
 	/* Remember the new sched domains: */
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
index d5bdf1b..b731208 100644
--- a/kernel/sched/tune.c
+++ b/kernel/sched/tune.c
@@ -242,9 +242,15 @@
 
 bool task_sched_boost(struct task_struct *p)
 {
-	struct schedtune *st = task_schedtune(p);
+	struct schedtune *st;
+	bool sched_boost_enabled;
 
-	return st->sched_boost_enabled;
+	rcu_read_lock();
+	st = task_schedtune(p);
+	sched_boost_enabled = st->sched_boost_enabled;
+	rcu_read_unlock();
+
+	return sched_boost_enabled;
 }
 
 static u64
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 7554136..f1d7af6 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -73,10 +73,16 @@
 				     unsigned long *flags)
 {
 	int cpu;
+	int level = 0;
 
 	local_irq_save(*flags);
-	for_each_cpu(cpu, cpus)
-		raw_spin_lock(&cpu_rq(cpu)->lock);
+	for_each_cpu(cpu, cpus) {
+		if (level == 0)
+			raw_spin_lock(&cpu_rq(cpu)->lock);
+		else
+			raw_spin_lock_nested(&cpu_rq(cpu)->lock, level);
+		level++;
+	}
 }
 
 static void release_rq_locks_irqrestore(const cpumask_t *cpus,
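
The level argument is purely a lockdep annotation: every rq->lock shares one lock class, so taking several at once must mark each acquisition with a distinct subclass or lockdep reports false recursive locking. A hedged sketch with four same-class demo locks:

	#include <linux/spinlock.h>

	#define NR_DEMO_LOCKS 4

	static raw_spinlock_t demo_locks[NR_DEMO_LOCKS];

	static void demo_init(void)
	{
		int i;

		/* One init site: lockdep gives all four locks the same
		 * class, just as every rq->lock shares a class.
		 */
		for (i = 0; i < NR_DEMO_LOCKS; i++)
			raw_spin_lock_init(&demo_locks[i]);
	}

	static void demo_lock_all(void)
	{
		int i;

		for (i = 0; i < NR_DEMO_LOCKS; i++) {
			if (i == 0)
				raw_spin_lock(&demo_locks[i]);
			else
				/* distinct subclass per level, as in
				 * walt.c above
				 */
				raw_spin_lock_nested(&demo_locks[i], i);
		}
	}

	static void demo_unlock_all(void)
	{
		int i;

		for (i = NR_DEMO_LOCKS - 1; i >= 0; i--)
			raw_spin_unlock(&demo_locks[i]);
	}
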
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index eaa6c18..9de8648 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -599,17 +599,6 @@
 		.extra1		= &one,
 	},
 #endif
-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
-	{
-		.procname	= "sched_energy_aware",
-		.data		= &sysctl_sched_energy_aware,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= sched_energy_aware_handler,
-		.extra1		= &zero,
-		.extra2		= &one,
-	},
-#endif
 	{
 		.procname	= "sched_lib_name",
 		.data		= sched_lib_name,
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 901aee2..8116c86 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1893,12 +1893,6 @@
 	unsigned long flags;
 	int i;
 
-	/*
-	 * this BH disable ensures that raise_softirq_irqoff() does
-	 * not wakeup ksoftirqd (and acquire the pi-lock) while
-	 * holding the cpu_base lock
-	 */
-	local_bh_disable();
 	local_irq_save(flags);
 	old_base = &per_cpu(hrtimer_bases, scpu);
 	new_base = this_cpu_ptr(&hrtimer_bases);
@@ -1926,7 +1920,6 @@
 	/* Check, if we got expired work to do */
 	__hrtimer_peek_ahead_timers();
 	local_irq_restore(flags);
-	local_bh_enable();
 }
 
 int hrtimers_dead_cpu(unsigned int scpu)
@@ -1934,7 +1927,14 @@
 	BUG_ON(cpu_online(scpu));
 	tick_cancel_sched_timer(scpu);
 
+	/*
+	 * this BH disable ensures that raise_softirq_irqoff() does
+	 * not wake up ksoftirqd (and acquire the pi-lock) while
+	 * holding the cpu_base lock
+	 */
+	local_bh_disable();
 	__migrate_hrtimers(scpu, true);
+	local_bh_enable();
 	return 0;
 }
 
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index ce32cf7..76801b9 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -685,6 +685,7 @@
 	 * set up the signal and overrun bookkeeping.
 	 */
 	timer->it.cpu.incr = timespec64_to_ns(&new->it_interval);
+	timer->it_interval = ns_to_ktime(timer->it.cpu.incr);
 
 	/*
 	 * This acts as a modification timestamp for the timer,
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index 4b9127e..5a01c4f 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -289,9 +289,6 @@
 {
 	struct hrtimer *timer = &timr->it.real.timer;
 
-	if (!timr->it_interval)
-		return;
-
 	timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
 					    timr->it_interval);
 	hrtimer_restart(timer);
@@ -317,7 +314,7 @@
 	if (!timr)
 		return;
 
-	if (timr->it_requeue_pending == info->si_sys_private) {
+	if (timr->it_interval && timr->it_requeue_pending == info->si_sys_private) {
 		timr->kclock->timer_rearm(timr);
 
 		timr->it_active = 1;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 6a492f7..cf9780a 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -29,6 +29,7 @@
 #include <linux/timer.h>
 #include <linux/context_tracking.h>
 #include <linux/mm.h>
+#include <linux/rq_stats.h>
 
 #include <asm/irq_regs.h>
 
@@ -36,6 +37,10 @@
 
 #include <trace/events/timer.h>
 
+struct rq_data rq_info;
+struct workqueue_struct *rq_wq;
+spinlock_t rq_lock;
+
 /*
  * Per-CPU nohz control structure
  */
@@ -1259,6 +1264,42 @@
  * High resolution timer specific code
  */
 #ifdef CONFIG_HIGH_RES_TIMERS
+static void update_rq_stats(void)
+{
+	unsigned long jiffy_gap = 0;
+	unsigned int rq_avg = 0;
+	unsigned long flags = 0;
+
+	jiffy_gap = jiffies - rq_info.rq_poll_last_jiffy;
+	if (jiffy_gap >= rq_info.rq_poll_jiffies) {
+		spin_lock_irqsave(&rq_lock, flags);
+		if (!rq_info.rq_avg)
+			rq_info.rq_poll_total_jiffies = 0;
+		rq_avg = nr_running() * 10;
+		if (rq_info.rq_poll_total_jiffies) {
+			rq_avg = (rq_avg * jiffy_gap) +
+				(rq_info.rq_avg *
+				 rq_info.rq_poll_total_jiffies);
+			do_div(rq_avg,
+				rq_info.rq_poll_total_jiffies + jiffy_gap);
+		}
+		rq_info.rq_avg = rq_avg;
+		rq_info.rq_poll_total_jiffies += jiffy_gap;
+		rq_info.rq_poll_last_jiffy = jiffies;
+		spin_unlock_irqrestore(&rq_lock, flags);
+	}
+}
+static void wakeup_user(void)
+{
+	unsigned long jiffy_gap;
+
+	jiffy_gap = jiffies - rq_info.def_timer_last_jiffy;
+	if (jiffy_gap >= rq_info.def_timer_jiffies) {
+		rq_info.def_timer_last_jiffy = jiffies;
+		queue_work(rq_wq, &rq_info.def_timer_work);
+	}
+}
+
 /*
  * We rearm the timer until we get disabled by the idle code.
  * Called with interrupts disabled.
@@ -1276,8 +1317,20 @@
 	 * Do not call, when we are not in irq context and have
 	 * no valid regs pointer
 	 */
-	if (regs)
+	if (regs) {
 		tick_sched_handle(ts, regs);
+		if (rq_info.init == 1 &&
+				tick_do_timer_cpu == smp_processor_id()) {
+			/*
+			 * update run queue statistics
+			 */
+			update_rq_stats();
+			/*
+			 * wakeup user if needed
+			 */
+			wakeup_user();
+		}
+	}
 	else
 		ts->next_tick = 0;
 
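The update_rq_stats() helper added above maintains a time-weighted running average of the run queue depth: each qualifying tick it folds nr_running() * 10 (fixed point with one decimal digit) into rq_info.rq_avg, weighting the new sample by the jiffies elapsed since the last poll and the old average by the jiffies it already covers. A minimal user-space sketch of that arithmetic, with hypothetical sample values:

#include <stdio.h>

/* Illustrative sketch of the rq_avg arithmetic; not part of the patch. */
static unsigned long rq_avg;		/* average run queue depth, x10 */
static unsigned long total_jiffies;	/* jiffies covered by rq_avg */

static void sample(unsigned long nr_running, unsigned long jiffy_gap)
{
	unsigned long v = nr_running * 10;	/* one decimal digit */

	if (total_jiffies)
		v = (v * jiffy_gap + rq_avg * total_jiffies) /
		    (total_jiffies + jiffy_gap);
	rq_avg = v;
	total_jiffies += jiffy_gap;
}

int main(void)
{
	sample(4, 10);	/* 4 runnable tasks for 10 jiffies */
	sample(2, 30);	/* 2 runnable tasks for the next 30 jiffies */
	printf("rq_avg = %lu.%lu\n", rq_avg / 10, rq_avg % 10);	/* 2.5 */
	return 0;
}
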
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index f6c10b6..d681cf3 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -629,13 +629,17 @@
 {
 	unsigned int pc = preempt_count();
 #ifdef CONFIG_PREEMPTIRQ_EVENTS
-	struct irqsoff_store *is = &per_cpu(the_irqsoff,
-						raw_smp_processor_id());
-	u64 delta = sched_clock() - is->ts;
+	struct irqsoff_store *is;
+	u64 delta;
+
+	lockdep_off();
+	is = &per_cpu(the_irqsoff, raw_smp_processor_id());
+	delta = sched_clock() - is->ts;
 
 	if (delta > sysctl_irqsoff_tracing_threshold_ns)
 		trace_irqs_disable(delta, is->caddr[0], is->caddr[1],
 						is->caddr[2], is->caddr[3]);
+	lockdep_on();
 #endif /* CONFIG_PREEMPTIRQ_EVENTS */
 
 	if (!preempt_trace(pc) && irq_trace())
@@ -646,14 +650,16 @@
 {
 	unsigned int pc = preempt_count();
 #ifdef CONFIG_PREEMPTIRQ_EVENTS
-	struct irqsoff_store *is = &per_cpu(the_irqsoff,
-						raw_smp_processor_id());
+	struct irqsoff_store *is;
 
+	lockdep_off();
+	is = &per_cpu(the_irqsoff, raw_smp_processor_id());
 	is->ts = sched_clock();
 	is->caddr[0] = CALLER_ADDR0;
 	is->caddr[1] = CALLER_ADDR1;
 	is->caddr[2] = CALLER_ADDR2;
 	is->caddr[3] = CALLER_ADDR3;
+	lockdep_on();
 #endif /* CONFIG_PREEMPTIRQ_EVENTS */
 
 	if (!preempt_trace(pc) && irq_trace())
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 9699e3e9..dff0e01 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -512,14 +512,12 @@
 
 void watchdog_disable(unsigned int cpu)
 {
-	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
-	unsigned int *enabled = this_cpu_ptr(&watchdog_en);
+	struct hrtimer *hrtimer = per_cpu_ptr(&watchdog_hrtimer, cpu);
+	unsigned int *enabled = per_cpu_ptr(&watchdog_en, cpu);
 
 	if (!*enabled)
 		return;
 
-	WARN_ON_ONCE(cpu != smp_processor_id());
-
 	/*
 	 * Disable the perf event first. That prevents that a large delay
 	 * between disabling the timer and disabling the perf event causes
@@ -527,7 +525,7 @@
 	 */
 	watchdog_nmi_disable(cpu);
 	hrtimer_cancel(hrtimer);
-	wait_for_completion(this_cpu_ptr(&softlockup_completion));
+	wait_for_completion(per_cpu_ptr(&softlockup_completion, cpu));
 
 	/*
 	 * No need for barrier here since disabling the watchdog is
diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c
index 14436f4..30e0f97 100644
--- a/lib/int_sqrt.c
+++ b/lib/int_sqrt.c
@@ -52,7 +52,7 @@
 	if (x <= ULONG_MAX)
 		return int_sqrt((unsigned long) x);
 
-	m = 1ULL << (fls64(x) & ~1ULL);
+	m = 1ULL << ((fls64(x) - 1) & ~1ULL);
 	while (m != 0) {
 		b = y + m;
 		y >>= 1;
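The int_sqrt64() fix matters once bit 63 of x is set: fls64() is 1-based, so the old expression fls64(x) & ~1ULL could evaluate to 64 and the shift was undefined behaviour. Subtracting one first gives the 0-based index of the most significant bit, and masking with ~1 rounds it down to an even position, i.e. the largest power of four not exceeding x, which is the seed the digit-by-digit loop needs. A stand-alone sketch, using __builtin_clzll in place of the kernel's fls64():

#include <stdio.h>
#include <stdint.h>

/* Illustrative sketch of the corrected seed; not part of the patch. */
static uint64_t isqrt64(uint64_t x)
{
	uint64_t b, m, y = 0;
	int msb = 63 - __builtin_clzll(x);	/* 0-based MSB, x must be != 0 */

	m = 1ULL << (msb & ~1);			/* largest power of four <= x */
	while (m != 0) {
		b = y + m;
		y >>= 1;
		if (x >= b) {
			x -= b;
			y += m;
		}
		m >>= 2;
	}
	return y;
}

int main(void)
{
	/* The old seed computation would have shifted by 64 here. */
	printf("%llu\n", (unsigned long long)isqrt64(~0ULL));	/* 4294967295 */
	return 0;
}
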
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 2f8b61d..7ed43ea 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -18,6 +18,20 @@
 
 ifeq ($(CONFIG_ALTIVEC),y)
 altivec_flags := -maltivec $(call cc-option,-mabi=altivec)
+
+ifdef CONFIG_CC_IS_CLANG
+# clang ppc port does not yet support -maltivec when -msoft-float is
+# enabled. A future release of clang will resolve this
+# https://bugs.llvm.org/show_bug.cgi?id=31177
+CFLAGS_REMOVE_altivec1.o  += -msoft-float
+CFLAGS_REMOVE_altivec2.o  += -msoft-float
+CFLAGS_REMOVE_altivec4.o  += -msoft-float
+CFLAGS_REMOVE_altivec8.o  += -msoft-float
+CFLAGS_REMOVE_vpermxor1.o += -msoft-float
+CFLAGS_REMOVE_vpermxor2.o += -msoft-float
+CFLAGS_REMOVE_vpermxor4.o += -msoft-float
+CFLAGS_REMOVE_vpermxor8.o += -msoft-float
+endif
 endif
 
 # The GCC option -ffreestanding is required in order to compile code containing
diff --git a/lib/test_debug_virtual.c b/lib/test_debug_virtual.c
index d5a06ad..bf864c7 100644
--- a/lib/test_debug_virtual.c
+++ b/lib/test_debug_virtual.c
@@ -5,6 +5,7 @@
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/sizes.h>
+#include <linux/io.h>
 
 #include <asm/page.h>
 #ifdef CONFIG_MIPS
diff --git a/mm/Kconfig b/mm/Kconfig
index 7319a3e..9dce3a8 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -651,6 +651,15 @@
 
 	  A sane initial value is 80 MB.
 
+config BALANCE_ANON_FILE_RECLAIM
+	bool "During reclaim treat anon and file backed pages equally"
+	depends on SWAP
+	help
+	  When performing memory reclaim, treat anonymous and file backed pages
+	  equally.
+	  Swapping anonymous pages out to memory can be efficient enough to justify
+	  treating anonymous and file backed pages equally.
+
 config DEFERRED_STRUCT_PAGE_INIT
 	bool "Defer initialisation of struct pages to kthreads"
 	default n
diff --git a/mm/cma.c b/mm/cma.c
index 3c79c67..d8cc98f 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -453,6 +453,8 @@
 	if (!count)
 		return NULL;
 
+	trace_cma_alloc_start(count, align);
+
 	mask = cma_bitmap_aligned_mask(cma, align);
 	offset = cma_bitmap_aligned_offset(cma, align);
 	bitmap_maxno = cma_bitmap_maxno(cma);
@@ -520,6 +522,8 @@
 
 		pr_debug("%s(): memory range at %p is busy, retrying\n",
 			 __func__, pfn_to_page(pfn));
+
+		trace_cma_alloc_busy_retry(pfn, pfn_to_page(pfn), count, align);
 		/* try again with a bit different memory target */
 		start = bitmap_no + mask + 1;
 	}
diff --git a/mm/filemap.c b/mm/filemap.c
index 843429e..0828d75 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2521,6 +2521,11 @@
 		do_async_mmap_readahead(vmf->vma, ra, file, page, offset);
 	} else if (!page) {
 		/* No page in the page cache at all */
+		struct address_space *mapping = file->f_mapping;
+
+		if (mapping && (mapping->gfp_mask & __GFP_MOVABLE))
+			mapping->gfp_mask |= __GFP_CMA;
+
 		do_sync_mmap_readahead(vmf->vma, ra, file, offset);
 		count_vm_event(PGMAJFAULT);
 		count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
diff --git a/mm/hmm.c b/mm/hmm.c
index 90193a7..57f0d2a 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -945,7 +945,6 @@
 
 	devmem = container_of(ref, struct hmm_devmem, ref);
 	percpu_ref_exit(ref);
-	devm_remove_action(devmem->device, &hmm_devmem_ref_exit, data);
 }
 
 static void hmm_devmem_ref_kill(void *data)
@@ -956,7 +955,6 @@
 	devmem = container_of(ref, struct hmm_devmem, ref);
 	percpu_ref_kill(ref);
 	wait_for_completion(&devmem->completion);
-	devm_remove_action(devmem->device, &hmm_devmem_ref_kill, data);
 }
 
 static int hmm_devmem_fault(struct vm_area_struct *vma,
@@ -994,7 +992,7 @@
 	mutex_unlock(&hmm_devmem_lock);
 }
 
-static void hmm_devmem_release(struct device *dev, void *data)
+static void hmm_devmem_release(void *data)
 {
 	struct hmm_devmem *devmem = data;
 	struct resource *resource = devmem->resource;
@@ -1002,11 +1000,6 @@
 	struct zone *zone;
 	struct page *page;
 
-	if (percpu_ref_tryget_live(&devmem->ref)) {
-		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
-		percpu_ref_put(&devmem->ref);
-	}
-
 	/* pages are dead and unused, undo the arch mapping */
 	start_pfn = (resource->start & ~(PA_SECTION_SIZE - 1)) >> PAGE_SHIFT;
 	npages = ALIGN(resource_size(resource), PA_SECTION_SIZE) >> PAGE_SHIFT;
@@ -1130,19 +1123,6 @@
 	return ret;
 }
 
-static int hmm_devmem_match(struct device *dev, void *data, void *match_data)
-{
-	struct hmm_devmem *devmem = data;
-
-	return devmem->resource == match_data;
-}
-
-static void hmm_devmem_pages_remove(struct hmm_devmem *devmem)
-{
-	devres_release(devmem->device, &hmm_devmem_release,
-		       &hmm_devmem_match, devmem->resource);
-}
-
 /*
  * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
  *
@@ -1170,8 +1150,7 @@
 
 	dev_pagemap_get_ops();
 
-	devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
-				   GFP_KERNEL, dev_to_node(device));
+	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
 	if (!devmem)
 		return ERR_PTR(-ENOMEM);
 
@@ -1185,11 +1164,11 @@
 	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
 			      0, GFP_KERNEL);
 	if (ret)
-		goto error_percpu_ref;
+		return ERR_PTR(ret);
 
-	ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
+	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
 	if (ret)
-		goto error_devm_add_action;
+		return ERR_PTR(ret);
 
 	size = ALIGN(size, PA_SECTION_SIZE);
 	addr = min((unsigned long)iomem_resource.end,
@@ -1209,16 +1188,12 @@
 
 		devmem->resource = devm_request_mem_region(device, addr, size,
 							   dev_name(device));
-		if (!devmem->resource) {
-			ret = -ENOMEM;
-			goto error_no_resource;
-		}
+		if (!devmem->resource)
+			return ERR_PTR(-ENOMEM);
 		break;
 	}
-	if (!devmem->resource) {
-		ret = -ERANGE;
-		goto error_no_resource;
-	}
+	if (!devmem->resource)
+		return ERR_PTR(-ERANGE);
 
 	devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
 	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
@@ -1227,30 +1202,15 @@
 
 	ret = hmm_devmem_pages_create(devmem);
 	if (ret)
-		goto error_pages;
-
-	devres_add(device, devmem);
-
-	ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
-	if (ret) {
-		hmm_devmem_remove(devmem);
 		return ERR_PTR(ret);
-	}
+
+	ret = devm_add_action_or_reset(device, hmm_devmem_release, devmem);
+	if (ret)
+		return ERR_PTR(ret);
 
 	return devmem;
-
-error_pages:
-	devm_release_mem_region(device, devmem->resource->start,
-				resource_size(devmem->resource));
-error_no_resource:
-error_devm_add_action:
-	hmm_devmem_ref_kill(&devmem->ref);
-	hmm_devmem_ref_exit(&devmem->ref);
-error_percpu_ref:
-	devres_free(devmem);
-	return ERR_PTR(ret);
 }
-EXPORT_SYMBOL(hmm_devmem_add);
+EXPORT_SYMBOL_GPL(hmm_devmem_add);
 
 struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
 					   struct device *device,
@@ -1264,8 +1224,7 @@
 
 	dev_pagemap_get_ops();
 
-	devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
-				   GFP_KERNEL, dev_to_node(device));
+	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
 	if (!devmem)
 		return ERR_PTR(-ENOMEM);
 
@@ -1279,12 +1238,12 @@
 	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
 			      0, GFP_KERNEL);
 	if (ret)
-		goto error_percpu_ref;
+		return ERR_PTR(ret);
 
-	ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
+	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
+			&devmem->ref);
 	if (ret)
-		goto error_devm_add_action;
-
+		return ERR_PTR(ret);
 
 	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
 	devmem->pfn_last = devmem->pfn_first +
@@ -1292,58 +1251,20 @@
 
 	ret = hmm_devmem_pages_create(devmem);
 	if (ret)
-		goto error_devm_add_action;
-
-	devres_add(device, devmem);
-
-	ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
-	if (ret) {
-		hmm_devmem_remove(devmem);
 		return ERR_PTR(ret);
-	}
+
+	ret = devm_add_action_or_reset(device, hmm_devmem_release, devmem);
+	if (ret)
+		return ERR_PTR(ret);
+
+	ret = devm_add_action_or_reset(device, hmm_devmem_ref_kill,
+			&devmem->ref);
+	if (ret)
+		return ERR_PTR(ret);
 
 	return devmem;
-
-error_devm_add_action:
-	hmm_devmem_ref_kill(&devmem->ref);
-	hmm_devmem_ref_exit(&devmem->ref);
-error_percpu_ref:
-	devres_free(devmem);
-	return ERR_PTR(ret);
 }
-EXPORT_SYMBOL(hmm_devmem_add_resource);
-
-/*
- * hmm_devmem_remove() - remove device memory (kill and free ZONE_DEVICE)
- *
- * @devmem: hmm_devmem struct use to track and manage the ZONE_DEVICE memory
- *
- * This will hot-unplug memory that was hotplugged by hmm_devmem_add on behalf
- * of the device driver. It will free struct page and remove the resource that
- * reserved the physical address range for this device memory.
- */
-void hmm_devmem_remove(struct hmm_devmem *devmem)
-{
-	resource_size_t start, size;
-	struct device *device;
-	bool cdm = false;
-
-	if (!devmem)
-		return;
-
-	device = devmem->device;
-	start = devmem->resource->start;
-	size = resource_size(devmem->resource);
-
-	cdm = devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY;
-	hmm_devmem_ref_kill(&devmem->ref);
-	hmm_devmem_ref_exit(&devmem->ref);
-	hmm_devmem_pages_remove(devmem);
-
-	if (!cdm)
-		devm_release_mem_region(device, start, size);
-}
-EXPORT_SYMBOL(hmm_devmem_remove);
+EXPORT_SYMBOL_GPL(hmm_devmem_add_resource);
 
 /*
  * A device driver that wants to handle multiple devices memory through a
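The conversion above replaces hand-rolled devres objects and goto-based error unwinding with devm_add_action_or_reset(), which runs the release action immediately when registration itself fails, so every error path can simply return. A generic sketch of the pattern, with hypothetical device state and register usage:

#include <linux/device.h>
#include <linux/io.h>
#include <linux/slab.h>

/* Illustrative sketch of the devm_add_action_or_reset() pattern;
 * my_dev_state and the register usage are made up for the example.
 */
struct my_dev_state {
	void __iomem *regs;
};

static void my_disable(void *data)
{
	struct my_dev_state *st = data;

	writel(0, st->regs);		/* undo the enable below */
}

static int my_probe(struct device *dev, void __iomem *regs)
{
	struct my_dev_state *st;

	st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;
	st->regs = regs;

	writel(1, st->regs);		/* enable the hypothetical block */

	/* Calls my_disable(st) right away if the registration fails. */
	return devm_add_action_or_reset(dev, my_disable, st);
}
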
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 15310f1..d2cd70c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2127,23 +2127,25 @@
 	 */
 	old_pmd = pmdp_invalidate(vma, haddr, pmd);
 
-#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
 	pmd_migration = is_pmd_migration_entry(old_pmd);
-	if (pmd_migration) {
+	if (unlikely(pmd_migration)) {
 		swp_entry_t entry;
 
 		entry = pmd_to_swp_entry(old_pmd);
 		page = pfn_to_page(swp_offset(entry));
-	} else
-#endif
+		write = is_write_migration_entry(entry);
+		young = false;
+		soft_dirty = pmd_swp_soft_dirty(old_pmd);
+	} else {
 		page = pmd_page(old_pmd);
+		if (pmd_dirty(old_pmd))
+			SetPageDirty(page);
+		write = pmd_write(old_pmd);
+		young = pmd_young(old_pmd);
+		soft_dirty = pmd_soft_dirty(old_pmd);
+	}
 	VM_BUG_ON_PAGE(!page_count(page), page);
 	page_ref_add(page, HPAGE_PMD_NR - 1);
-	if (pmd_dirty(old_pmd))
-		SetPageDirty(page);
-	write = pmd_write(old_pmd);
-	young = pmd_young(old_pmd);
-	soft_dirty = pmd_soft_dirty(old_pmd);
 
 	/*
 	 * Withdraw the table only after we mark the pmd entry invalid.
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 309fb8c..10e8367 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4269,7 +4269,8 @@
 				break;
 			}
 			if (ret & VM_FAULT_RETRY) {
-				if (nonblocking)
+				if (nonblocking &&
+				    !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
 					*nonblocking = 0;
 				*nr_pages = 0;
 				/*
diff --git a/mm/memblock.c b/mm/memblock.c
index bec0b05..cb26bcf 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -799,7 +799,8 @@
 	memblock_dbg("   memblock_free: [%pa-%pa] %pF\n",
 		     &base, &end, (void *)_RET_IP_);
 
-	kmemleak_free_part_phys(base, size);
+	if (base < memblock.current_limit)
+		kmemleak_free_part(__va(base), size);
 	return memblock_remove_range(&memblock.reserved, base, size);
 }
 
@@ -1248,7 +1249,9 @@
 		 * The min_count is set to 0 so that memblock allocations are
 		 * never reported as leaks.
 		 */
-		kmemleak_alloc_phys(found, size, 0, 0);
+		if (found < memblock.current_limit)
+			kmemleak_alloc(__va(found), size, 0, 0);
+
 		return found;
 	}
 	return 0;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e79cb59..9518aef 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1666,6 +1666,9 @@
 
 static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
 {
+	enum oom_status ret;
+	bool locked;
+
 	if (order > PAGE_ALLOC_COSTLY_ORDER)
 		return OOM_SKIPPED;
 
@@ -1698,10 +1701,23 @@
 		return OOM_ASYNC;
 	}
 
-	if (mem_cgroup_out_of_memory(memcg, mask, order))
-		return OOM_SUCCESS;
+	mem_cgroup_mark_under_oom(memcg);
 
-	return OOM_FAILED;
+	locked = mem_cgroup_oom_trylock(memcg);
+
+	if (locked)
+		mem_cgroup_oom_notify(memcg);
+
+	mem_cgroup_unmark_under_oom(memcg);
+	if (mem_cgroup_out_of_memory(memcg, mask, order))
+		ret = OOM_SUCCESS;
+	else
+		ret = OOM_FAILED;
+
+	if (locked)
+		mem_cgroup_oom_unlock(memcg);
+
+	return ret;
 }
 
 /**
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 0cd3de3..d9b8a24 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -372,7 +372,8 @@
 			if (fail || tk->addr_valid == 0) {
 				pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
 				       pfn, tk->tsk->comm, tk->tsk->pid);
-				force_sig(SIGKILL, tk->tsk);
+				do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
+						 tk->tsk, PIDTYPE_PID);
 			}
 
 			/*
diff --git a/mm/memory.c b/mm/memory.c
index cf84116..58ff2c6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3237,6 +3237,29 @@
 	struct vm_area_struct *vma = vmf->vma;
 	vm_fault_t ret;
 
+	/*
+	 * Preallocate pte before we take page_lock because this might lead to
+	 * deadlocks for memcg reclaim which waits for pages under writeback:
+	 *				lock_page(A)
+	 *				SetPageWriteback(A)
+	 *				unlock_page(A)
+	 * lock_page(B)
+	 *				lock_page(B)
+	 * pte_alloc_one
+	 *   shrink_page_list
+	 *     wait_on_page_writeback(A)
+	 *				SetPageWriteback(B)
+	 *				unlock_page(B)
+	 *				# flush A, B to clear the writeback
+	 */
+	if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
+		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm,
+						  vmf->address);
+		if (!vmf->prealloc_pte)
+			return VM_FAULT_OOM;
+		smp_wmb(); /* See comment in __pte_alloc() */
+	}
+
 	ret = vma->vm_ops->fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
 			    VM_FAULT_DONE_COW)))
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 4f1610c..37c5c51 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -36,6 +36,7 @@
 #include <linux/bootmem.h>
 #include <linux/compaction.h>
 #include <linux/device.h>
+#include <linux/rmap.h>
 
 #include <asm/tlbflush.h>
 
@@ -1399,23 +1400,27 @@
 static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
 {
 	unsigned long pfn;
-	struct page *page;
+
 	for (pfn = start; pfn < end; pfn++) {
-		if (pfn_valid(pfn)) {
-			page = pfn_to_page(pfn);
-			if (PageLRU(page))
-				return pfn;
-			if (__PageMovable(page))
-				return pfn;
-			if (PageHuge(page)) {
-				if (hugepage_migration_supported(page_hstate(page)) &&
-				    page_huge_active(page))
-					return pfn;
-				else
-					pfn = round_up(pfn + 1,
-						1 << compound_order(page)) - 1;
-			}
-		}
+		struct page *page, *head;
+		unsigned long skip;
+
+		if (!pfn_valid(pfn))
+			continue;
+		page = pfn_to_page(pfn);
+		if (PageLRU(page))
+			return pfn;
+		if (__PageMovable(page))
+			return pfn;
+
+		if (!PageHuge(page))
+			continue;
+		head = compound_head(page);
+		if (hugepage_migration_supported(page_hstate(head)) &&
+		    page_huge_active(head))
+			return pfn;
+		skip = (1 << compound_order(head)) - (page - head);
+		pfn += skip - 1;
 	}
 	return 0;
 }
@@ -1467,6 +1472,21 @@
 			pfn = page_to_pfn(compound_head(page))
 				+ hpage_nr_pages(page) - 1;
 
+		/*
+		 * HWPoison pages have elevated reference counts so the migration would
+		 * fail on them. It also doesn't make any sense to migrate them in the
+		 * first place. Still try to unmap such a page in case it is still mapped
+		 * (e.g. the current hwpoison implementation doesn't unmap KSM pages, so
+		 * keep the unmap as the catch-all safety net).
+		 */
+		if (PageHWPoison(page)) {
+			if (WARN_ON(PageLRU(page)))
+				isolate_lru_page(page);
+			if (page_mapped(page))
+				try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);
+			continue;
+		}
+
 		if (!get_page_unless_zero(page))
 			continue;
 		/*
diff --git a/mm/migrate.c b/mm/migrate.c
index 84381b5..e6120ae 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1118,10 +1118,13 @@
 	 * If migration is successful, decrease refcount of the newpage
 	 * which will not free the page because new page owner increased
 	 * refcounter. As well, if it is LRU page, add the page to LRU
-	 * list in here.
+	 * list in here. Use the old state of the isolated source page to
+	 * determine if we migrated a LRU page. newpage was already unlocked
+	 * and possibly modified by its owner - don't rely on the page
+	 * state.
 	 */
 	if (rc == MIGRATEPAGE_SUCCESS) {
-		if (unlikely(__PageMovable(newpage)))
+		if (unlikely(!is_lru))
 			put_page(newpage);
 		else
 			putback_lru_page(newpage);
@@ -1382,6 +1385,8 @@
 	int swapwrite = current->flags & PF_SWAPWRITE;
 	int rc;
 
+	trace_mm_migrate_pages_start(mode, reason);
+
 	if (!swapwrite)
 		current->flags |= PF_SWAPWRITE;
 
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index dde2ee6..e56b072 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -647,8 +647,8 @@
 	 */
 	spin_lock(&oom_reaper_lock);
 
-	/* tsk is already queued? */
-	if (tsk == oom_reaper_list || tsk->oom_reaper_list) {
+	/* mm is already queued? */
+	if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags)) {
 		spin_unlock(&oom_reaper_lock);
 		return;
 	}
@@ -981,6 +981,13 @@
 	 * still freeing memory.
 	 */
 	read_lock(&tasklist_lock);
+
+	/*
+	 * The task 'p' might have already exited before reaching here. The
+	 * put_task_struct() will free task_struct 'p' while the loop still
+	 * tries to access the fields of 'p', so get an extra reference.
+	 */
+	get_task_struct(p);
 	for_each_thread(p, t) {
 		list_for_each_entry(child, &t->children, sibling) {
 			unsigned int child_points;
@@ -1000,6 +1007,7 @@
 			}
 		}
 	}
+	put_task_struct(p);
 	read_unlock(&tasklist_lock);
 
 	/*
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 84ae9bf..ea4fd3a 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2156,6 +2156,7 @@
 {
 	int ret = 0;
 	int done = 0;
+	int error;
 	struct pagevec pvec;
 	int nr_pages;
 	pgoff_t uninitialized_var(writeback_index);
@@ -2236,25 +2237,31 @@
 				goto continue_unlock;
 
 			trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
-			ret = (*writepage)(page, wbc, data);
-			if (unlikely(ret)) {
-				if (ret == AOP_WRITEPAGE_ACTIVATE) {
+			error = (*writepage)(page, wbc, data);
+			if (unlikely(error)) {
+				/*
+				 * Handle errors according to the type of
+				 * writeback. There's no need to continue for
+				 * background writeback. Just push done_index
+				 * past this page so media errors won't choke
+				 * writeout for the entire file. For integrity
+				 * writeback, we must process the entire dirty
+				 * set regardless of errors because the fs may
+				 * still have state to clear for each page. In
+				 * that case we continue processing and return
+				 * the first error.
+				 */
+				if (error == AOP_WRITEPAGE_ACTIVATE) {
 					unlock_page(page);
-					ret = 0;
-				} else {
-					/*
-					 * done_index is set past this page,
-					 * so media errors will not choke
-					 * background writeout for the entire
-					 * file. This has consequences for
-					 * range_cyclic semantics (ie. it may
-					 * not be suitable for data integrity
-					 * writeout).
-					 */
+					error = 0;
+				} else if (wbc->sync_mode != WB_SYNC_ALL) {
+					ret = error;
 					done_index = page->index + 1;
 					done = 1;
 					break;
 				}
+				if (!ret)
+					ret = error;
 			}
 
 			/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ed57047..ddf259b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1913,9 +1913,9 @@
 	set_page_refcounted(page);
 
 	arch_alloc_page(page, order);
+	kasan_alloc_pages(page, order);
 	kernel_map_pages(page, 1 << order, 1);
 	kernel_poison_pages(page, 1 << order, 1);
-	kasan_alloc_pages(page, order);
 	set_page_owner(page, order, gfp_flags);
 }
 
@@ -7845,11 +7845,14 @@
 		 * handle each tail page individually in migration.
 		 */
 		if (PageHuge(page)) {
+			struct page *head = compound_head(page);
+			unsigned int skip_pages;
 
-			if (!hugepage_migration_supported(page_hstate(page)))
+			if (!hugepage_migration_supported(page_hstate(head)))
 				goto unmovable;
 
-			iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
+			skip_pages = (1 << compound_order(head)) - (page - head);
+			iter += skip_pages - 1;
 			continue;
 		}
 
diff --git a/mm/shmem.c b/mm/shmem.c
index e994935..645885c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2171,20 +2171,21 @@
 {
 	struct shmem_inode_info *info = SHMEM_I(file_inode(file));
 
-	/*
-	 * New PROT_READ and MAP_SHARED mmaps are not allowed when "future
-	 * write" seal active.
-	 */
-	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE) &&
-	    (info->seals & F_SEAL_FUTURE_WRITE))
-		return -EPERM;
+	if (info->seals & F_SEAL_FUTURE_WRITE) {
+		/*
+		 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
+		 * the "future write" seal is active.
+		 */
+		if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
+			return -EPERM;
 
-	/*
-	 * Since the F_SEAL_FUTURE_WRITE seals allow for a MAP_SHARED read-only
-	 * mapping, take care to not allow mprotect to revert protections.
-	 */
-	if (info->seals & F_SEAL_FUTURE_WRITE)
+		/*
+		 * Since the F_SEAL_FUTURE_WRITE seals allow for a MAP_SHARED
+		 * read-only mapping, take care to not allow mprotect to revert
+		 * protections.
+		 */
 		vma->vm_flags &= ~(VM_MAYWRITE);
+	}
 
 	file_accessed(file);
 	vma->vm_ops = &shmem_vm_ops;
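Seen from user space, the restructured check means a memfd sealed with F_SEAL_FUTURE_WRITE keeps existing read-only shared mappings usable while refusing any new writable shared mapping, and the cleared VM_MAYWRITE stops mprotect() from upgrading a read-only one afterwards. A rough demonstration, assuming a kernel with F_SEAL_FUTURE_WRITE support and with error handling mostly omitted:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

#ifndef F_SEAL_FUTURE_WRITE
#define F_SEAL_FUTURE_WRITE 0x0010
#endif

int main(void)
{
	int fd = memfd_create("demo", MFD_ALLOW_SEALING);
	void *p;

	ftruncate(fd, 4096);
	fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE);

	/* New shared writable mapping: rejected. */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	printf("rw mmap: %s\n", p == MAP_FAILED ? strerror(errno) : "ok");

	/* Read-only shared mapping still works ... */
	p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	/* ... but VM_MAYWRITE is gone, so mprotect() cannot upgrade it. */
	printf("mprotect: %s\n",
	       mprotect(p, 4096, PROT_READ | PROT_WRITE) ?
	       strerror(errno) : "ok");
	return 0;
}
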
diff --git a/mm/slab.c b/mm/slab.c
index d73c7a4..fad6839 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -679,8 +679,10 @@
 	struct alien_cache *alc = NULL;
 
 	alc = kmalloc_node(memsize, gfp, node);
-	init_arraycache(&alc->ac, entries, batch);
-	spin_lock_init(&alc->lock);
+	if (alc) {
+		init_arraycache(&alc->ac, entries, batch);
+		spin_lock_init(&alc->lock);
+	}
 	return alc;
 }
 
diff --git a/mm/slub.c b/mm/slub.c
index 0fc48b4..eacb2b24 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1693,6 +1693,7 @@
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
 	memcg_uncharge_slab(page, order, s);
+	kasan_alloc_pages(page, order);
 	__free_pages(page, order);
 }
 
@@ -3912,6 +3913,7 @@
 	if (unlikely(!PageSlab(page))) {
 		BUG_ON(!PageCompound(page));
 		kfree_hook(object);
+		kasan_alloc_pages(page, compound_order(page));
 		__free_pages(page, compound_order(page));
 		return;
 	}
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 380d259..f09534f 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2229,7 +2229,8 @@
 		 */
 		if (PageSwapCache(page) &&
 		    likely(page_private(page) == entry.val) &&
-		    !page_swapped(page))
+		    (!PageTransCompound(page) ||
+		     !swap_page_trans_huge_swapped(si, entry)))
 			delete_from_swap_cache(compound_head(page));
 
 		/*
@@ -2840,8 +2841,9 @@
 	struct swap_info_struct *p;
 	unsigned int type;
 	int i;
+	int size = sizeof(*p) + nr_node_ids * sizeof(struct plist_node);
 
-	p = kvzalloc(sizeof(*p), GFP_KERNEL);
+	p = kvzalloc(size, GFP_KERNEL);
 	if (!p)
 		return ERR_PTR(-ENOMEM);
 
diff --git a/mm/usercopy.c b/mm/usercopy.c
index 0293645..51411f9 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -247,7 +247,8 @@
 /*
  * Validates that the given object is:
  * - not bogus address
- * - known-safe heap or stack object
+ * - fully contained by stack (or stack frame, when available)
+ * - fully within SLAB object (or object whitelist area, when available)
  * - not in kernel text
  */
 void __check_object_size(const void *ptr, unsigned long n, bool to_user)
@@ -262,9 +263,6 @@
 	/* Check for invalid addresses. */
 	check_bogus_address((const unsigned long)ptr, n, to_user);
 
-	/* Check for bad heap object. */
-	check_heap_object(ptr, n, to_user);
-
 	/* Check for bad stack object. */
 	switch (check_stack_object(ptr, n)) {
 	case NOT_STACK:
@@ -282,6 +280,9 @@
 		usercopy_abort("process stack", NULL, to_user, 0, n);
 	}
 
+	/* Check for bad heap object. */
+	check_heap_object(ptr, n, to_user);
+
 	/* Check for object in kernel to avoid text exposure. */
 	check_kernel_text_object((const unsigned long)ptr, n, to_user);
 }
diff --git a/mm/util.c b/mm/util.c
index 9e3ebd2..6a24a10 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -485,7 +485,7 @@
 		return true;
 	if (PageHuge(page))
 		return false;
-	for (i = 0; i < hpage_nr_pages(page); i++) {
+	for (i = 0; i < (1 << compound_order(page)); i++) {
 		if (atomic_read(&page[i]._mapcount) >= 0)
 			return true;
 	}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index edbeb4d..5e0c893 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -458,6 +458,10 @@
 	long batch_size = shrinker->batch ? shrinker->batch
 					  : SHRINK_BATCH;
 	long scanned = 0, next_deferred;
+	long min_cache_size = batch_size;
+
+	if (current_is_kswapd())
+		min_cache_size = 0;
 
 	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
 		nid = 0;
@@ -538,7 +542,7 @@
 	 * scanning at high prio and therefore should try to reclaim as much as
 	 * possible.
 	 */
-	while (total_scan >= batch_size ||
+	while (total_scan > min_cache_size ||
 	       total_scan >= freeable) {
 		unsigned long ret;
 		unsigned long nr_to_scan = min(batch_size, total_scan);
@@ -2377,7 +2381,8 @@
 	 * lruvec even if it has plenty of old anonymous pages unless the
 	 * system is under heavy pressure.
 	 */
-	if (!inactive_list_is_low(lruvec, true, memcg, sc, false) &&
+	if (!IS_ENABLED(CONFIG_BALANCE_ANON_FILE_RECLAIM) &&
+	    !inactive_list_is_low(lruvec, true, memcg, sc, false) &&
 	    lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, sc->reclaim_idx) >> sc->priority) {
 		scan_balance = SCAN_FILE;
 		goto out;
@@ -2459,9 +2464,11 @@
 			/*
 			 * Scan types proportional to swappiness and
 			 * their relative recent reclaim efficiency.
+			 * Make sure we don't miss the last page
+			 * because of a round-off error.
 			 */
-			scan = div64_u64(scan * fraction[file],
-					 denominator);
+			scan = DIV64_U64_ROUND_UP(scan * fraction[file],
+						  denominator);
 			break;
 		case SCAN_FILE:
 		case SCAN_ANON:
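The switch from div64_u64() to DIV64_U64_ROUND_UP() is easiest to see with small numbers: when the proportional share of a small LRU truncates to zero, its last pages are never scanned. A sketch (the macro below mirrors the kernel's rounding-up definition):

#include <stdio.h>
#include <stdint.h>

/* Illustrative sketch; not part of the patch. */
#define DIV64_U64_ROUND_UP(x, d)	(((x) + (d) - 1) / (d))

int main(void)
{
	uint64_t scan = 3, fraction = 1, denominator = 10;

	printf("truncated: %llu\n",
	       (unsigned long long)(scan * fraction / denominator));	/* 0 */
	printf("round-up:  %llu\n",
	       (unsigned long long)DIV64_U64_ROUND_UP(scan * fraction,
						      denominator));	/* 1 */
	return 0;
}
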
diff --git a/net/9p/client.c b/net/9p/client.c
index deae53a..75b7bf7 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -181,6 +181,12 @@
 				ret = r;
 				continue;
 			}
+			if (option < 4096) {
+				p9_debug(P9_DEBUG_ERROR,
+					 "msize should be at least 4k\n");
+				ret = -EINVAL;
+				continue;
+			}
 			clnt->msize = option;
 			break;
 		case Opt_trans:
@@ -993,10 +999,18 @@
 	else if (!strncmp(version, "9P2000", 6))
 		c->proto_version = p9_proto_legacy;
 	else {
+		p9_debug(P9_DEBUG_ERROR,
+			 "server returned an unknown version: %s\n", version);
 		err = -EREMOTEIO;
 		goto error;
 	}
 
+	if (msize < 4096) {
+		p9_debug(P9_DEBUG_ERROR,
+			 "server returned a msize < 4096: %d\n", msize);
+		err = -EREMOTEIO;
+		goto error;
+	}
 	if (msize < c->msize)
 		c->msize = msize;
 
@@ -1055,6 +1069,13 @@
 	if (clnt->msize > clnt->trans_mod->maxsize)
 		clnt->msize = clnt->trans_mod->maxsize;
 
+	if (clnt->msize < 4096) {
+		p9_debug(P9_DEBUG_ERROR,
+			 "Please specify a msize of at least 4k\n");
+		err = -EINVAL;
+		goto free_client;
+	}
+
 	err = p9_client_version(clnt);
 	if (err)
 		goto close_trans;
diff --git a/net/Kconfig b/net/Kconfig
index 56bf7db..f46a913 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -329,6 +329,15 @@
 	  with many clients some protection against DoS by a single (spoofed)
 	  flow that greatly exceeds average workload.
 
+config SOCKEV_NLMCAST
+	bool "Enable SOCKEV Netlink Multicast"
+	default n
+	help
+	  Default client for SOCKEV notifier events. Sends multicast netlink
+	  messages whenever the socket event notifier is invoked. Enable if
+	  user space entities need to be notified of socket events without
+	  having to poll /proc.
+
 menu "Network testing"
 
 config NET_PKTGEN
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index c603d33..5d01edf 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -653,15 +653,22 @@
 			break;
 		}
 
-		dev = dev_get_by_name(&init_net, devname);
+		rtnl_lock();
+		dev = __dev_get_by_name(&init_net, devname);
 		if (!dev) {
+			rtnl_unlock();
 			res = -ENODEV;
 			break;
 		}
 
 		ax25->ax25_dev = ax25_dev_ax25dev(dev);
+		if (!ax25->ax25_dev) {
+			rtnl_unlock();
+			res = -ENODEV;
+			break;
+		}
 		ax25_fillin_cb(ax25, ax25->ax25_dev);
-		dev_put(dev);
+		rtnl_unlock();
 		break;
 
 	default:
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index 9a3a301..d92195c 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -116,6 +116,7 @@
 	if ((s = ax25_dev_list) == ax25_dev) {
 		ax25_dev_list = s->next;
 		spin_unlock_bh(&ax25_dev_lock);
+		dev->ax25_ptr = NULL;
 		dev_put(dev);
 		kfree(ax25_dev);
 		return;
@@ -125,6 +126,7 @@
 		if (s->next == ax25_dev) {
 			s->next = ax25_dev->next;
 			spin_unlock_bh(&ax25_dev_lock);
+			dev->ax25_ptr = NULL;
 			dev_put(dev);
 			kfree(ax25_dev);
 			return;
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index f12555f..7f800c3 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -5668,6 +5668,12 @@
 		return true;
 	}
 
+	/* Check if request ended in Command Status - no way to retrieve
+	 * any extra parameters in this case.
+	 */
+	if (hdr->evt == HCI_EV_CMD_STATUS)
+		return false;
+
 	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
 		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
 			   hdr->evt);
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 5372e20..48ddc60 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -36,10 +36,10 @@
 
 int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
+	skb_push(skb, ETH_HLEN);
 	if (!is_skb_forwardable(skb->dev, skb))
 		goto drop;
 
-	skb_push(skb, ETH_HLEN);
 	br_drop_fake_rtable(skb);
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
@@ -65,6 +65,7 @@
 
 int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
+	skb->tstamp = 0;
 	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING,
 		       net, sk, skb, NULL, skb->dev,
 		       br_dev_queue_push_xmit);
@@ -97,12 +98,11 @@
 		net = dev_net(indev);
 	} else {
 		if (unlikely(netpoll_tx_running(to->br->dev))) {
-			if (!is_skb_forwardable(skb->dev, skb)) {
+			skb_push(skb, ETH_HLEN);
+			if (!is_skb_forwardable(skb->dev, skb))
 				kfree_skb(skb);
-			} else {
-				skb_push(skb, ETH_HLEN);
+			else
 				br_netpoll_send_skb(to, skb);
-			}
 			return;
 		}
 		br_hook = NF_BR_LOCAL_OUT;
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 37278dc..e07a7e6 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -278,7 +278,7 @@
 		struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
 		int ret;
 
-		if (neigh->hh.hh_len) {
+		if ((neigh->nud_state & NUD_CONNECTED) && neigh->hh.hh_len) {
 			neigh_hh_bridge(&neigh->hh, skb);
 			skb->dev = nf_bridge->physindev;
 			ret = br_handle_frame_finish(net, sk, skb);
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
index 96c072e..5811208 100644
--- a/net/bridge/br_netfilter_ipv6.c
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -131,6 +131,7 @@
 					IPSTATS_MIB_INDISCARDS);
 			goto drop;
 		}
+		hdr = ipv6_hdr(skb);
 	}
 	if (hdr->nexthdr == NEXTHDR_HOP && br_nf_check_hbh_len(skb))
 		goto drop;
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 4918287..5e55cef 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1137,14 +1137,16 @@
 	tmp.name[sizeof(tmp.name) - 1] = 0;
 
 	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
-	newinfo = vmalloc(sizeof(*newinfo) + countersize);
+	newinfo = __vmalloc(sizeof(*newinfo) + countersize, GFP_KERNEL_ACCOUNT,
+			    PAGE_KERNEL);
 	if (!newinfo)
 		return -ENOMEM;
 
 	if (countersize)
 		memset(newinfo->counters, 0, countersize);
 
-	newinfo->entries = vmalloc(tmp.entries_size);
+	newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT,
+				     PAGE_KERNEL);
 	if (!newinfo->entries) {
 		ret = -ENOMEM;
 		goto free_newinfo;
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index 08cbed7..419e8ed 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -229,6 +229,7 @@
 	    pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
 		return false;
 
+	ip6h = ipv6_hdr(skb);
 	thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
 	if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
 		return false;
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 0af8f0d..79bb8af 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -67,6 +67,9 @@
  */
 #define MAX_NFRAMES 256
 
+/* limit timers to 400 days for sending/timeouts */
+#define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)
+
 /* use of last_frames[index].flags */
 #define RX_RECV    0x40 /* received data for this element */
 #define RX_THR     0x80 /* element not been sent due to throttle feature */
@@ -140,6 +143,22 @@
 	return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
 }
 
+/* check limitations for timeval provided by user */
+static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head)
+{
+	if ((msg_head->ival1.tv_sec < 0) ||
+	    (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) ||
+	    (msg_head->ival1.tv_usec < 0) ||
+	    (msg_head->ival1.tv_usec >= USEC_PER_SEC) ||
+	    (msg_head->ival2.tv_sec < 0) ||
+	    (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) ||
+	    (msg_head->ival2.tv_usec < 0) ||
+	    (msg_head->ival2.tv_usec >= USEC_PER_SEC))
+		return true;
+
+	return false;
+}
+
 #define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU)
 #define OPSIZ sizeof(struct bcm_op)
 #define MHSIZ sizeof(struct bcm_msg_head)
@@ -873,6 +892,10 @@
 	if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
 		return -EINVAL;
 
+	/* check timeval limitations */
+	if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
+		return -EINVAL;
+
 	/* check the given can_id */
 	op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
 	if (op) {
@@ -1053,6 +1076,10 @@
 	     (!(msg_head->can_id & CAN_RTR_FLAG))))
 		return -EINVAL;
 
+	/* check timeval limitations */
+	if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
+		return -EINVAL;
+
 	/* check the given can_id */
 	op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
 	if (op) {
diff --git a/net/can/gw.c b/net/can/gw.c
index faa3da8..53859346 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -416,13 +416,29 @@
 	while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
 		(*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);
 
-	/* check for checksum updates when the CAN frame has been modified */
+	/* Has the CAN frame been modified? */
 	if (modidx) {
-		if (gwj->mod.csumfunc.crc8)
-			(*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
+		/* get available space for the processed CAN frame type */
+		int max_len = nskb->len - offsetof(struct can_frame, data);
 
-		if (gwj->mod.csumfunc.xor)
+		/* dlc may have changed, make sure it fits to the CAN frame */
+		if (cf->can_dlc > max_len)
+			goto out_delete;
+
+		/* check for checksum updates in classic CAN length only */
+		if (gwj->mod.csumfunc.crc8) {
+			if (cf->can_dlc > 8)
+				goto out_delete;
+
+			(*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
+		}
+
+		if (gwj->mod.csumfunc.xor) {
+			if (cf->can_dlc > 8)
+				goto out_delete;
+
 			(*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
+		}
 	}
 
 	/* clear the skb timestamp if not configured the other way */
@@ -434,6 +450,14 @@
 		gwj->dropped_frames++;
 	else
 		gwj->handled_frames++;
+
+	return;
+
+ out_delete:
+	/* delete frame due to misconfiguration */
+	gwj->deleted_frames++;
+	kfree_skb(nskb);
+	return;
 }
 
 static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj)
diff --git a/net/compat.c b/net/compat.c
index 3b2105f..3c4b028 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -467,12 +467,14 @@
 	ctv = (struct compat_timeval __user *) userstamp;
 	err = -ENOENT;
 	sock_enable_timestamp(sk, SOCK_TIMESTAMP);
-	tv = ktime_to_timeval(sk->sk_stamp);
+	tv = ktime_to_timeval(sock_read_timestamp(sk));
+
 	if (tv.tv_sec == -1)
 		return err;
 	if (tv.tv_sec == 0) {
-		sk->sk_stamp = ktime_get_real();
-		tv = ktime_to_timeval(sk->sk_stamp);
+		ktime_t kt = ktime_get_real();
+		sock_write_timestamp(sk, kt);
+		tv = ktime_to_timeval(kt);
 	}
 	err = 0;
 	if (put_user(tv.tv_sec, &ctv->tv_sec) ||
@@ -494,12 +496,13 @@
 	ctv = (struct compat_timespec __user *) userstamp;
 	err = -ENOENT;
 	sock_enable_timestamp(sk, SOCK_TIMESTAMP);
-	ts = ktime_to_timespec(sk->sk_stamp);
+	ts = ktime_to_timespec(sock_read_timestamp(sk));
 	if (ts.tv_sec == -1)
 		return err;
 	if (ts.tv_sec == 0) {
-		sk->sk_stamp = ktime_get_real();
-		ts = ktime_to_timespec(sk->sk_stamp);
+		ktime_t kt = ktime_get_real();
+		sock_write_timestamp(sk, kt);
+		ts = ktime_to_timespec(kt);
 	}
 	err = 0;
 	if (put_user(ts.tv_sec, &ctv->tv_sec) ||
diff --git a/net/core/Makefile b/net/core/Makefile
index 80175e6..a38e0d1 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -27,6 +27,7 @@
 obj-$(CONFIG_CGROUP_NET_CLASSID) += netclassid_cgroup.o
 obj-$(CONFIG_LWTUNNEL) += lwtunnel.o
 obj-$(CONFIG_LWTUNNEL_BPF) += lwt_bpf.o
+obj-$(CONFIG_SOCKEV_NLMCAST) += sockev_nlmcast.o
 obj-$(CONFIG_DST_CACHE) += dst_cache.o
 obj-$(CONFIG_HWBM) += hwbm.o
 obj-$(CONFIG_NET_DEVLINK) += devlink.o
diff --git a/net/core/dev.c b/net/core/dev.c
index 44ccab0..6ac9603 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -8629,6 +8629,9 @@
 	set_bit(__LINK_STATE_PRESENT, &dev->state);
 	set_bit(__LINK_STATE_START, &dev->state);
 
+	/* napi_busy_loop stats accounting wants this */
+	dev_net_set(dev, &init_net);
+
 	/* Note : We dont allocate pcpu_refcnt for dummy devices,
 	 * because users of this 'device' dont need to change
 	 * its refcount.
diff --git a/net/core/filter.c b/net/core/filter.c
index 5e00f2b..8c2411f 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2018,18 +2018,19 @@
 static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
 				 u32 flags)
 {
-	/* skb->mac_len is not set on normal egress */
-	unsigned int mlen = skb->network_header - skb->mac_header;
+	unsigned int mlen = skb_network_offset(skb);
 
-	__skb_pull(skb, mlen);
+	if (mlen) {
+		__skb_pull(skb, mlen);
 
-	/* At ingress, the mac header has already been pulled once.
-	 * At egress, skb_pospull_rcsum has to be done in case that
-	 * the skb is originated from ingress (i.e. a forwarded skb)
-	 * to ensure that rcsum starts at net header.
-	 */
-	if (!skb_at_tc_ingress(skb))
-		skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
+		/* At ingress, the mac header has already been pulled once.
+		 * At egress, skb_pospull_rcsum has to be done in case that
+		 * the skb is originated from ingress (i.e. a forwarded skb)
+		 * to ensure that rcsum starts at net header.
+		 */
+		if (!skb_at_tc_ingress(skb))
+			skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
+	}
 	skb_pop_mac_header(skb);
 	skb_reset_mac_len(skb);
 	return flags & BPF_F_INGRESS ?
diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
index 4b54e5f..acf45dd 100644
--- a/net/core/gro_cells.c
+++ b/net/core/gro_cells.c
@@ -84,6 +84,7 @@
 	for_each_possible_cpu(i) {
 		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
 
+		napi_disable(&cell->napi);
 		netif_napi_del(&cell->napi);
 		__skb_queue_purge(&cell->napi_skbs);
 	}
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index 3e85437..a648568 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -63,6 +63,7 @@
 				     lwt->name ? : "<unknown>");
 			ret = BPF_OK;
 		} else {
+			skb_reset_mac_header(skb);
 			ret = skb_do_redirect(skb);
 			if (ret == 0)
 				ret = BPF_REDIRECT;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index abbbd7f..589ec5b 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -5258,7 +5258,6 @@
 	unsigned long chunk;
 	struct sk_buff *skb;
 	struct page *page;
-	gfp_t gfp_head;
 	int i;
 
 	*errcode = -EMSGSIZE;
@@ -5268,12 +5267,8 @@
 	if (npages > MAX_SKB_FRAGS)
 		return NULL;
 
-	gfp_head = gfp_mask;
-	if (gfp_head & __GFP_DIRECT_RECLAIM)
-		gfp_head |= __GFP_RETRY_MAYFAIL;
-
 	*errcode = -ENOBUFS;
-	skb = alloc_skb(header_len, gfp_head);
+	skb = alloc_skb(header_len, gfp_mask);
 	if (!skb)
 		return NULL;
 
diff --git a/net/core/sock.c b/net/core/sock.c
index 748765e..c9668dc 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -698,6 +698,7 @@
 		break;
 	case SO_DONTROUTE:
 		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
+		sk_dst_reset(sk);
 		break;
 	case SO_BROADCAST:
 		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
@@ -2803,6 +2804,9 @@
 	sk->sk_sndtimeo		=	MAX_SCHEDULE_TIMEOUT;
 
 	sk->sk_stamp = SK_DEFAULT_STAMP;
+#if BITS_PER_LONG==32
+	seqlock_init(&sk->sk_stamp_seq);
+#endif
 	atomic_set(&sk->sk_zckey, 0);
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
@@ -2902,12 +2906,13 @@
 	struct timeval tv;
 
 	sock_enable_timestamp(sk, SOCK_TIMESTAMP);
-	tv = ktime_to_timeval(sk->sk_stamp);
+	tv = ktime_to_timeval(sock_read_timestamp(sk));
 	if (tv.tv_sec == -1)
 		return -ENOENT;
 	if (tv.tv_sec == 0) {
-		sk->sk_stamp = ktime_get_real();
-		tv = ktime_to_timeval(sk->sk_stamp);
+		ktime_t kt = ktime_get_real();
+		sock_write_timestamp(sk, kt);
+		tv = ktime_to_timeval(kt);
 	}
 	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
 }
@@ -2918,11 +2923,12 @@
 	struct timespec ts;
 
 	sock_enable_timestamp(sk, SOCK_TIMESTAMP);
-	ts = ktime_to_timespec(sk->sk_stamp);
+	ts = ktime_to_timespec(sock_read_timestamp(sk));
 	if (ts.tv_sec == -1)
 		return -ENOENT;
 	if (ts.tv_sec == 0) {
-		sk->sk_stamp = ktime_get_real();
+		ktime_t kt = ktime_get_real();
+		sock_write_timestamp(sk, kt);
 		ts = ktime_to_timespec(sk->sk_stamp);
 	}
 	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
diff --git a/net/core/sockev_nlmcast.c b/net/core/sockev_nlmcast.c
new file mode 100644
index 0000000..230f8ab
--- /dev/null
+++ b/net/core/sockev_nlmcast.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2014-2015, 2017-2019, The Linux Foundation. All rights reserved. */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/netlink.h>
+#include <linux/sockev.h>
+#include <net/sock.h>
+
+static int registration_status;
+static struct sock *socknlmsgsk;
+
+static void sockev_skmsg_recv(struct sk_buff *skb)
+{
+	pr_debug("%s(): Got unsolicited request\n", __func__);
+}
+
+static struct netlink_kernel_cfg nlcfg = {
+	.input = sockev_skmsg_recv
+};
+
+static void _sockev_event(unsigned long event, __u8 *evstr, int buflen)
+{
+	switch (event) {
+	case SOCKEV_SOCKET:
+		strlcpy(evstr, "SOCKEV_SOCKET", buflen);
+		break;
+	case SOCKEV_BIND:
+		strlcpy(evstr, "SOCKEV_BIND", buflen);
+		break;
+	case SOCKEV_LISTEN:
+		strlcpy(evstr, "SOCKEV_LISTEN", buflen);
+		break;
+	case SOCKEV_ACCEPT:
+		strlcpy(evstr, "SOCKEV_ACCEPT", buflen);
+		break;
+	case SOCKEV_CONNECT:
+		strlcpy(evstr, "SOCKEV_CONNECT", buflen);
+		break;
+	case SOCKEV_SHUTDOWN:
+		strlcpy(evstr, "SOCKEV_SHUTDOWN", buflen);
+		break;
+	default:
+		strlcpy(evstr, "UNKNOWN", buflen);
+	}
+}
+
+static int sockev_client_cb(struct notifier_block *nb,
+			    unsigned long event, void *data)
+{
+	struct sk_buff *skb;
+	struct nlmsghdr *nlh;
+	struct sknlsockevmsg *smsg;
+	struct socket *sock;
+	struct sock *sk;
+
+	sock = (struct socket *)data;
+	if (!socknlmsgsk || !sock)
+		goto done;
+
+	sk = sock->sk;
+	if (!sk)
+		goto done;
+
+	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
+		goto done;
+
+	if (event != SOCKEV_BIND && event != SOCKEV_LISTEN)
+		goto done;
+
+	skb = nlmsg_new(sizeof(struct sknlsockevmsg), GFP_KERNEL);
+	if (!skb)
+		goto done;
+
+	nlh = nlmsg_put(skb, 0, 0, event, sizeof(struct sknlsockevmsg), 0);
+	if (!nlh) {
+		kfree_skb(skb);
+		goto done;
+	}
+
+	NETLINK_CB(skb).dst_group = SKNLGRP_SOCKEV;
+
+	smsg = nlmsg_data(nlh);
+	memset(smsg, 0, sizeof(struct sknlsockevmsg));
+	smsg->pid = current->pid;
+	_sockev_event(event, smsg->event, sizeof(smsg->event));
+	smsg->skfamily = sk->sk_family;
+	smsg->skstate = sk->sk_state;
+	smsg->skprotocol = sk->sk_protocol;
+	smsg->sktype = sk->sk_type;
+	smsg->skflags = sk->sk_flags;
+	nlmsg_notify(socknlmsgsk, skb, 0, SKNLGRP_SOCKEV, 0, GFP_KERNEL);
+done:
+	return 0;
+}
+
+static struct notifier_block sockev_notifier_client = {
+	.notifier_call = sockev_client_cb,
+	.next = 0,
+	.priority = 0
+};
+
+/* ***************** Startup/Shutdown *************************************** */
+
+static int __init sockev_client_init(void)
+{
+	int rc;
+
+	registration_status = 1;
+	rc = sockev_register_notify(&sockev_notifier_client);
+	if (rc != 0) {
+		registration_status = 0;
+		pr_err("%s(): Failed to register cb (%d)\n", __func__, rc);
+	}
+	socknlmsgsk = netlink_kernel_create(&init_net, NETLINK_SOCKEV, &nlcfg);
+	if (!socknlmsgsk) {
+		pr_err("%s(): Failed to initialize netlink socket\n", __func__);
+		if (registration_status)
+			sockev_unregister_notify(&sockev_notifier_client);
+		registration_status = 0;
+	}
+
+	return rc;
+}
+
+static void __exit sockev_client_exit(void)
+{
+	if (registration_status)
+		sockev_unregister_notify(&sockev_notifier_client);
+}
+
+module_init(sockev_client_init);
+module_exit(sockev_client_exit);
+MODULE_LICENSE("GPL v2");
+
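For reference, a user-space consumer of these events would open a NETLINK_SOCKEV socket and subscribe to the SKNLGRP_SOCKEV multicast group. Both constants and struct sknlsockevmsg come from vendor uapi headers that are not part of this hunk, so the protocol and group numbers below are placeholders, not authoritative values:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
	/* Placeholder values - take the real ones from linux/sockev.h. */
	const int NETLINK_SOCKEV_PROTO = 23;
	const int SKNLGRP_SOCKEV_BIT = 1;
	struct sockaddr_nl sa = {
		.nl_family = AF_NETLINK,
		.nl_groups = 1u << (SKNLGRP_SOCKEV_BIT - 1),
	};
	char buf[4096];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCKEV_PROTO);

	if (fd < 0 || bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		perror("netlink");
		return 1;
	}
	for (;;) {
		ssize_t n = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

		if (n <= 0)
			break;
		/* nlmsg_type carries the SOCKEV_* event id. */
		printf("event 0x%x, %zd bytes\n", nlh->nlmsg_type, n);
	}
	close(fd);
	return 0;
}
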
diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
index ca53efa..8bec827 100644
--- a/net/ieee802154/6lowpan/tx.c
+++ b/net/ieee802154/6lowpan/tx.c
@@ -48,6 +48,9 @@
 	const struct ipv6hdr *hdr = ipv6_hdr(skb);
 	struct neighbour *n;
 
+	if (!daddr)
+		return -EINVAL;
+
 	/* TODO:
 	 * if this package isn't ipv6 one, where should it be routed?
 	 */
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 0113993..958e185 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -203,7 +203,7 @@
 		struct fib_table *tb;
 
 		hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
-			flushed += fib_table_flush(net, tb);
+			flushed += fib_table_flush(net, tb, false);
 	}
 
 	if (flushed)
@@ -1357,7 +1357,7 @@
 
 		hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
 			hlist_del(&tb->tb_hlist);
-			fib_table_flush(net, tb);
+			fib_table_flush(net, tb, true);
 			fib_free_table(tb);
 		}
 	}
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 5bc0c89..3955a6d 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1856,7 +1856,7 @@
 }
 
 /* Caller must hold RTNL. */
-int fib_table_flush(struct net *net, struct fib_table *tb)
+int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all)
 {
 	struct trie *t = (struct trie *)tb->tb_data;
 	struct key_vector *pn = t->kv;
@@ -1904,8 +1904,17 @@
 		hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
 			struct fib_info *fi = fa->fa_info;
 
-			if (!fi || !(fi->fib_flags & RTNH_F_DEAD) ||
-			    tb->tb_id != fa->tb_id) {
+			if (!fi || tb->tb_id != fa->tb_id ||
+			    (!(fi->fib_flags & RTNH_F_DEAD) &&
+			     !fib_props[fa->fa_type].error)) {
+				slen = fa->fa_slen;
+				continue;
+			}
+
+			/* Do not flush error routes if network namespace is
+			 * not being dismantled
+			 */
+			if (!flush_all && fib_props[fa->fa_type].error) {
 				slen = fa->fa_slen;
 				continue;
 			}
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index b798862..f21ea61 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -25,6 +25,7 @@
 #include <linux/spinlock.h>
 #include <net/protocol.h>
 #include <net/gre.h>
+#include <net/erspan.h>
 
 #include <net/icmp.h>
 #include <net/route.h>
@@ -118,6 +119,22 @@
 			hdr_len += 4;
 	}
 	tpi->hdr_len = hdr_len;
+
+	/* ERSPAN ver 1 and 2 protocol sets GRE key field
+	 * to 0 and sets the configured key in the
+	 * inner erspan header field
+	 */
+	if (greh->protocol == htons(ETH_P_ERSPAN) ||
+	    greh->protocol == htons(ETH_P_ERSPAN2)) {
+		struct erspan_base_hdr *ershdr;
+
+		if (!pskb_may_pull(skb, nhs + hdr_len + sizeof(*ershdr)))
+			return -EINVAL;
+
+		ershdr = (struct erspan_base_hdr *)options;
+		tpi->key = cpu_to_be32(get_session_id(ershdr));
+	}
+
 	return hdr_len;
 }
 EXPORT_SYMBOL(gre_parse_header);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 4e5bc4b..1a4e9ff 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -998,7 +998,9 @@
 			if (!inet_diag_bc_sk(bc, sk))
 				goto next_normal;
 
-			sock_hold(sk);
+			if (!refcount_inc_not_zero(&sk->sk_refcnt))
+				goto next_normal;
+
 			num_arr[accum] = num;
 			sk_arr[accum] = sk;
 			if (++accum == SKARR_SZ)
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 32662e9..d5984d3 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -72,6 +72,7 @@
 	if (unlikely(opt->optlen))
 		ip_forward_options(skb);
 
+	skb->tstamp = 0;
 	return dst_output(net, sk, skb);
 }
 
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index f686d77..d95b32a 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -347,10 +347,10 @@
 	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
 	struct rb_node **rbn, *parent;
 	struct sk_buff *skb1, *prev_tail;
+	int ihl, end, skb1_run_end;
 	struct net_device *dev;
 	unsigned int fragsize;
 	int flags, offset;
-	int ihl, end;
 	int err = -ENOENT;
 	u8 ecn;
 
@@ -420,9 +420,12 @@
 	 *   overlapping fragment, the entire datagram (and any constituent
 	 *   fragments) MUST be silently discarded.
 	 *
-	 * We do the same here for IPv4 (and increment an snmp counter).
+	 * We do the same here for IPv4 (and increment an snmp counter) but
+	 * we do not want to drop the whole queue in response to a duplicate
+	 * fragment.
 	 */
 
+	err = -EINVAL;
 	/* Find out where to put this fragment.  */
 	prev_tail = qp->q.fragments_tail;
 	if (!prev_tail)
@@ -444,13 +447,17 @@
 		do {
 			parent = *rbn;
 			skb1 = rb_to_skb(parent);
+			skb1_run_end = skb1->ip_defrag_offset +
+				       FRAG_CB(skb1)->frag_run_len;
 			if (end <= skb1->ip_defrag_offset)
 				rbn = &parent->rb_left;
-			else if (offset >= skb1->ip_defrag_offset +
-						FRAG_CB(skb1)->frag_run_len)
+			else if (offset >= skb1_run_end)
 				rbn = &parent->rb_right;
-			else /* Found an overlap with skb1. */
-				goto discard_qp;
+			else if (offset >= skb1->ip_defrag_offset &&
+				 end <= skb1_run_end)
+				goto err; /* No new data, potential duplicate */
+			else
+				goto discard_qp; /* Found an overlap */
 		} while (*rbn);
 		/* Here we have parent properly set, and rbn pointing to
 		 * one of its NULL left/right children. Insert skb.
@@ -495,7 +502,6 @@
 
 discard_qp:
 	inet_frag_kill(&qp->q);
-	err = -EINVAL;
 	__IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
 err:
 	kfree_skb(skb);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 8cce0e9..f199945 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -269,20 +269,11 @@
 	int len;
 
 	itn = net_generic(net, erspan_net_id);
-	len = gre_hdr_len + sizeof(*ershdr);
-
-	/* Check based hdr len */
-	if (unlikely(!pskb_may_pull(skb, len)))
-		return PACKET_REJECT;
 
 	iph = ip_hdr(skb);
 	ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
 	ver = ershdr->ver;
 
-	/* The original GRE header does not have key field,
-	 * Use ERSPAN 10-bit session ID as key.
-	 */
-	tpi->key = cpu_to_be32(get_session_id(ershdr));
 	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
 				  tpi->flags | TUNNEL_KEY,
 				  iph->saddr, iph->daddr, tpi->key);
@@ -570,8 +561,7 @@
 	dev->stats.tx_dropped++;
 }
 
-static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
-			   __be16 proto)
+static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	struct ip_tunnel_info *tun_info;
@@ -579,10 +569,10 @@
 	struct erspan_metadata *md;
 	struct rtable *rt = NULL;
 	bool truncate = false;
+	__be16 df, proto;
 	struct flowi4 fl;
 	int tunnel_hlen;
 	int version;
-	__be16 df;
 	int nhoff;
 	int thoff;
 
@@ -627,18 +617,20 @@
 	if (version == 1) {
 		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
 				    ntohl(md->u.index), truncate, true);
+		proto = htons(ETH_P_ERSPAN);
 	} else if (version == 2) {
 		erspan_build_header_v2(skb,
 				       ntohl(tunnel_id_to_key32(key->tun_id)),
 				       md->u.md2.dir,
 				       get_hwid(&md->u.md2),
 				       truncate, true);
+		proto = htons(ETH_P_ERSPAN2);
 	} else {
 		goto err_free_rt;
 	}
 
 	gre_build_header(skb, 8, TUNNEL_SEQ,
-			 htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++));
+			 proto, 0, htonl(tunnel->o_seqno++));
 
 	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ?  htons(IP_DF) : 0;
 
@@ -677,6 +669,9 @@
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	const struct iphdr *tnl_params;
 
+	if (!pskb_inet_may_pull(skb))
+		goto free_skb;
+
 	if (tunnel->collect_md) {
 		gre_fb_xmit(skb, dev, skb->protocol);
 		return NETDEV_TX_OK;
@@ -719,9 +714,13 @@
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	bool truncate = false;
+	__be16 proto;
+
+	if (!pskb_inet_may_pull(skb))
+		goto free_skb;
 
 	if (tunnel->collect_md) {
-		erspan_fb_xmit(skb, dev, skb->protocol);
+		erspan_fb_xmit(skb, dev);
 		return NETDEV_TX_OK;
 	}
 
@@ -737,19 +736,22 @@
 	}
 
 	/* Push ERSPAN header */
-	if (tunnel->erspan_ver == 1)
+	if (tunnel->erspan_ver == 1) {
 		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
 				    tunnel->index,
 				    truncate, true);
-	else if (tunnel->erspan_ver == 2)
+		proto = htons(ETH_P_ERSPAN);
+	} else if (tunnel->erspan_ver == 2) {
 		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
 				       tunnel->dir, tunnel->hwid,
 				       truncate, true);
-	else
+		proto = htons(ETH_P_ERSPAN2);
+	} else {
 		goto free_skb;
+	}
 
 	tunnel->parms.o_flags &= ~TUNNEL_KEY;
-	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));
+	__gre_xmit(skb, dev, &tunnel->parms.iph, proto);
 	return NETDEV_TX_OK;
 
 free_skb:
@@ -763,6 +765,9 @@
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 
+	if (!pskb_inet_may_pull(skb))
+		goto free_skb;
+
 	if (tunnel->collect_md) {
 		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
 		return NETDEV_TX_OK;
@@ -1457,12 +1462,17 @@
 {
 	struct ip_tunnel *t = netdev_priv(dev);
 	struct ip_tunnel_parm *p = &t->parms;
+	__be16 o_flags = p->o_flags;
+
+	if ((t->erspan_ver == 1 || t->erspan_ver == 2) &&
+	    !t->collect_md)
+		o_flags |= TUNNEL_KEY;
 
 	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
 	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
 			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
 	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
-			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
+			 gre_tnl_flags_to_gre_flags(o_flags)) ||
 	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
 	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
 	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
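This and several later tunnel xmit paths (ip_vti, ip6_gre, ip6_tunnel, ip6_vti, ip6mr, sit) gain a pskb_inet_may_pull() check before touching the inner network header, replacing the per-caller pskb_may_pull() logic. The helper itself is defined in include/net/ip_tunnels.h, outside this diff; a sketch of its shape for reference:

	static inline bool pskb_inet_may_pull(struct sk_buff *skb)
	{
		int nhlen;

		switch (skb->protocol) {
	#if IS_ENABLED(CONFIG_IPV6)
		case htons(ETH_P_IPV6):
			nhlen = sizeof(struct ipv6hdr);
			break;
	#endif
		case htons(ETH_P_IP):
			nhlen = sizeof(struct iphdr);
			break;
		default:
			nhlen = 0;
		}

		/* pull the inner network header into the linear area if needed */
		return pskb_network_may_pull(skb, nhlen);
	}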
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 5f6f7b3..797c4ff 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -488,6 +488,7 @@
 		goto drop;
 	}
 
+	iph = ip_hdr(skb);
 	skb->transport_header = skb->network_header + iph->ihl*4;
 
 	/* Remove any debris in the socket control block */
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 26c36cc..b7a2612 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -148,19 +148,17 @@
 
 static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
 {
+	__be16 _ports[2], *ports;
 	struct sockaddr_in sin;
-	__be16 *ports;
-	int end;
-
-	end = skb_transport_offset(skb) + 4;
-	if (end > 0 && !pskb_may_pull(skb, end))
-		return;
 
 	/* All current transport protocols have the port numbers in the
 	 * first four bytes of the transport header and this function is
 	 * written with this assumption in mind.
 	 */
-	ports = (__be16 *)skb_transport_header(skb);
+	ports = skb_header_pointer(skb, skb_transport_offset(skb),
+				   sizeof(_ports), &_ports);
+	if (!ports)
+		return;
 
 	sin.sin_family = AF_INET;
 	sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
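The rewrite above swaps pskb_may_pull() for skb_header_pointer(), which never reallocates the skb head: it returns a pointer into the linear area when the bytes are already there, copies them into the caller-supplied buffer when they sit in paged data, and returns NULL when the requested range runs past skb->len. A minimal usage sketch of the same pattern:

	__be16 _ports[2], *ports;

	/* Read 4 bytes at the transport offset without modifying the skb;
	 * on success, ports aliases either the skb's linear data or _ports.
	 */
	ports = skb_header_pointer(skb, skb_transport_offset(skb),
				   sizeof(_ports), &_ports);
	if (!ports)
		return;		/* truncated packet */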
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 284a221..c4f5602 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -627,7 +627,6 @@
 		    const struct iphdr *tnl_params, u8 protocol)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
-	unsigned int inner_nhdr_len = 0;
 	const struct iphdr *inner_iph;
 	struct flowi4 fl4;
 	u8     tos, ttl;
@@ -637,14 +636,6 @@
 	__be32 dst;
 	bool connected;
 
-	/* ensure we can access the inner net header, for several users below */
-	if (skb->protocol == htons(ETH_P_IP))
-		inner_nhdr_len = sizeof(struct iphdr);
-	else if (skb->protocol == htons(ETH_P_IPV6))
-		inner_nhdr_len = sizeof(struct ipv6hdr);
-	if (unlikely(!pskb_may_pull(skb, inner_nhdr_len)))
-		goto tx_error;
-
 	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
 	connected = (tunnel->parms.iph.daddr != 0);
 
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index f38cb21..7f56944 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -241,6 +241,9 @@
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	struct flowi fl;
 
+	if (!pskb_inet_may_pull(skb))
+		goto tx_err;
+
 	memset(&fl, 0, sizeof(fl));
 
 	switch (skb->protocol) {
@@ -253,15 +256,18 @@
 		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
 		break;
 	default:
-		dev->stats.tx_errors++;
-		dev_kfree_skb(skb);
-		return NETDEV_TX_OK;
+		goto tx_err;
 	}
 
 	/* override mark with tunnel output key */
 	fl.flowi_mark = be32_to_cpu(tunnel->parms.o_key);
 
 	return vti_xmit(skb, dev, &fl);
+
+tx_err:
+	dev->stats.tx_errors++;
+	kfree_skb(skb);
+	return NETDEV_TX_OK;
 }
 
 static int vti4_err(struct sk_buff *skb, u32 info)
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 5660adc..f6275aa 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -69,6 +69,8 @@
 #include <net/nexthop.h>
 #include <net/switchdev.h>
 
+#include <linux/nospec.h>
+
 struct ipmr_rule {
 	struct fib_rule		common;
 };
@@ -1612,6 +1614,7 @@
 			return -EFAULT;
 		if (vr.vifi >= mrt->maxvif)
 			return -EINVAL;
+		vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
 		read_lock(&mrt_lock);
 		vif = &mrt->vif_table[vr.vifi];
 		if (VIF_EXISTS(mrt, vr.vifi)) {
@@ -1686,6 +1689,7 @@
 			return -EFAULT;
 		if (vr.vifi >= mrt->maxvif)
 			return -EINVAL;
+		vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
 		read_lock(&mrt_lock);
 		vif = &mrt->vif_table[vr.vifi];
 		if (VIF_EXISTS(mrt, vr.vifi)) {
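The array_index_nospec() lines are the standard Spectre-v1 mitigation: a bounds check alone does not constrain speculative execution, so the index is clamped before it is used to address vif_table[]. The pattern in isolation (restating the hunks above):

	#include <linux/nospec.h>

	if (vr.vifi >= mrt->maxvif)
		return -EINVAL;
	/* force vr.vifi into [0, maxvif) on the speculative path as well */
	vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
	vif = &mrt->vif_table[vr.vifi];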
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 2c8d313..fb1e7f2 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -57,17 +57,14 @@
 	enum clusterip_hashmode hash_mode;	/* which hashing mode */
 	u_int32_t hash_initval;			/* hash initialization */
 	struct rcu_head rcu;
-
+	struct net *net;			/* netns for pernet list */
 	char ifname[IFNAMSIZ];			/* device ifname */
-	struct notifier_block notifier;		/* refresh c->ifindex in it */
 };
 
 #ifdef CONFIG_PROC_FS
 static const struct file_operations clusterip_proc_fops;
 #endif
 
-static unsigned int clusterip_net_id __read_mostly;
-
 struct clusterip_net {
 	struct list_head configs;
 	/* lock protects the configs list */
@@ -78,16 +75,30 @@
 #endif
 };
 
+static unsigned int clusterip_net_id __read_mostly;
+static inline struct clusterip_net *clusterip_pernet(struct net *net)
+{
+	return net_generic(net, clusterip_net_id);
+}
+
 static inline void
 clusterip_config_get(struct clusterip_config *c)
 {
 	refcount_inc(&c->refcount);
 }
 
-
 static void clusterip_config_rcu_free(struct rcu_head *head)
 {
-	kfree(container_of(head, struct clusterip_config, rcu));
+	struct clusterip_config *config;
+	struct net_device *dev;
+
+	config = container_of(head, struct clusterip_config, rcu);
+	dev = dev_get_by_name(config->net, config->ifname);
+	if (dev) {
+		dev_mc_del(dev, config->clustermac);
+		dev_put(dev);
+	}
+	kfree(config);
 }
 
 static inline void
@@ -101,9 +112,9 @@
  * entry(rule) is removed, remove the config from lists, but don't free it
  * yet, since proc-files could still be holding references */
 static inline void
-clusterip_config_entry_put(struct net *net, struct clusterip_config *c)
+clusterip_config_entry_put(struct clusterip_config *c)
 {
-	struct clusterip_net *cn = net_generic(net, clusterip_net_id);
+	struct clusterip_net *cn = clusterip_pernet(c->net);
 
 	local_bh_disable();
 	if (refcount_dec_and_lock(&c->entries, &cn->lock)) {
@@ -118,8 +129,6 @@
 		spin_unlock(&cn->lock);
 		local_bh_enable();
 
-		unregister_netdevice_notifier(&c->notifier);
-
 		return;
 	}
 	local_bh_enable();
@@ -129,7 +138,7 @@
 __clusterip_config_find(struct net *net, __be32 clusterip)
 {
 	struct clusterip_config *c;
-	struct clusterip_net *cn = net_generic(net, clusterip_net_id);
+	struct clusterip_net *cn = clusterip_pernet(net);
 
 	list_for_each_entry_rcu(c, &cn->configs, list) {
 		if (c->clusterip == clusterip)
@@ -181,32 +190,37 @@
 		       void *ptr)
 {
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct net *net = dev_net(dev);
+	struct clusterip_net *cn = clusterip_pernet(net);
 	struct clusterip_config *c;
 
-	c = container_of(this, struct clusterip_config, notifier);
-	switch (event) {
-	case NETDEV_REGISTER:
-		if (!strcmp(dev->name, c->ifname)) {
-			c->ifindex = dev->ifindex;
-			dev_mc_add(dev, c->clustermac);
+	spin_lock_bh(&cn->lock);
+	list_for_each_entry_rcu(c, &cn->configs, list) {
+		switch (event) {
+		case NETDEV_REGISTER:
+			if (!strcmp(dev->name, c->ifname)) {
+				c->ifindex = dev->ifindex;
+				dev_mc_add(dev, c->clustermac);
+			}
+			break;
+		case NETDEV_UNREGISTER:
+			if (dev->ifindex == c->ifindex) {
+				dev_mc_del(dev, c->clustermac);
+				c->ifindex = -1;
+			}
+			break;
+		case NETDEV_CHANGENAME:
+			if (!strcmp(dev->name, c->ifname)) {
+				c->ifindex = dev->ifindex;
+				dev_mc_add(dev, c->clustermac);
+			} else if (dev->ifindex == c->ifindex) {
+				dev_mc_del(dev, c->clustermac);
+				c->ifindex = -1;
+			}
+			break;
 		}
-		break;
-	case NETDEV_UNREGISTER:
-		if (dev->ifindex == c->ifindex) {
-			dev_mc_del(dev, c->clustermac);
-			c->ifindex = -1;
-		}
-		break;
-	case NETDEV_CHANGENAME:
-		if (!strcmp(dev->name, c->ifname)) {
-			c->ifindex = dev->ifindex;
-			dev_mc_add(dev, c->clustermac);
-		} else if (dev->ifindex == c->ifindex) {
-			dev_mc_del(dev, c->clustermac);
-			c->ifindex = -1;
-		}
-		break;
 	}
+	spin_unlock_bh(&cn->lock);
 
 	return NOTIFY_DONE;
 }
@@ -215,30 +229,44 @@
 clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
 		      __be32 ip, const char *iniface)
 {
-	struct clusterip_net *cn = net_generic(net, clusterip_net_id);
+	struct clusterip_net *cn = clusterip_pernet(net);
 	struct clusterip_config *c;
+	struct net_device *dev;
 	int err;
 
+	if (iniface[0] == '\0') {
+		pr_info("Please specify an interface name\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	c = kzalloc(sizeof(*c), GFP_ATOMIC);
 	if (!c)
 		return ERR_PTR(-ENOMEM);
 
-	strcpy(c->ifname, iniface);
-	c->ifindex = -1;
-	c->clusterip = ip;
+	dev = dev_get_by_name(net, iniface);
+	if (!dev) {
+		pr_info("no such interface %s\n", iniface);
+		kfree(c);
+		return ERR_PTR(-ENOENT);
+	}
+	c->ifindex = dev->ifindex;
+	strcpy(c->ifname, dev->name);
 	memcpy(&c->clustermac, &i->clustermac, ETH_ALEN);
+	dev_mc_add(dev, c->clustermac);
+	dev_put(dev);
+
+	c->clusterip = ip;
 	c->num_total_nodes = i->num_total_nodes;
 	clusterip_config_init_nodelist(c, i);
 	c->hash_mode = i->hash_mode;
 	c->hash_initval = i->hash_initval;
+	c->net = net;
 	refcount_set(&c->refcount, 1);
 
 	spin_lock_bh(&cn->lock);
 	if (__clusterip_config_find(net, ip)) {
-		spin_unlock_bh(&cn->lock);
-		kfree(c);
-
-		return ERR_PTR(-EBUSY);
+		err = -EBUSY;
+		goto out_config_put;
 	}
 
 	list_add_rcu(&c->list, &cn->configs);
@@ -260,22 +288,17 @@
 	}
 #endif
 
-	c->notifier.notifier_call = clusterip_netdev_event;
-	err = register_netdevice_notifier(&c->notifier);
-	if (!err) {
-		refcount_set(&c->entries, 1);
-		return c;
-	}
+	refcount_set(&c->entries, 1);
+	return c;
 
 #ifdef CONFIG_PROC_FS
-	proc_remove(c->pde);
 err:
 #endif
 	spin_lock_bh(&cn->lock);
 	list_del_rcu(&c->list);
+out_config_put:
 	spin_unlock_bh(&cn->lock);
 	clusterip_config_put(c);
-
 	return ERR_PTR(err);
 }
 
@@ -475,34 +498,20 @@
 				&e->ip.dst.s_addr);
 			return -EINVAL;
 		} else {
-			struct net_device *dev;
-
-			if (e->ip.iniface[0] == '\0') {
-				pr_info("Please specify an interface name\n");
-				return -EINVAL;
-			}
-
-			dev = dev_get_by_name(par->net, e->ip.iniface);
-			if (!dev) {
-				pr_info("no such interface %s\n",
-					e->ip.iniface);
-				return -ENOENT;
-			}
-			dev_put(dev);
-
 			config = clusterip_config_init(par->net, cipinfo,
 						       e->ip.dst.s_addr,
 						       e->ip.iniface);
 			if (IS_ERR(config))
 				return PTR_ERR(config);
 		}
-	}
+	} else if (memcmp(&config->clustermac, &cipinfo->clustermac, ETH_ALEN))
+		return -EINVAL;
 
 	ret = nf_ct_netns_get(par->net, par->family);
 	if (ret < 0) {
 		pr_info("cannot load conntrack support for proto=%u\n",
 			par->family);
-		clusterip_config_entry_put(par->net, config);
+		clusterip_config_entry_put(config);
 		clusterip_config_put(config);
 		return ret;
 	}
@@ -524,7 +533,7 @@
 
 	/* if no more entries are referencing the config, remove it
 	 * from the list and destroy the proc entry */
-	clusterip_config_entry_put(par->net, cipinfo->config);
+	clusterip_config_entry_put(cipinfo->config);
 
 	clusterip_config_put(cipinfo->config);
 
@@ -806,7 +815,7 @@
 
 static int clusterip_net_init(struct net *net)
 {
-	struct clusterip_net *cn = net_generic(net, clusterip_net_id);
+	struct clusterip_net *cn = clusterip_pernet(net);
 	int ret;
 
 	INIT_LIST_HEAD(&cn->configs);
@@ -831,13 +840,12 @@
 
 static void clusterip_net_exit(struct net *net)
 {
-	struct clusterip_net *cn = net_generic(net, clusterip_net_id);
+	struct clusterip_net *cn = clusterip_pernet(net);
 #ifdef CONFIG_PROC_FS
 	proc_remove(cn->procdir);
 	cn->procdir = NULL;
 #endif
 	nf_unregister_net_hook(net, &cip_arp_ops);
-	WARN_ON_ONCE(!list_empty(&cn->configs));
 }
 
 static struct pernet_operations clusterip_net_ops = {
@@ -847,6 +855,10 @@
 	.size = sizeof(struct clusterip_net),
 };
 
+struct notifier_block cip_netdev_notifier = {
+	.notifier_call = clusterip_netdev_event
+};
+
 static int __init clusterip_tg_init(void)
 {
 	int ret;
@@ -859,11 +871,17 @@
 	if (ret < 0)
 		goto cleanup_subsys;
 
+	ret = register_netdevice_notifier(&cip_netdev_notifier);
+	if (ret < 0)
+		goto unregister_target;
+
 	pr_info("ClusterIP Version %s loaded successfully\n",
 		CLUSTERIP_VERSION);
 
 	return 0;
 
+unregister_target:
+	xt_unregister_target(&clusterip_tg_reg);
 cleanup_subsys:
 	unregister_pernet_subsys(&clusterip_net_ops);
 	return ret;
@@ -873,6 +891,7 @@
 {
 	pr_info("ClusterIP Version %s unloading\n", CLUSTERIP_VERSION);
 
+	unregister_netdevice_notifier(&cip_netdev_notifier);
 	xt_unregister_target(&clusterip_tg_reg);
 	unregister_pernet_subsys(&clusterip_net_ops);
 
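The CLUSTERIP rework above replaces the per-config notifier_block (one register_netdevice_notifier() call per rule) with a single module-wide notifier that walks the pernet config list under cn->lock, and moves the interface lookup plus dev_mc_add() into clusterip_config_init(). Module init gains the usual reverse-order error unwind; compressed from the hunks above:

	/* acquisition order: pernet subsys -> xt target -> netdev notifier;
	 * a failure at any step releases what came before, in reverse.
	 */
	ret = register_netdevice_notifier(&cip_netdev_notifier);
	if (ret < 0)
		goto unregister_target;
	return 0;

	unregister_target:
		xt_unregister_target(&clusterip_tg_reg);
	cleanup_subsys:
		unregister_pernet_subsys(&clusterip_net_ops);
		return ret;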
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5d0d6cc..e5b6c23 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1186,7 +1186,7 @@
 	flags = msg->msg_flags;
 
 	if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) {
-		if (sk->sk_state != TCP_ESTABLISHED) {
+		if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
 			err = -EINVAL;
 			goto out_err;
 		}
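The MSG_ZEROCOPY fix above switches from an equality test against TCP_ESTABLISHED to the kernel's one-hot state-mask idiom, so the check now accepts both ESTABLISHED and CLOSE_WAIT (a peer that has already sent FIN can still receive our data). The equivalent test as a hypothetical standalone helper (the patch open-codes it):

	static bool tcp_state_allows_zerocopy(const struct sock *sk)
	{
		/* (1 << state) turns the scalar state into a single bit;
		 * ANDing with the complement of the allowed TCPF_* flags is
		 * non-zero exactly for the disallowed states.
		 */
		return !((1 << sk->sk_state) &
			 ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT));
	}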
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 57eae8d..b1b5a64 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -224,7 +224,7 @@
 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
 		if (icsk->icsk_retransmits) {
 			dst_negative_advice(sk);
-		} else if (!tp->syn_data && !tp->syn_fastopen) {
+		} else {
 			sk_rethink_txhash(sk);
 		}
 		retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 31a42ae..c5f31162 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -791,15 +791,23 @@
 		const int hlen = skb_network_header_len(skb) +
 				 sizeof(struct udphdr);
 
-		if (hlen + cork->gso_size > cork->fragsize)
+		if (hlen + cork->gso_size > cork->fragsize) {
+			kfree_skb(skb);
 			return -EINVAL;
-		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS)
+		}
+		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
+			kfree_skb(skb);
 			return -EINVAL;
-		if (sk->sk_no_check_tx)
+		}
+		if (sk->sk_no_check_tx) {
+			kfree_skb(skb);
 			return -EINVAL;
+		}
 		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
-		    dst_xfrm(skb_dst(skb)))
+		    dst_xfrm(skb_dst(skb))) {
+			kfree_skb(skb);
 			return -EIO;
+		}
 
 		skb_shinfo(skb)->gso_size = cork->gso_size;
 		skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 7e74c7f..37ac864 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4733,8 +4733,8 @@
 			 IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC;
 
 	idev = ipv6_find_idev(dev);
-	if (IS_ERR(idev))
-		return PTR_ERR(idev);
+	if (!idev)
+		return -ENOBUFS;
 
 	if (!ipv6_allow_optimistic_dad(net, idev))
 		cfg.ifa_flags &= ~IFA_F_OPTIMISTIC;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 6c330ed..cc855d9 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -325,6 +325,7 @@
 
 	/* Check if the address belongs to the host. */
 	if (addr_type == IPV6_ADDR_MAPPED) {
+		struct net_device *dev = NULL;
 		int chk_addr_ret;
 
 		/* Binding to v4-mapped address on a v6-only socket
@@ -335,9 +336,20 @@
 			goto out;
 		}
 
+		rcu_read_lock();
+		if (sk->sk_bound_dev_if) {
+			dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
+			if (!dev) {
+				err = -ENODEV;
+				goto out_unlock;
+			}
+		}
+
 		/* Reproduce AF_INET checks to make the bindings consistent */
 		v4addr = addr->sin6_addr.s6_addr32[3];
-		chk_addr_ret = inet_addr_type(net, v4addr);
+		chk_addr_ret = inet_addr_type_dev_table(net, dev, v4addr);
+		rcu_read_unlock();
+
 		if (!inet_can_nonlocal_bind(net, inet) &&
 		    v4addr != htonl(INADDR_ANY) &&
 		    chk_addr_ret != RTN_LOCAL &&
@@ -365,6 +377,9 @@
 					err = -EINVAL;
 					goto out_unlock;
 				}
+			}
+
+			if (sk->sk_bound_dev_if) {
 				dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
 				if (!dev) {
 					err = -ENODEV;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index f31fe86..aea3cb4 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -341,6 +341,7 @@
 	skb_reset_network_header(skb);
 	iph = ipv6_hdr(skb);
 	iph->daddr = fl6->daddr;
+	ip6_flow_hdr(iph, 0, 0);
 
 	serr = SKB_EXT_ERR(skb);
 	serr->ee.ee_errno = err;
@@ -700,17 +701,15 @@
 	}
 	if (np->rxopt.bits.rxorigdstaddr) {
 		struct sockaddr_in6 sin6;
-		__be16 *ports;
-		int end;
+		__be16 _ports[2], *ports;
 
-		end = skb_transport_offset(skb) + 4;
-		if (end <= 0 || pskb_may_pull(skb, end)) {
+		ports = skb_header_pointer(skb, skb_transport_offset(skb),
+					   sizeof(_ports), &_ports);
+		if (ports) {
 			/* All current transport protocols have the port numbers in the
 			 * first four bytes of the transport header and this function is
 			 * written with this assumption in mind.
 			 */
-			ports = (__be16 *)skb_transport_header(skb);
-
 			sin6.sin6_family = AF_INET6;
 			sin6.sin6_addr = ipv6_hdr(skb)->daddr;
 			sin6.sin6_port = ports[1];
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index c9c53ad..6d14cbe 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -421,10 +421,10 @@
 static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
 		       const struct in6_addr *force_saddr)
 {
-	struct net *net = dev_net(skb->dev);
 	struct inet6_dev *idev = NULL;
 	struct ipv6hdr *hdr = ipv6_hdr(skb);
 	struct sock *sk;
+	struct net *net;
 	struct ipv6_pinfo *np;
 	const struct in6_addr *saddr = NULL;
 	struct dst_entry *dst;
@@ -435,12 +435,16 @@
 	int iif = 0;
 	int addr_type = 0;
 	int len;
-	u32 mark = IP6_REPLY_MARK(net, skb->mark);
+	u32 mark;
 
 	if ((u8 *)hdr < skb->head ||
 	    (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb))
 		return;
 
+	if (!skb->dev)
+		return;
+	net = dev_net(skb->dev);
+	mark = IP6_REPLY_MARK(net, skb->mark);
 	/*
 	 *	Make sure we respect the rules
 	 *	i.e. RFC 1885 2.4(e)
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index e493b04..be04877 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -550,13 +550,9 @@
 	struct ip6_tnl *tunnel;
 	u8 ver;
 
-	if (unlikely(!pskb_may_pull(skb, sizeof(*ershdr))))
-		return PACKET_REJECT;
-
 	ipv6h = ipv6_hdr(skb);
 	ershdr = (struct erspan_base_hdr *)skb->data;
 	ver = ershdr->ver;
-	tpi->key = cpu_to_be32(get_session_id(ershdr));
 
 	tunnel = ip6gre_tunnel_lookup(skb->dev,
 				      &ipv6h->saddr, &ipv6h->daddr, tpi->key,
@@ -897,6 +893,9 @@
 	struct net_device_stats *stats = &t->dev->stats;
 	int ret;
 
+	if (!pskb_inet_may_pull(skb))
+		goto tx_err;
+
 	if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
 		goto tx_err;
 
@@ -935,10 +934,14 @@
 	__u8 dsfield = false;
 	struct flowi6 fl6;
 	int err = -EINVAL;
+	__be16 proto;
 	__u32 mtu;
 	int nhoff;
 	int thoff;
 
+	if (!pskb_inet_may_pull(skb))
+		goto tx_err;
+
 	if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
 		goto tx_err;
 
@@ -1011,8 +1014,6 @@
 			goto tx_err;
 		}
 	} else {
-		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
-
 		switch (skb->protocol) {
 		case htons(ETH_P_IP):
 			memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
@@ -1020,7 +1021,7 @@
 						 &dsfield, &encap_limit);
 			break;
 		case htons(ETH_P_IPV6):
-			if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
+			if (ipv6_addr_equal(&t->parms.raddr, &ipv6_hdr(skb)->saddr))
 				goto tx_err;
 			if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6,
 						     &dsfield, &encap_limit))
@@ -1047,8 +1048,9 @@
 	}
 
 	/* Push GRE header. */
-	gre_build_header(skb, 8, TUNNEL_SEQ,
-			 htons(ETH_P_ERSPAN), 0, htonl(t->o_seqno++));
+	proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
+					   : htons(ETH_P_ERSPAN2);
+	gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++));
 
 	/* TooBig packet may have updated dst->dev's mtu */
 	if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
@@ -1181,6 +1183,10 @@
 	t->parms.i_flags = p->i_flags;
 	t->parms.o_flags = p->o_flags;
 	t->parms.fwmark = p->fwmark;
+	t->parms.erspan_ver = p->erspan_ver;
+	t->parms.index = p->index;
+	t->parms.dir = p->dir;
+	t->parms.hwid = p->hwid;
 	dst_cache_reset(&t->dst_cache);
 }
 
@@ -2043,9 +2049,9 @@
 			     struct nlattr *data[],
 			     struct netlink_ext_ack *extack)
 {
-	struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
+	struct ip6_tnl *t = netdev_priv(dev);
+	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
 	struct __ip6_tnl_parm p;
-	struct ip6_tnl *t;
 
 	t = ip6gre_changelink_common(dev, tb, data, &p, extack);
 	if (IS_ERR(t))
@@ -2114,12 +2120,17 @@
 {
 	struct ip6_tnl *t = netdev_priv(dev);
 	struct __ip6_tnl_parm *p = &t->parms;
+	__be16 o_flags = p->o_flags;
+
+	if ((p->erspan_ver == 1 || p->erspan_ver == 2) &&
+	    !p->collect_md)
+		o_flags |= TUNNEL_KEY;
 
 	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
 	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
 			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
 	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
-			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
+			 gre_tnl_flags_to_gre_flags(o_flags)) ||
 	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
 	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
 	    nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 2694def..0bb87f3 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -378,6 +378,7 @@
 	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
 	__IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
 
+	skb->tstamp = 0;
 	return dst_output(net, sk, skb);
 }
 
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index a9d06d4..0c6403c 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -901,6 +901,7 @@
 			goto drop;
 		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
 			goto drop;
+		ipv6h = ipv6_hdr(skb);
 		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
 			goto drop;
 		if (iptunnel_pull_header(skb, 0, tpi->proto, false))
@@ -1242,10 +1243,6 @@
 	u8 tproto;
 	int err;
 
-	/* ensure we can access the full inner ip header */
-	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
-		return -1;
-
 	iph = ip_hdr(skb);
 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 
@@ -1320,9 +1317,6 @@
 	u8 tproto;
 	int err;
 
-	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
-		return -1;
-
 	ipv6h = ipv6_hdr(skb);
 	tproto = READ_ONCE(t->parms.proto);
 	if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
@@ -1404,6 +1398,9 @@
 	struct net_device_stats *stats = &t->dev->stats;
 	int ret;
 
+	if (!pskb_inet_may_pull(skb))
+		goto tx_err;
+
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
 		ret = ip4ip6_tnl_xmit(skb, dev);
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
index b283f29..caad40d 100644
--- a/net/ipv6/ip6_udp_tunnel.c
+++ b/net/ipv6/ip6_udp_tunnel.c
@@ -15,7 +15,7 @@
 int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
 		     struct socket **sockp)
 {
-	struct sockaddr_in6 udp6_addr;
+	struct sockaddr_in6 udp6_addr = {};
 	int err;
 	struct socket *sock = NULL;
 
@@ -42,6 +42,7 @@
 		goto error;
 
 	if (cfg->peer_udp_port) {
+		memset(&udp6_addr, 0, sizeof(udp6_addr));
 		udp6_addr.sin6_family = AF_INET6;
 		memcpy(&udp6_addr.sin6_addr, &cfg->peer_ip6,
 		       sizeof(udp6_addr.sin6_addr));
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index eeaf745..8b6eeff 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -318,6 +318,7 @@
 			return 0;
 		}
 
+		ipv6h = ipv6_hdr(skb);
 		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) {
 			t->dev->stats.rx_dropped++;
 			rcu_read_unlock();
@@ -521,18 +522,18 @@
 {
 	struct ip6_tnl *t = netdev_priv(dev);
 	struct net_device_stats *stats = &t->dev->stats;
-	struct ipv6hdr *ipv6h;
 	struct flowi fl;
 	int ret;
 
+	if (!pskb_inet_may_pull(skb))
+		goto tx_err;
+
 	memset(&fl, 0, sizeof(fl));
 
 	switch (skb->protocol) {
 	case htons(ETH_P_IPV6):
-		ipv6h = ipv6_hdr(skb);
-
 		if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
-		    vti6_addr_conflict(t, ipv6h))
+		    vti6_addr_conflict(t, ipv6_hdr(skb)))
 			goto tx_err;
 
 		xfrm_decode_session(skb, &fl, AF_INET6);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index d0b7e02..10aafea 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -51,6 +51,9 @@
 #include <linux/export.h>
 #include <net/ip6_checksum.h>
 #include <linux/netconf.h>
+#include <net/ip_tunnels.h>
+
+#include <linux/nospec.h>
 
 struct ip6mr_rule {
 	struct fib_rule		common;
@@ -591,13 +594,12 @@
 		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
 		.flowi6_mark	= skb->mark,
 	};
-	int err;
 
-	err = ip6mr_fib_lookup(net, &fl6, &mrt);
-	if (err < 0) {
-		kfree_skb(skb);
-		return err;
-	}
+	if (!pskb_inet_may_pull(skb))
+		goto tx_err;
+
+	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
+		goto tx_err;
 
 	read_lock(&mrt_lock);
 	dev->stats.tx_bytes += skb->len;
@@ -606,6 +608,11 @@
 	read_unlock(&mrt_lock);
 	kfree_skb(skb);
 	return NETDEV_TX_OK;
+
+tx_err:
+	dev->stats.tx_errors++;
+	kfree_skb(skb);
+	return NETDEV_TX_OK;
 }
 
 static int reg_vif_get_iflink(const struct net_device *dev)
@@ -1499,6 +1506,9 @@
 			continue;
 		rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params);
 		list_del_rcu(&c->list);
+		call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
+					       FIB_EVENT_ENTRY_DEL,
+					       (struct mfc6_cache *)c, mrt->id);
 		mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
 		mr_cache_put(c);
 	}
@@ -1507,10 +1517,6 @@
 		spin_lock_bh(&mfc_unres_lock);
 		list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
 			list_del(&c->list);
-			call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
-						       FIB_EVENT_ENTRY_DEL,
-						       (struct mfc6_cache *)c,
-						       mrt->id);
 			mr6_netlink_event(mrt, (struct mfc6_cache *)c,
 					  RTM_DELROUTE);
 			ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
@@ -1831,6 +1837,7 @@
 			return -EFAULT;
 		if (vr.mifi >= mrt->maxvif)
 			return -EINVAL;
+		vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
 		read_lock(&mrt_lock);
 		vif = &mrt->vif_table[vr.mifi];
 		if (VIF_EXISTS(mrt, vr.mifi)) {
@@ -1905,6 +1912,7 @@
 			return -EFAULT;
 		if (vr.mifi >= mrt->maxvif)
 			return -EINVAL;
+		vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
 		read_lock(&mrt_lock);
 		vif = &mrt->vif_table[vr.mifi];
 		if (VIF_EXISTS(mrt, vr.mifi)) {
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index d3fd2d7..7c94339 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -384,6 +384,7 @@
 		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
 			kfree_skb_partial(fp, headstolen);
 		} else {
+			fp->sk = NULL;
 			if (!skb_shinfo(head)->frag_list)
 				skb_shinfo(head)->frag_list = fp;
 			head->data_len += fp->len;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 9959c9c..7b832c3 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -210,7 +210,9 @@
 	n = __ipv6_neigh_lookup(dev, daddr);
 	if (n)
 		return n;
-	return neigh_create(&nd_tbl, daddr, dev);
+
+	n = neigh_create(&nd_tbl, daddr, dev);
+	return IS_ERR(n) ? NULL : n;
 }
 
 static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index 8181ee7..ee5403c 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -146,6 +146,8 @@
 	} else {
 		ip6_flow_hdr(hdr, 0, flowlabel);
 		hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb));
+
+		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
 	}
 
 	hdr->nexthdr = NEXTHDR_ROUTING;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index e9400ff..eb162bd 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1021,6 +1021,9 @@
 static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb,
 				   struct net_device *dev)
 {
+	if (!pskb_inet_may_pull(skb))
+		goto tx_err;
+
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
 		sit_tunnel_xmit__(skb, dev, IPPROTO_IPIP);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index e2e3aa8..f13382e 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1081,15 +1081,23 @@
 		const int hlen = skb_network_header_len(skb) +
 				 sizeof(struct udphdr);
 
-		if (hlen + cork->gso_size > cork->fragsize)
+		if (hlen + cork->gso_size > cork->fragsize) {
+			kfree_skb(skb);
 			return -EINVAL;
-		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS)
+		}
+		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
+			kfree_skb(skb);
 			return -EINVAL;
-		if (udp_sk(sk)->no_check6_tx)
+		}
+		if (udp_sk(sk)->no_check6_tx) {
+			kfree_skb(skb);
 			return -EINVAL;
+		}
 		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
-		    dst_xfrm(skb_dst(skb)))
+		    dst_xfrm(skb_dst(skb))) {
+			kfree_skb(skb);
 			return -EIO;
+		}
 
 		skb_shinfo(skb)->gso_size = cork->gso_size;
 		skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 26f1d43..fed6bec 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -83,8 +83,7 @@
 #define L2TP_SLFLAG_S	   0x40000000
 #define L2TP_SL_SEQ_MASK   0x00ffffff
 
-#define L2TP_HDR_SIZE_SEQ		10
-#define L2TP_HDR_SIZE_NOSEQ		6
+#define L2TP_HDR_SIZE_MAX		14
 
 /* Default trace flags */
 #define L2TP_DEFAULT_DEBUG_FLAGS	0
@@ -808,7 +807,7 @@
 	__skb_pull(skb, sizeof(struct udphdr));
 
 	/* Short packet? */
-	if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
+	if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
 		l2tp_info(tunnel, L2TP_MSG_DATA,
 			  "%s: recv short packet (len=%d)\n",
 			  tunnel->name, skb->len);
@@ -884,6 +883,10 @@
 		goto error;
 	}
 
+	if (tunnel->version == L2TP_HDR_VER_3 &&
+	    l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+		goto error;
+
 	l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
 	l2tp_session_dec_refcount(session);
 
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 9c9afe9..b2ce902 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -301,6 +301,26 @@
 }
 #endif
 
+static inline int l2tp_v3_ensure_opt_in_linear(struct l2tp_session *session, struct sk_buff *skb,
+					       unsigned char **ptr, unsigned char **optr)
+{
+	int opt_len = session->peer_cookie_len + l2tp_get_l2specific_len(session);
+
+	if (opt_len > 0) {
+		int off = *ptr - *optr;
+
+		if (!pskb_may_pull(skb, off + opt_len))
+			return -1;
+
+		if (skb->data != *optr) {
+			*optr = skb->data;
+			*ptr = skb->data + off;
+		}
+	}
+
+	return 0;
+}
+
 #define l2tp_printk(ptr, type, func, fmt, ...)				\
 do {									\
 	if (((ptr)->debug) & (type))					\
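l2tp_v3_ensure_opt_in_linear() guards l2tp_recv_common() against L2TPv3 cookies and L2-specific sublayers that sit in paged data: it pulls them into the linear area and, because pskb_may_pull() may reallocate the skb head, re-derives the caller's optr/ptr cursors from the new skb->data. Caller-side shape, mirrored by the l2tp_ip and l2tp_ip6 hunks below:

	/* ptr/optr are updated in place if the head was reallocated */
	if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
		goto discard_sess;	/* options truncated: drop */

	l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);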
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 35f6f86..d4c6052 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -165,6 +165,9 @@
 		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
 	}
 
+	if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+		goto discard_sess;
+
 	l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
 	l2tp_session_dec_refcount(session);
 
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 237f1a4..0ae6899 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -178,6 +178,9 @@
 		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
 	}
 
+	if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+		goto discard_sess;
+
 	l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
 	l2tp_session_dec_refcount(session);
 
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 5f3c81e..3a0171a 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -7,6 +7,7 @@
  * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright (c) 2016        Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -1951,6 +1952,8 @@
 	WARN(local->open_count, "%s: open count remains %d\n",
 	     wiphy_name(local->hw.wiphy), local->open_count);
 
+	ieee80211_txq_teardown_flows(local);
+
 	mutex_lock(&local->iflist_mtx);
 	list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
 		list_del(&sdata->list);
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 5136278..68db2a3 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -1198,7 +1198,6 @@
 	rtnl_unlock();
 	ieee80211_led_exit(local);
 	ieee80211_wep_free(local);
-	ieee80211_txq_teardown_flows(local);
  fail_flows:
 	destroy_workqueue(local->workqueue);
  fail_workqueue:
@@ -1224,7 +1223,6 @@
 #if IS_ENABLED(CONFIG_IPV6)
 	unregister_inet6addr_notifier(&local->ifa6_notifier);
 #endif
-	ieee80211_txq_teardown_flows(local);
 
 	rtnl_lock();
 
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 7fa10d0..534a604 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -556,6 +556,11 @@
 	}
 
 	ieee80211_led_tx(local);
+
+	if (skb_has_frag_list(skb)) {
+		kfree_skb_list(skb_shinfo(skb)->frag_list);
+		skb_shinfo(skb)->frag_list = NULL;
+	}
 }
 
 /*
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 1b371f6..4dba997 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -884,6 +884,20 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config NETFILTER_XT_TARGET_HARDIDLETIMER
+	tristate "HARDIDLETIMER target support"
+	depends on NETFILTER_ADVANCED
+	help
+
+	  This option adds the `HARDIDLETIMER' target.  Each matching packet
+	  resets the timer associated with the label specified when the rule
+	  is added.  When the timer expires, it triggers a sysfs notification.
+	  The remaining time until expiration can be read via sysfs.
+	  Unlike IDLETIMER, HARDIDLETIMER also sends the notification while
+	  the CPU is in suspend.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 config NETFILTER_XT_TARGET_LED
 	tristate '"LED" target support'
 	depends on LEDS_CLASS && LEDS_TRIGGERS
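For context, the target's state is surfaced through sysfs and kobject uevents (see the xt_HARDIDLETIMER.c diff below). A hypothetical userspace reader, assuming the module exposes per-label files the way xt_IDLETIMER does; the path is illustrative, not taken from this patch:

	#include <stdio.h>

	int main(void)
	{
		char buf[32];
		/* hypothetical path mirroring xt_IDLETIMER's sysfs layout */
		FILE *f = fopen("/sys/class/xt_hardidletimer/timers/wan0_timer", "r");

		if (!f)
			return 1;
		if (fgets(buf, sizeof(buf), f))
			printf("seconds until expiry: %s", buf);
		fclose(f);
		return 0;
	}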
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index f2c701e..ad603cd 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -156,6 +156,7 @@
 obj-$(CONFIG_NETFILTER_XT_TARGET_TEE) += xt_TEE.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_TRACE) += xt_TRACE.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_IDLETIMER) += xt_IDLETIMER.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER) += xt_HARDIDLETIMER.o
 
 # matches
 obj-$(CONFIG_NETFILTER_XT_MATCH_ADDRTYPE) += xt_addrtype.o
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index c00b6a2..13ade57 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -219,10 +219,6 @@
 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 	u32 ip;
 
-	/* MAC can be src only */
-	if (!(opt->flags & IPSET_DIM_TWO_SRC))
-		return 0;
-
 	ip = ntohl(ip4addr(skb, opt->flags & IPSET_DIM_ONE_SRC));
 	if (ip < map->first_ip || ip > map->last_ip)
 		return -IPSET_ERR_BITMAP_RANGE;
@@ -233,7 +229,11 @@
 		return -EINVAL;
 
 	e.id = ip_to_id(map, ip);
-	memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN);
+
+	if (opt->flags & IPSET_DIM_ONE_SRC)
+		ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
+	else
+		ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
 
 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
 }
diff --git a/net/netfilter/ipset/ip_set_hash_ipmac.c b/net/netfilter/ipset/ip_set_hash_ipmac.c
index 1ab5ed2..fd87de3 100644
--- a/net/netfilter/ipset/ip_set_hash_ipmac.c
+++ b/net/netfilter/ipset/ip_set_hash_ipmac.c
@@ -103,7 +103,11 @@
 	    (skb_mac_header(skb) + ETH_HLEN) > skb->data)
 		return -EINVAL;
 
-	memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN);
+	if (opt->flags & IPSET_DIM_ONE_SRC)
+		ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
+	else
+		ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
+
 	if (ether_addr_equal(e.ether, invalid_ether))
 		return -EINVAL;
 
@@ -211,15 +215,15 @@
 	};
 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
-	 /* MAC can be src only */
-	if (!(opt->flags & IPSET_DIM_TWO_SRC))
-		return 0;
-
 	if (skb_mac_header(skb) < skb->head ||
 	    (skb_mac_header(skb) + ETH_HLEN) > skb->data)
 		return -EINVAL;
 
-	memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN);
+	if (opt->flags & IPSET_DIM_ONE_SRC)
+		ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
+	else
+		ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
+
 	if (ether_addr_equal(e.ether, invalid_ether))
 		return -EINVAL;
 
diff --git a/net/netfilter/ipset/ip_set_hash_mac.c b/net/netfilter/ipset/ip_set_hash_mac.c
index f9d5a2a..4fe5f24 100644
--- a/net/netfilter/ipset/ip_set_hash_mac.c
+++ b/net/netfilter/ipset/ip_set_hash_mac.c
@@ -81,15 +81,15 @@
 	struct hash_mac4_elem e = { { .foo[0] = 0, .foo[1] = 0 } };
 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
-	 /* MAC can be src only */
-	if (!(opt->flags & IPSET_DIM_ONE_SRC))
-		return 0;
-
 	if (skb_mac_header(skb) < skb->head ||
 	    (skb_mac_header(skb) + ETH_HLEN) > skb->data)
 		return -EINVAL;
 
-	ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
+	if (opt->flags & IPSET_DIM_ONE_SRC)
+		ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
+	else
+		ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
+
 	if (is_zero_ether_addr(e.ether))
 		return -EINVAL;
 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 4eef55d..8da228d 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -531,8 +531,8 @@
 		ret = -EMSGSIZE;
 	} else {
 		cb->args[IPSET_CB_ARG0] = i;
+		ipset_nest_end(skb, atd);
 	}
-	ipset_nest_end(skb, atd);
 out:
 	rcu_read_unlock();
 	return ret;
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
index b6d0f6d..7554c56 100644
--- a/net/netfilter/nf_conncount.c
+++ b/net/netfilter/nf_conncount.c
@@ -33,12 +33,6 @@
 
 #define CONNCOUNT_SLOTS		256U
 
-#ifdef CONFIG_LOCKDEP
-#define CONNCOUNT_LOCK_SLOTS	8U
-#else
-#define CONNCOUNT_LOCK_SLOTS	256U
-#endif
-
 #define CONNCOUNT_GC_MAX_NODES	8
 #define MAX_KEYLEN		5
 
@@ -49,8 +43,6 @@
 	struct nf_conntrack_zone	zone;
 	int				cpu;
 	u32				jiffies32;
-	bool				dead;
-	struct rcu_head			rcu_head;
 };
 
 struct nf_conncount_rb {
@@ -60,7 +52,7 @@
 	struct rcu_head rcu_head;
 };
 
-static spinlock_t nf_conncount_locks[CONNCOUNT_LOCK_SLOTS] __cacheline_aligned_in_smp;
+static spinlock_t nf_conncount_locks[CONNCOUNT_SLOTS] __cacheline_aligned_in_smp;
 
 struct nf_conncount_data {
 	unsigned int keylen;
@@ -89,79 +81,25 @@
 	return memcmp(a, b, klen * sizeof(u32));
 }
 
-enum nf_conncount_list_add
-nf_conncount_add(struct nf_conncount_list *list,
-		 const struct nf_conntrack_tuple *tuple,
-		 const struct nf_conntrack_zone *zone)
-{
-	struct nf_conncount_tuple *conn;
-
-	if (WARN_ON_ONCE(list->count > INT_MAX))
-		return NF_CONNCOUNT_ERR;
-
-	conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
-	if (conn == NULL)
-		return NF_CONNCOUNT_ERR;
-
-	conn->tuple = *tuple;
-	conn->zone = *zone;
-	conn->cpu = raw_smp_processor_id();
-	conn->jiffies32 = (u32)jiffies;
-	conn->dead = false;
-	spin_lock_bh(&list->list_lock);
-	if (list->dead == true) {
-		kmem_cache_free(conncount_conn_cachep, conn);
-		spin_unlock_bh(&list->list_lock);
-		return NF_CONNCOUNT_SKIP;
-	}
-	list_add_tail(&conn->node, &list->head);
-	list->count++;
-	spin_unlock_bh(&list->list_lock);
-	return NF_CONNCOUNT_ADDED;
-}
-EXPORT_SYMBOL_GPL(nf_conncount_add);
-
-static void __conn_free(struct rcu_head *h)
-{
-	struct nf_conncount_tuple *conn;
-
-	conn = container_of(h, struct nf_conncount_tuple, rcu_head);
-	kmem_cache_free(conncount_conn_cachep, conn);
-}
-
-static bool conn_free(struct nf_conncount_list *list,
+static void conn_free(struct nf_conncount_list *list,
 		      struct nf_conncount_tuple *conn)
 {
-	bool free_entry = false;
-
-	spin_lock_bh(&list->list_lock);
-
-	if (conn->dead) {
-		spin_unlock_bh(&list->list_lock);
-		return free_entry;
-	}
+	lockdep_assert_held(&list->list_lock);
 
 	list->count--;
-	conn->dead = true;
-	list_del_rcu(&conn->node);
-	if (list->count == 0) {
-		list->dead = true;
-		free_entry = true;
-	}
+	list_del(&conn->node);
 
-	spin_unlock_bh(&list->list_lock);
-	call_rcu(&conn->rcu_head, __conn_free);
-	return free_entry;
+	kmem_cache_free(conncount_conn_cachep, conn);
 }
 
 static const struct nf_conntrack_tuple_hash *
 find_or_evict(struct net *net, struct nf_conncount_list *list,
-	      struct nf_conncount_tuple *conn, bool *free_entry)
+	      struct nf_conncount_tuple *conn)
 {
 	const struct nf_conntrack_tuple_hash *found;
 	unsigned long a, b;
 	int cpu = raw_smp_processor_id();
-	__s32 age;
+	u32 age;
 
 	found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
 	if (found)
@@ -176,52 +114,45 @@
 	 */
 	age = a - b;
 	if (conn->cpu == cpu || age >= 2) {
-		*free_entry = conn_free(list, conn);
+		conn_free(list, conn);
 		return ERR_PTR(-ENOENT);
 	}
 
 	return ERR_PTR(-EAGAIN);
 }
 
-void nf_conncount_lookup(struct net *net,
-			 struct nf_conncount_list *list,
-			 const struct nf_conntrack_tuple *tuple,
-			 const struct nf_conntrack_zone *zone,
-			 bool *addit)
+static int __nf_conncount_add(struct net *net,
+			      struct nf_conncount_list *list,
+			      const struct nf_conntrack_tuple *tuple,
+			      const struct nf_conntrack_zone *zone)
 {
 	const struct nf_conntrack_tuple_hash *found;
 	struct nf_conncount_tuple *conn, *conn_n;
 	struct nf_conn *found_ct;
 	unsigned int collect = 0;
-	bool free_entry = false;
-
-	/* best effort only */
-	*addit = tuple ? true : false;
 
 	/* check the saved connections */
 	list_for_each_entry_safe(conn, conn_n, &list->head, node) {
 		if (collect > CONNCOUNT_GC_MAX_NODES)
 			break;
 
-		found = find_or_evict(net, list, conn, &free_entry);
+		found = find_or_evict(net, list, conn);
 		if (IS_ERR(found)) {
 			/* Not found, but might be about to be confirmed */
 			if (PTR_ERR(found) == -EAGAIN) {
-				if (!tuple)
-					continue;
-
 				if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
 				    nf_ct_zone_id(&conn->zone, conn->zone.dir) ==
 				    nf_ct_zone_id(zone, zone->dir))
-					*addit = false;
-			} else if (PTR_ERR(found) == -ENOENT)
+					return 0; /* already exists */
+			} else {
 				collect++;
+			}
 			continue;
 		}
 
 		found_ct = nf_ct_tuplehash_to_ctrack(found);
 
-		if (tuple && nf_ct_tuple_equal(&conn->tuple, tuple) &&
+		if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
 		    nf_ct_zone_equal(found_ct, zone, zone->dir)) {
 			/*
 			 * We should not see tuples twice unless someone hooks
@@ -229,7 +160,8 @@
 			 *
 			 * Attempt to avoid a re-add in this case.
 			 */
-			*addit = false;
+			nf_ct_put(found_ct);
+			return 0;
 		} else if (already_closed(found_ct)) {
 			/*
 			 * we do not care about connections which are
@@ -243,19 +175,48 @@
 
 		nf_ct_put(found_ct);
 	}
+
+	if (WARN_ON_ONCE(list->count > INT_MAX))
+		return -EOVERFLOW;
+
+	conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
+	if (conn == NULL)
+		return -ENOMEM;
+
+	conn->tuple = *tuple;
+	conn->zone = *zone;
+	conn->cpu = raw_smp_processor_id();
+	conn->jiffies32 = (u32)jiffies;
+	list_add_tail(&conn->node, &list->head);
+	list->count++;
+	return 0;
 }
-EXPORT_SYMBOL_GPL(nf_conncount_lookup);
+
+int nf_conncount_add(struct net *net,
+		     struct nf_conncount_list *list,
+		     const struct nf_conntrack_tuple *tuple,
+		     const struct nf_conntrack_zone *zone)
+{
+	int ret;
+
+	/* check the saved connections */
+	spin_lock_bh(&list->list_lock);
+	ret = __nf_conncount_add(net, list, tuple, zone);
+	spin_unlock_bh(&list->list_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nf_conncount_add);
 
 void nf_conncount_list_init(struct nf_conncount_list *list)
 {
 	spin_lock_init(&list->list_lock);
 	INIT_LIST_HEAD(&list->head);
 	list->count = 0;
-	list->dead = false;
 }
 EXPORT_SYMBOL_GPL(nf_conncount_list_init);
 
-/* Return true if the list is empty */
+/* Return true if the list is empty. Must be called with BH disabled. */
 bool nf_conncount_gc_list(struct net *net,
 			  struct nf_conncount_list *list)
 {
@@ -263,17 +224,17 @@
 	struct nf_conncount_tuple *conn, *conn_n;
 	struct nf_conn *found_ct;
 	unsigned int collected = 0;
-	bool free_entry = false;
 	bool ret = false;
 
+	/* don't bother if other cpu is already doing GC */
+	if (!spin_trylock(&list->list_lock))
+		return false;
+
 	list_for_each_entry_safe(conn, conn_n, &list->head, node) {
-		found = find_or_evict(net, list, conn, &free_entry);
+		found = find_or_evict(net, list, conn);
 		if (IS_ERR(found)) {
-			if (PTR_ERR(found) == -ENOENT)  {
-				if (free_entry)
-					return true;
+			if (PTR_ERR(found) == -ENOENT)
 				collected++;
-			}
 			continue;
 		}
 
@@ -284,23 +245,19 @@
 			 * closed already -> ditch it
 			 */
 			nf_ct_put(found_ct);
-			if (conn_free(list, conn))
-				return true;
+			conn_free(list, conn);
 			collected++;
 			continue;
 		}
 
 		nf_ct_put(found_ct);
 		if (collected > CONNCOUNT_GC_MAX_NODES)
-			return false;
+			break;
 	}
 
-	spin_lock_bh(&list->list_lock);
-	if (!list->count) {
-		list->dead = true;
+	if (!list->count)
 		ret = true;
-	}
-	spin_unlock_bh(&list->list_lock);
+	spin_unlock(&list->list_lock);
 
 	return ret;
 }
@@ -314,6 +271,7 @@
 	kmem_cache_free(conncount_rb_cachep, rbconn);
 }
 
+/* caller must hold tree nf_conncount_locks[] lock */
 static void tree_nodes_free(struct rb_root *root,
 			    struct nf_conncount_rb *gc_nodes[],
 			    unsigned int gc_count)
@@ -323,8 +281,10 @@
 	while (gc_count) {
 		rbconn = gc_nodes[--gc_count];
 		spin_lock(&rbconn->list.list_lock);
-		rb_erase(&rbconn->node, root);
-		call_rcu(&rbconn->rcu_head, __tree_nodes_free);
+		if (!rbconn->list.count) {
+			rb_erase(&rbconn->node, root);
+			call_rcu(&rbconn->rcu_head, __tree_nodes_free);
+		}
 		spin_unlock(&rbconn->list.list_lock);
 	}
 }
@@ -341,20 +301,19 @@
 	    struct rb_root *root,
 	    unsigned int hash,
 	    const u32 *key,
-	    u8 keylen,
 	    const struct nf_conntrack_tuple *tuple,
 	    const struct nf_conntrack_zone *zone)
 {
-	enum nf_conncount_list_add ret;
 	struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES];
 	struct rb_node **rbnode, *parent;
 	struct nf_conncount_rb *rbconn;
 	struct nf_conncount_tuple *conn;
 	unsigned int count = 0, gc_count = 0;
-	bool node_found = false;
+	u8 keylen = data->keylen;
+	bool do_gc = true;
 
-	spin_lock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
-
+	spin_lock_bh(&nf_conncount_locks[hash]);
+restart:
 	parent = NULL;
 	rbnode = &(root->rb_node);
 	while (*rbnode) {
@@ -368,45 +327,32 @@
 		} else if (diff > 0) {
 			rbnode = &((*rbnode)->rb_right);
 		} else {
-			/* unlikely: other cpu added node already */
-			node_found = true;
-			ret = nf_conncount_add(&rbconn->list, tuple, zone);
-			if (ret == NF_CONNCOUNT_ERR) {
+			int ret;
+
+			ret = nf_conncount_add(net, &rbconn->list, tuple, zone);
+			if (ret)
 				count = 0; /* hotdrop */
-			} else if (ret == NF_CONNCOUNT_ADDED) {
+			else
 				count = rbconn->list.count;
-			} else {
-				/* NF_CONNCOUNT_SKIP, rbconn is already
-				 * reclaimed by gc, insert a new tree node
-				 */
-				node_found = false;
-			}
-			break;
+			tree_nodes_free(root, gc_nodes, gc_count);
+			goto out_unlock;
 		}
 
 		if (gc_count >= ARRAY_SIZE(gc_nodes))
 			continue;
 
-		if (nf_conncount_gc_list(net, &rbconn->list))
+		if (do_gc && nf_conncount_gc_list(net, &rbconn->list))
 			gc_nodes[gc_count++] = rbconn;
 	}
 
 	if (gc_count) {
 		tree_nodes_free(root, gc_nodes, gc_count);
-		/* tree_node_free before new allocation permits
-		 * allocator to re-use newly free'd object.
-		 *
-		 * This is a rare event; in most cases we will find
-		 * existing node to re-use. (or gc_count is 0).
-		 */
-
-		if (gc_count >= ARRAY_SIZE(gc_nodes))
-			schedule_gc_worker(data, hash);
+		schedule_gc_worker(data, hash);
+		gc_count = 0;
+		do_gc = false;
+		goto restart;
 	}
 
-	if (node_found)
-		goto out_unlock;
-
 	/* expected case: match, insert new node */
 	rbconn = kmem_cache_alloc(conncount_rb_cachep, GFP_ATOMIC);
 	if (rbconn == NULL)
@@ -427,10 +373,10 @@
 	count = 1;
 	rbconn->list.count = count;
 
-	rb_link_node(&rbconn->node, parent, rbnode);
+	rb_link_node_rcu(&rbconn->node, parent, rbnode);
 	rb_insert_color(&rbconn->node, root);
 out_unlock:
-	spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
+	spin_unlock_bh(&nf_conncount_locks[hash]);
 	return count;
 }
 
@@ -441,7 +387,6 @@
 	   const struct nf_conntrack_tuple *tuple,
 	   const struct nf_conntrack_zone *zone)
 {
-	enum nf_conncount_list_add ret;
 	struct rb_root *root;
 	struct rb_node *parent;
 	struct nf_conncount_rb *rbconn;
@@ -454,7 +399,6 @@
 	parent = rcu_dereference_raw(root->rb_node);
 	while (parent) {
 		int diff;
-		bool addit;
 
 		rbconn = rb_entry(parent, struct nf_conncount_rb, node);
 
@@ -464,31 +408,36 @@
 		} else if (diff > 0) {
 			parent = rcu_dereference_raw(parent->rb_right);
 		} else {
-			/* same source network -> be counted! */
-			nf_conncount_lookup(net, &rbconn->list, tuple, zone,
-					    &addit);
+			int ret;
 
-			if (!addit)
+			if (!tuple) {
+				nf_conncount_gc_list(net, &rbconn->list);
 				return rbconn->list.count;
+			}
 
-			ret = nf_conncount_add(&rbconn->list, tuple, zone);
-			if (ret == NF_CONNCOUNT_ERR) {
-				return 0; /* hotdrop */
-			} else if (ret == NF_CONNCOUNT_ADDED) {
-				return rbconn->list.count;
-			} else {
-				/* NF_CONNCOUNT_SKIP, rbconn is already
-				 * reclaimed by gc, insert a new tree node
-				 */
+			spin_lock_bh(&rbconn->list.list_lock);
+			/* Node might be about to be free'd.
+			 * We need to defer to insert_tree() in this case.
+			 */
+			if (rbconn->list.count == 0) {
+				spin_unlock_bh(&rbconn->list.list_lock);
 				break;
 			}
+
+			/* same source network -> be counted! */
+			ret = __nf_conncount_add(net, &rbconn->list, tuple, zone);
+			spin_unlock_bh(&rbconn->list.list_lock);
+			if (ret)
+				return 0; /* hotdrop */
+			else
+				return rbconn->list.count;
 		}
 	}
 
 	if (!tuple)
 		return 0;
 
-	return insert_tree(net, data, root, hash, key, keylen, tuple, zone);
+	return insert_tree(net, data, root, hash, key, tuple, zone);
 }
 
 static void tree_gc_worker(struct work_struct *work)
@@ -499,27 +448,47 @@
 	struct rb_node *node;
 	unsigned int tree, next_tree, gc_count = 0;
 
-	tree = data->gc_tree % CONNCOUNT_LOCK_SLOTS;
+	tree = data->gc_tree % CONNCOUNT_SLOTS;
 	root = &data->root[tree];
 
+	local_bh_disable();
 	rcu_read_lock();
 	for (node = rb_first(root); node != NULL; node = rb_next(node)) {
 		rbconn = rb_entry(node, struct nf_conncount_rb, node);
 		if (nf_conncount_gc_list(data->net, &rbconn->list))
-			gc_nodes[gc_count++] = rbconn;
+			gc_count++;
 	}
 	rcu_read_unlock();
+	local_bh_enable();
+
+	cond_resched();
 
 	spin_lock_bh(&nf_conncount_locks[tree]);
+	if (gc_count < ARRAY_SIZE(gc_nodes))
+		goto next; /* do not bother */
 
-	if (gc_count) {
-		tree_nodes_free(root, gc_nodes, gc_count);
+	gc_count = 0;
+	node = rb_first(root);
+	while (node != NULL) {
+		rbconn = rb_entry(node, struct nf_conncount_rb, node);
+		node = rb_next(node);
+
+		if (rbconn->list.count > 0)
+			continue;
+
+		gc_nodes[gc_count++] = rbconn;
+		if (gc_count >= ARRAY_SIZE(gc_nodes)) {
+			tree_nodes_free(root, gc_nodes, gc_count);
+			gc_count = 0;
+		}
 	}
 
+	tree_nodes_free(root, gc_nodes, gc_count);
+next:
 	clear_bit(tree, data->pending_trees);
 
 	next_tree = (tree + 1) % CONNCOUNT_SLOTS;
-	next_tree = find_next_bit(data->pending_trees, next_tree, CONNCOUNT_SLOTS);
+	next_tree = find_next_bit(data->pending_trees, CONNCOUNT_SLOTS, next_tree);
 
 	if (next_tree < CONNCOUNT_SLOTS) {
 		data->gc_tree = next_tree;
@@ -621,10 +590,7 @@
 {
 	int i;
 
-	BUILD_BUG_ON(CONNCOUNT_LOCK_SLOTS > CONNCOUNT_SLOTS);
-	BUILD_BUG_ON((CONNCOUNT_SLOTS % CONNCOUNT_LOCK_SLOTS) != 0);
-
-	for (i = 0; i < CONNCOUNT_LOCK_SLOTS; ++i)
+	for (i = 0; i < CONNCOUNT_SLOTS; ++i)
 		spin_lock_init(&nf_conncount_locks[i]);
 
 	conncount_conn_cachep = kmem_cache_create("nf_conncount_tuple",
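The net effect of the nf_conncount rework above: the two-step nf_conncount_lookup() + nf_conncount_add() dance, with its NF_CONNCOUNT_ADDED/SKIP/ERR tri-state and RCU-deferred list frees, collapses into a single locked nf_conncount_add() that returns 0 or a -errno, with garbage collection done inline under list_lock. Caller shape after this patch, as the nft_connlimit hunk below shows:

	if (nf_conncount_add(net, &priv->list, tuple, zone)) {
		/* -ENOMEM or -EOVERFLOW: hotdrop */
		regs->verdict.code = NF_DROP;
		return;
	}
	count = priv->list.count;	/* includes the tuple just added */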
diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c
index a975efd..9da3034 100644
--- a/net/netfilter/nf_conntrack_seqadj.c
+++ b/net/netfilter/nf_conntrack_seqadj.c
@@ -115,12 +115,12 @@
 /* TCP SACK sequence number adjustment */
 static unsigned int nf_ct_sack_adjust(struct sk_buff *skb,
 				      unsigned int protoff,
-				      struct tcphdr *tcph,
 				      struct nf_conn *ct,
 				      enum ip_conntrack_info ctinfo)
 {
-	unsigned int dir, optoff, optend;
+	struct tcphdr *tcph = (void *)skb->data + protoff;
 	struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
+	unsigned int dir, optoff, optend;
 
 	optoff = protoff + sizeof(struct tcphdr);
 	optend = protoff + tcph->doff * 4;
@@ -128,6 +128,7 @@
 	if (!skb_make_writable(skb, optend))
 		return 0;
 
+	tcph = (void *)skb->data + protoff;
 	dir = CTINFO2DIR(ctinfo);
 
 	while (optoff < optend) {
@@ -207,7 +208,7 @@
 		 ntohl(newack));
 	tcph->ack_seq = newack;
 
-	res = nf_ct_sack_adjust(skb, protoff, tcph, ct, ctinfo);
+	res = nf_ct_sack_adjust(skb, protoff, ct, ctinfo);
 out:
 	spin_unlock_bh(&ct->lock);
 
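The nf_ct_sack_adjust() change drops the tcph parameter because skb_make_writable() may copy-on-write the skb head, invalidating any pointer computed from the old skb->data; the function now recomputes tcph after the writability check. The pattern in general:

	if (!skb_make_writable(skb, optend))
		return 0;
	/* skb->data may have moved: re-derive the header pointer */
	tcph = (void *)skb->data + protoff;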
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index e2b1960..2268b10 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -117,7 +117,8 @@
 	dst = skb_dst(skb);
 	if (dst->xfrm)
 		dst = ((struct xfrm_dst *)dst)->route;
-	dst_hold(dst);
+	if (!dst_hold_safe(dst))
+		return -EHOSTUNREACH;
 
 	if (sk && !net_eq(net, sock_net(sk)))
 		sk = NULL;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index fe0558b..ed9af46 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1199,7 +1199,8 @@
 		if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name))
 			goto nla_put_failure;
 
-		if (basechain->stats && nft_dump_stats(skb, basechain->stats))
+		if (rcu_access_pointer(basechain->stats) &&
+		    nft_dump_stats(skb, rcu_dereference(basechain->stats)))
 			goto nla_put_failure;
 	}
 
@@ -1375,7 +1376,8 @@
 	return newstats;
 }
 
-static void nft_chain_stats_replace(struct nft_base_chain *chain,
+static void nft_chain_stats_replace(struct net *net,
+				    struct nft_base_chain *chain,
 				    struct nft_stats __percpu *newstats)
 {
 	struct nft_stats __percpu *oldstats;
@@ -1383,8 +1385,9 @@
 	if (newstats == NULL)
 		return;
 
-	if (chain->stats) {
-		oldstats = nfnl_dereference(chain->stats, NFNL_SUBSYS_NFTABLES);
+	if (rcu_access_pointer(chain->stats)) {
+		oldstats = rcu_dereference_protected(chain->stats,
+					lockdep_commit_lock_is_held(net));
 		rcu_assign_pointer(chain->stats, newstats);
 		synchronize_rcu();
 		free_percpu(oldstats);
@@ -1421,9 +1424,10 @@
 		struct nft_base_chain *basechain = nft_base_chain(chain);
 
 		module_put(basechain->type->owner);
-		free_percpu(basechain->stats);
-		if (basechain->stats)
+		if (rcu_access_pointer(basechain->stats)) {
 			static_branch_dec(&nft_counters_enabled);
+			free_percpu(rcu_dereference_raw(basechain->stats));
+		}
 		kfree(chain->name);
 		kfree(basechain);
 	} else {
@@ -1572,7 +1576,7 @@
 				kfree(basechain);
 				return PTR_ERR(stats);
 			}
-			basechain->stats = stats;
+			rcu_assign_pointer(basechain->stats, stats);
 			static_branch_inc(&nft_counters_enabled);
 		}
 
@@ -6145,7 +6149,8 @@
 		return;
 
 	basechain = nft_base_chain(trans->ctx.chain);
-	nft_chain_stats_replace(basechain, nft_trans_chain_stats(trans));
+	nft_chain_stats_replace(trans->ctx.net, basechain,
+				nft_trans_chain_stats(trans));
 
 	switch (nft_trans_chain_policy(trans)) {
 	case NF_DROP:
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index ffd5c0f..60f258f 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -101,7 +101,7 @@
 	struct nft_stats *stats;
 
 	base_chain = nft_base_chain(chain);
-	if (!base_chain->stats)
+	if (!rcu_access_pointer(base_chain->stats))
 		return;
 
 	local_bh_disable();
diff --git a/net/netfilter/nft_connlimit.c b/net/netfilter/nft_connlimit.c
index b90d96b..af1497a 100644
--- a/net/netfilter/nft_connlimit.c
+++ b/net/netfilter/nft_connlimit.c
@@ -30,7 +30,6 @@
 	enum ip_conntrack_info ctinfo;
 	const struct nf_conn *ct;
 	unsigned int count;
-	bool addit;
 
 	tuple_ptr = &tuple;
 
@@ -44,19 +43,12 @@
 		return;
 	}
 
-	nf_conncount_lookup(nft_net(pkt), &priv->list, tuple_ptr, zone,
-			    &addit);
-	count = priv->list.count;
-
-	if (!addit)
-		goto out;
-
-	if (nf_conncount_add(&priv->list, tuple_ptr, zone) == NF_CONNCOUNT_ERR) {
+	if (nf_conncount_add(nft_net(pkt), &priv->list, tuple_ptr, zone)) {
 		regs->verdict.code = NF_DROP;
 		return;
 	}
-	count++;
-out:
+
+	count = priv->list.count;
 
 	if ((count > priv->limit) ^ priv->invert) {
 		regs->verdict.code = NFT_BREAK;
diff --git a/net/netfilter/xt_HARDIDLETIMER.c b/net/netfilter/xt_HARDIDLETIMER.c
new file mode 100644
index 0000000..055763b
--- /dev/null
+++ b/net/netfilter/xt_HARDIDLETIMER.c
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* linux/net/netfilter/xt_HARDIDLETIMER.c
+ *
+ * Netfilter module to trigger a timer when packet matches.
+ * After timer expires a kevent will be sent.
+ *
+ * Copyright (c) 2014-2015, 2017-2019, The Linux Foundation. All rights reserved.
+ *
+ * Copyright (C) 2004, 2010 Nokia Corporation
+ *
+ * Written by Timo Teras <ext-timo.teras@nokia.com>
+ *
+ * Converted to x_tables and reworked for upstream inclusion
+ * by Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/alarmtimer.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_HARDIDLETIMER.h>
+#include <linux/kdev_t.h>
+#include <linux/kobject.h>
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <linux/sysfs.h>
+#include <net/net_namespace.h>
+
+struct hardidletimer_tg_attr {
+	struct attribute attr;
+	ssize_t	(*show)(struct kobject *kobj,
+			struct attribute *attr, char *buf);
+};
+
+struct hardidletimer_tg {
+	struct list_head entry;
+	struct alarm alarm;
+	struct work_struct work;
+
+	struct kobject *kobj;
+	struct hardidletimer_tg_attr attr;
+
+	unsigned int refcnt;
+	bool send_nl_msg;
+	bool active;
+};
+
+static LIST_HEAD(hardidletimer_tg_list);
+static DEFINE_MUTEX(list_mutex);
+
+static struct kobject *hardidletimer_tg_kobj;
+
+static void notify_netlink_uevent(const char *iface,
+				  struct hardidletimer_tg *timer)
+{
+	char iface_msg[NLMSG_MAX_SIZE];
+	char state_msg[NLMSG_MAX_SIZE];
+	char *envp[] = { iface_msg, state_msg, NULL };
+	int res;
+
+	res = snprintf(iface_msg, NLMSG_MAX_SIZE, "INTERFACE=%s",
+		       iface);
+	if (res >= NLMSG_MAX_SIZE) {
+		pr_err("message too long (%d)", res);
+		return;
+	}
+	res = snprintf(state_msg, NLMSG_MAX_SIZE, "STATE=%s",
+		       timer->active ? "active" : "inactive");
+	if (res >= NLMSG_MAX_SIZE) {
+		pr_err("message too long (%d)", res);
+		return;
+	}
+	pr_debug("putting nlmsg: <%s> <%s>\n", iface_msg, state_msg);
+	kobject_uevent_env(hardidletimer_tg_kobj, KOBJ_CHANGE, envp);
+}
+
+static
+struct hardidletimer_tg *__hardidletimer_tg_find_by_label(const char *label)
+{
+	struct hardidletimer_tg *entry;
+
+	WARN_ON(!label);
+
+	list_for_each_entry(entry, &hardidletimer_tg_list, entry) {
+		if (!strcmp(label, entry->attr.attr.name))
+			return entry;
+	}
+
+	return NULL;
+}
+
+static ssize_t hardidletimer_tg_show(struct kobject *kobj,
+				     struct attribute *attr, char *buf)
+{
+	struct hardidletimer_tg *timer;
+	ktime_t expires;
+	struct timespec ktimespec;
+
+	memset(&ktimespec, 0, sizeof(struct timespec));
+	mutex_lock(&list_mutex);
+
+	timer = __hardidletimer_tg_find_by_label(attr->name);
+	if (timer) {
+		expires = alarm_expires_remaining(&timer->alarm);
+		ktimespec = ktime_to_timespec(expires);
+	}
+
+	mutex_unlock(&list_mutex);
+
+	if (ktimespec.tv_sec >= 0)
+		return snprintf(buf, PAGE_SIZE, "%ld\n", ktimespec.tv_sec);
+
+	if (timer && timer->send_nl_msg)
+		return snprintf(buf, PAGE_SIZE, "0 %ld\n", ktimespec.tv_sec);
+	else
+		return snprintf(buf, PAGE_SIZE, "0\n");
+}
+
+static void hardidletimer_tg_work(struct work_struct *work)
+{
+	struct hardidletimer_tg *timer = container_of(work,
+				struct hardidletimer_tg, work);
+
+	sysfs_notify(hardidletimer_tg_kobj, NULL, timer->attr.attr.name);
+
+	if (timer->send_nl_msg)
+		notify_netlink_uevent(timer->attr.attr.name, timer);
+}
+
+static enum alarmtimer_restart hardidletimer_tg_alarmproc(struct alarm *alarm,
+							  ktime_t now)
+{
+	struct hardidletimer_tg *timer = alarm->data;
+
+	pr_debug("alarm %s expired\n", timer->attr.attr.name);
+
+	timer->active = false;
+	schedule_work(&timer->work);
+	return ALARMTIMER_NORESTART;
+}
+
+static int hardidletimer_tg_create(struct hardidletimer_tg_info *info)
+{
+	int ret;
+	ktime_t tout;
+
+	info->timer = kmalloc(sizeof(*info->timer), GFP_KERNEL);
+	if (!info->timer) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
+	if (!info->timer->attr.attr.name) {
+		ret = -ENOMEM;
+		goto out_free_timer;
+	}
+	info->timer->attr.attr.mode = 0444;
+	info->timer->attr.show = hardidletimer_tg_show;
+
+	ret = sysfs_create_file(hardidletimer_tg_kobj, &info->timer->attr.attr);
+	if (ret < 0) {
+		pr_debug("couldn't add file to sysfs");
+		goto out_free_attr;
+	}
+	/*  notify userspace  */
+	kobject_uevent(hardidletimer_tg_kobj, KOBJ_ADD);
+
+	list_add(&info->timer->entry, &hardidletimer_tg_list);
+
+	alarm_init(&info->timer->alarm, ALARM_BOOTTIME,
+		   hardidletimer_tg_alarmproc);
+	info->timer->alarm.data = info->timer;
+	info->timer->refcnt = 1;
+	info->timer->send_nl_msg = info->send_nl_msg != 0;
+	info->timer->active = true;
+	tout = ktime_set(info->timeout, 0);
+	alarm_start_relative(&info->timer->alarm, tout);
+
+	INIT_WORK(&info->timer->work, hardidletimer_tg_work);
+
+	return 0;
+
+out_free_attr:
+	kfree(info->timer->attr.attr.name);
+out_free_timer:
+	kfree(info->timer);
+out:
+	return ret;
+}
+
+/* The actual xt_tables plugin. */
+static unsigned int hardidletimer_tg_target(struct sk_buff *skb,
+					    const struct xt_action_param *par)
+{
+	const struct hardidletimer_tg_info *info = par->targinfo;
+	ktime_t tout;
+
+	pr_debug("resetting timer %s, timeout period %u\n",
+		 info->label, info->timeout);
+
+	WARN_ON(!info->timer);
+
+	if (!info->timer->active) {
+		schedule_work(&info->timer->work);
+		pr_debug("Starting timer %s\n", info->label);
+	}
+
+	info->timer->active = true;
+	/* TODO: Avoid modifying timers on each packet */
+	tout = ktime_set(info->timeout, 0);
+	alarm_start_relative(&info->timer->alarm, tout);
+
+	return XT_CONTINUE;
+}
+
+static int hardidletimer_tg_checkentry(const struct xt_tgchk_param *par)
+{
+	struct hardidletimer_tg_info *info = par->targinfo;
+	int ret;
+	ktime_t tout;
+	struct timespec ktimespec;
+
+	memset(&ktimespec, 0, sizeof(struct timespec));
+
+	pr_debug("checkentry targinfo %s\n", info->label);
+
+	if (info->timeout == 0) {
+		pr_debug("timeout value is zero\n");
+		return -EINVAL;
+	}
+
+	if (info->label[0] == '\0' ||
+	    strnlen(info->label, MAX_HARDIDLETIMER_LABEL_SIZE)
+				== MAX_HARDIDLETIMER_LABEL_SIZE) {
+		pr_debug("label is empty or not nul-terminated\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&list_mutex);
+
+	info->timer = __hardidletimer_tg_find_by_label(info->label);
+	if (info->timer) {
+		info->timer->refcnt++;
+		/* calculate remaining expiry time */
+		tout = alarm_expires_remaining(&info->timer->alarm);
+		ktimespec = ktime_to_timespec(tout);
+
+		if (ktimespec.tv_sec > 0) {
+			pr_debug("time_expiry_remaining %ld\n",
+				 ktimespec.tv_sec);
+			alarm_start_relative(&info->timer->alarm, tout);
+		}
+
+		pr_debug("increased refcnt of timer %s to %u\n",
+			 info->label, info->timer->refcnt);
+	} else {
+		ret = hardidletimer_tg_create(info);
+		if (ret < 0) {
+			pr_debug("failed to create timer\n");
+			mutex_unlock(&list_mutex);
+			return ret;
+		}
+	}
+
+	mutex_unlock(&list_mutex);
+
+	return 0;
+}
+
+static void hardidletimer_tg_destroy(const struct xt_tgdtor_param *par)
+{
+	const struct hardidletimer_tg_info *info = par->targinfo;
+
+	pr_debug("destroy targinfo %s\n", info->label);
+
+	mutex_lock(&list_mutex);
+
+	if (--info->timer->refcnt == 0) {
+		pr_debug("deleting timer %s\n", info->label);
+
+		list_del(&info->timer->entry);
+		alarm_cancel(&info->timer->alarm);
+		cancel_work_sync(&info->timer->work);
+		sysfs_remove_file(hardidletimer_tg_kobj,
+				  &info->timer->attr.attr);
+		kfree(info->timer->attr.attr.name);
+		kfree(info->timer);
+	} else {
+		pr_debug("decreased refcnt of timer %s to %u\n",
+			 info->label, info->timer->refcnt);
+	}
+
+	mutex_unlock(&list_mutex);
+}
+
+static struct xt_target hardidletimer_tg __read_mostly = {
+	.name		= "HARDIDLETIMER",
+	.revision	= 1,
+	.family		= NFPROTO_UNSPEC,
+	.target		= hardidletimer_tg_target,
+	.targetsize     = sizeof(struct hardidletimer_tg_info),
+	.checkentry	= hardidletimer_tg_checkentry,
+	.destroy        = hardidletimer_tg_destroy,
+	.me		= THIS_MODULE,
+};
+
+static struct class *hardidletimer_tg_class;
+
+static struct device *hardidletimer_tg_device;
+
+static int __init hardidletimer_tg_init(void)
+{
+	int err;
+
+	hardidletimer_tg_class = class_create(THIS_MODULE, "xt_hardidletimer");
+	err = PTR_ERR(hardidletimer_tg_class);
+	if (IS_ERR(hardidletimer_tg_class)) {
+		pr_debug("couldn't register device class\n");
+		goto out;
+	}
+
+	hardidletimer_tg_device = device_create(hardidletimer_tg_class, NULL,
+						MKDEV(0, 0), NULL, "timers");
+	err = PTR_ERR(hardidletimer_tg_device);
+	if (IS_ERR(hardidletimer_tg_device)) {
+		pr_debug("couldn't register system device\n");
+		goto out_class;
+	}
+
+	hardidletimer_tg_kobj = &hardidletimer_tg_device->kobj;
+
+	err = xt_register_target(&hardidletimer_tg);
+	if (err < 0) {
+		pr_debug("couldn't register xt target\n");
+		goto out_dev;
+	}
+
+	return 0;
+out_dev:
+	device_destroy(hardidletimer_tg_class, MKDEV(0, 0));
+out_class:
+	class_destroy(hardidletimer_tg_class);
+out:
+	return err;
+}
+
+static void __exit hardidletimer_tg_exit(void)
+{
+	xt_unregister_target(&hardidletimer_tg);
+
+	device_destroy(hardidletimer_tg_class, MKDEV(0, 0));
+	class_destroy(hardidletimer_tg_class);
+}
+
+module_init(hardidletimer_tg_init);
+module_exit(hardidletimer_tg_exit);
+
+MODULE_AUTHOR("Timo Teras <ext-timo.teras@nokia.com>");
+MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_DESCRIPTION("Xtables: idle time monitor");
+MODULE_LICENSE("GPL v2");
+
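Userspace can read back the remaining time for a rule through the sysfs attribute that hardidletimer_tg_create() registers. A minimal sketch follows; the path is an assumption derived from the class_create("xt_hardidletimer") and device_create(..., "timers") calls above, and "wlan0_timer" is a hypothetical rule label:

#include <stdio.h>

int main(void)
{
	char buf[32];
	/* Path assumed from the class/device names registered above;
	 * the label "wlan0_timer" is hypothetical. */
	FILE *f = fopen(
		"/sys/devices/virtual/xt_hardidletimer/timers/wlan0_timer",
		"r");

	if (!f)
		return 1;
	/* The show handler prints the remaining seconds, or "0" once
	 * the alarm has fired. */
	if (fgets(buf, sizeof(buf), f))
		printf("seconds remaining: %s", buf);
	fclose(f);
	return 0;
}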
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index 9271d88..3a1deec 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -327,6 +327,8 @@
 		pr_debug("couldn't add file to sysfs");
 		goto out_free_attr;
 	}
+	/* notify userspace */
+	kobject_uevent(idletimer_tg_kobj, KOBJ_ADD);
 
 	list_add(&info->timer->entry, &idletimer_tg_list);
 
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 03f37c4..1d3144d 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -153,7 +153,7 @@
 	sk_for_each(s, &nr_list)
 		if (!ax25cmp(&nr_sk(s)->source_addr, addr) &&
 		    s->sk_state == TCP_LISTEN) {
-			bh_lock_sock(s);
+			sock_hold(s);
 			goto found;
 		}
 	s = NULL;
@@ -174,7 +174,7 @@
 		struct nr_sock *nr = nr_sk(s);
 
 		if (nr->my_index == index && nr->my_id == id) {
-			bh_lock_sock(s);
+			sock_hold(s);
 			goto found;
 		}
 	}
@@ -198,7 +198,7 @@
 
 		if (nr->your_index == index && nr->your_id == id &&
 		    !ax25cmp(&nr->dest_addr, dest)) {
-			bh_lock_sock(s);
+			sock_hold(s);
 			goto found;
 		}
 	}
@@ -224,7 +224,7 @@
 		if (i != 0 && j != 0) {
 			if ((sk=nr_find_socket(i, j)) == NULL)
 				break;
-			bh_unlock_sock(sk);
+			sock_put(sk);
 		}
 
 		id++;
@@ -920,6 +920,7 @@
 	}
 
 	if (sk != NULL) {
+		bh_lock_sock(sk);
 		skb_reset_transport_header(skb);
 
 		if (frametype == NR_CONNACK && skb->len == 22)
@@ -929,6 +930,7 @@
 
 		ret = nr_process_rx_frame(sk, skb);
 		bh_unlock_sock(sk);
+		sock_put(sk);
 		return ret;
 	}
 
@@ -960,10 +962,12 @@
 	    (make = nr_make_new(sk)) == NULL) {
 		nr_transmit_refusal(skb, 0);
 		if (sk)
-			bh_unlock_sock(sk);
+			sock_put(sk);
 		return 0;
 	}
 
+	bh_lock_sock(sk);
+
 	window = skb->data[20];
 
 	skb->sk             = make;
@@ -1016,6 +1020,7 @@
 		sk->sk_data_ready(sk);
 
 	bh_unlock_sock(sk);
+	sock_put(sk);
 
 	nr_insert_socket(make);
 
diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
index cbd51ed..908e53a 100644
--- a/net/netrom/nr_timer.c
+++ b/net/netrom/nr_timer.c
@@ -52,21 +52,21 @@
 {
 	struct nr_sock *nr = nr_sk(sk);
 
-	mod_timer(&nr->t1timer, jiffies + nr->t1);
+	sk_reset_timer(sk, &nr->t1timer, jiffies + nr->t1);
 }
 
 void nr_start_t2timer(struct sock *sk)
 {
 	struct nr_sock *nr = nr_sk(sk);
 
-	mod_timer(&nr->t2timer, jiffies + nr->t2);
+	sk_reset_timer(sk, &nr->t2timer, jiffies + nr->t2);
 }
 
 void nr_start_t4timer(struct sock *sk)
 {
 	struct nr_sock *nr = nr_sk(sk);
 
-	mod_timer(&nr->t4timer, jiffies + nr->t4);
+	sk_reset_timer(sk, &nr->t4timer, jiffies + nr->t4);
 }
 
 void nr_start_idletimer(struct sock *sk)
@@ -74,37 +74,37 @@
 	struct nr_sock *nr = nr_sk(sk);
 
 	if (nr->idle > 0)
-		mod_timer(&nr->idletimer, jiffies + nr->idle);
+		sk_reset_timer(sk, &nr->idletimer, jiffies + nr->idle);
 }
 
 void nr_start_heartbeat(struct sock *sk)
 {
-	mod_timer(&sk->sk_timer, jiffies + 5 * HZ);
+	sk_reset_timer(sk, &sk->sk_timer, jiffies + 5 * HZ);
 }
 
 void nr_stop_t1timer(struct sock *sk)
 {
-	del_timer(&nr_sk(sk)->t1timer);
+	sk_stop_timer(sk, &nr_sk(sk)->t1timer);
 }
 
 void nr_stop_t2timer(struct sock *sk)
 {
-	del_timer(&nr_sk(sk)->t2timer);
+	sk_stop_timer(sk, &nr_sk(sk)->t2timer);
 }
 
 void nr_stop_t4timer(struct sock *sk)
 {
-	del_timer(&nr_sk(sk)->t4timer);
+	sk_stop_timer(sk, &nr_sk(sk)->t4timer);
 }
 
 void nr_stop_idletimer(struct sock *sk)
 {
-	del_timer(&nr_sk(sk)->idletimer);
+	sk_stop_timer(sk, &nr_sk(sk)->idletimer);
 }
 
 void nr_stop_heartbeat(struct sock *sk)
 {
-	del_timer(&sk->sk_timer);
+	sk_stop_timer(sk, &sk->sk_timer);
 }
 
 int nr_t1timer_running(struct sock *sk)
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 865ecef..c7b6010 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -500,7 +500,7 @@
 			return -EINVAL;
 		}
 
-		if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
+		if (!nz || !is_all_zero(nla_data(nla), nla_len(nla))) {
 			attrs |= 1 << type;
 			a[type] = nla;
 		}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 100ce98..b0b4b0e 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2625,8 +2625,10 @@
 						sll_addr)))
 			goto out;
 		proto	= saddr->sll_protocol;
-		addr	= saddr->sll_addr;
+		addr	= saddr->sll_halen ? saddr->sll_addr : NULL;
 		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
+		if (addr && dev && saddr->sll_halen < dev->addr_len)
+			goto out_put;
 	}
 
 	err = -ENXIO;
@@ -2823,8 +2825,10 @@
 		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
 			goto out;
 		proto	= saddr->sll_protocol;
-		addr	= saddr->sll_addr;
+		addr	= saddr->sll_halen ? saddr->sll_addr : NULL;
 		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
+		if (addr && dev && saddr->sll_halen < dev->addr_len)
+			goto out_unlock;
 	}
 
 	err = -ENXIO;
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 77e9f85..f2ff21d 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -850,6 +850,7 @@
 
 /*
  *	Route a frame to an appropriate AX.25 connection.
+ *	A NULL ax25_cb indicates an internally generated frame.
  */
 int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
 {
@@ -867,6 +868,10 @@
 
 	if (skb->len < ROSE_MIN_LEN)
 		return res;
+
+	if (!ax25)
+		return rose_loopback_queue(skb, NULL);
+
 	frametype = skb->data[2];
 	lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
 	if (frametype == ROSE_CALL_REQUEST &&
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 681f6f0..0f6601f 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -197,6 +197,15 @@
 	[TCA_TUNNEL_KEY_ENC_TTL]      = { .type = NLA_U8 },
 };
 
+static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
+{
+	if (!p)
+		return;
+	if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
+		dst_release(&p->tcft_enc_metadata->dst);
+	kfree_rcu(p, rcu);
+}
+
 static int tunnel_key_init(struct net *net, struct nlattr *nla,
 			   struct nlattr *est, struct tc_action **a,
 			   int ovr, int bind, bool rtnl_held,
@@ -360,8 +369,7 @@
 	rcu_swap_protected(t->params, params_new,
 			   lockdep_is_held(&t->tcf_lock));
 	spin_unlock_bh(&t->tcf_lock);
-	if (params_new)
-		kfree_rcu(params_new, rcu);
+	tunnel_key_release_params(params_new);
 
 	if (ret == ACT_P_CREATED)
 		tcf_idr_insert(tn, *a);
@@ -385,12 +393,7 @@
 	struct tcf_tunnel_key_params *params;
 
 	params = rcu_dereference_protected(t->params, 1);
-	if (params) {
-		if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
-			dst_release(&params->tcft_enc_metadata->dst);
-
-		kfree_rcu(params, rcu);
-	}
+	tunnel_key_release_params(params);
 }
 
 static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 70f144a..2167c6c 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -960,7 +960,6 @@
 int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 		 struct tcf_result *res, bool compat_mode)
 {
-	__be16 protocol = tc_skb_protocol(skb);
 #ifdef CONFIG_NET_CLS_ACT
 	const int max_reclassify_loop = 4;
 	const struct tcf_proto *orig_tp = tp;
@@ -970,6 +969,7 @@
 reclassify:
 #endif
 	for (; tp; tp = rcu_dereference_bh(tp->next)) {
+		__be16 protocol = tc_skb_protocol(skb);
 		int err;
 
 		if (tp->protocol != protocol &&
@@ -1002,7 +1002,6 @@
 	}
 
 	tp = first_tp;
-	protocol = tc_skb_protocol(skb);
 	goto reclassify;
 #endif
 }
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 7fade71..84893bc 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -1176,17 +1176,23 @@
 	struct cls_fl_head *head = rtnl_dereference(tp->root);
 	struct cls_fl_filter *fold = *arg;
 	struct cls_fl_filter *fnew;
+	struct fl_flow_mask *mask;
 	struct nlattr **tb;
-	struct fl_flow_mask mask = {};
 	int err;
 
 	if (!tca[TCA_OPTIONS])
 		return -EINVAL;
 
-	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
-	if (!tb)
+	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
+	if (!mask)
 		return -ENOBUFS;
 
+	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
+	if (!tb) {
+		err = -ENOBUFS;
+		goto errout_mask_alloc;
+	}
+
 	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
 			       fl_policy, NULL);
 	if (err < 0)
@@ -1229,12 +1235,12 @@
 		}
 	}
 
-	err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr,
+	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
 			   tp->chain->tmplt_priv, extack);
 	if (err)
 		goto errout_idr;
 
-	err = fl_check_assign_mask(head, fnew, fold, &mask);
+	err = fl_check_assign_mask(head, fnew, fold, mask);
 	if (err)
 		goto errout_idr;
 
@@ -1281,6 +1287,7 @@
 	}
 
 	kfree(tb);
+	kfree(mask);
 	return 0;
 
 errout_mask:
@@ -1294,6 +1301,8 @@
 	kfree(fnew);
 errout_tb:
 	kfree(tb);
+errout_mask_alloc:
+	kfree(mask);
 	return err;
 }
 
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index fc6c5e4..4fede55 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -97,10 +97,9 @@
 
 	switch (ev) {
 	case NETDEV_UP:
-		addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
+		addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
 		if (addr) {
 			addr->a.v6.sin6_family = AF_INET6;
-			addr->a.v6.sin6_port = 0;
 			addr->a.v6.sin6_addr = ifa->addr;
 			addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
 			addr->valid = 1;
@@ -278,7 +277,8 @@
 
 	if (saddr) {
 		fl6->saddr = saddr->v6.sin6_addr;
-		fl6->fl6_sport = saddr->v6.sin6_port;
+		if (!fl6->fl6_sport)
+			fl6->fl6_sport = saddr->v6.sin6_port;
 
 		pr_debug("src=%pI6 - ", &fl6->saddr);
 	}
@@ -430,7 +430,6 @@
 		addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
 		if (addr) {
 			addr->a.v6.sin6_family = AF_INET6;
-			addr->a.v6.sin6_port = 0;
 			addr->a.v6.sin6_addr = ifp->addr;
 			addr->a.v6.sin6_scope_id = dev->ifindex;
 			addr->valid = 1;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index e948db2..1c9f079 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -101,7 +101,6 @@
 		addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
 		if (addr) {
 			addr->a.v4.sin_family = AF_INET;
-			addr->a.v4.sin_port = 0;
 			addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
 			addr->valid = 1;
 			INIT_LIST_HEAD(&addr->list);
@@ -441,7 +440,8 @@
 	}
 	if (saddr) {
 		fl4->saddr = saddr->v4.sin_addr.s_addr;
-		fl4->fl4_sport = saddr->v4.sin_port;
+		if (!fl4->fl4_sport)
+			fl4->fl4_sport = saddr->v4.sin_port;
 	}
 
 	pr_debug("%s: dst:%pI4, src:%pI4 - ", __func__, &fl4->daddr,
@@ -776,10 +776,9 @@
 
 	switch (ev) {
 	case NETDEV_UP:
-		addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
+		addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
 		if (addr) {
 			addr->a.v4.sin_family = AF_INET;
-			addr->a.v4.sin_port = 0;
 			addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
 			addr->valid = 1;
 			spin_lock_bh(&net->sctp.local_addr_lock);
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index f4ac6c5..d05c576 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -495,7 +495,10 @@
 	 *
 	 * [INIT ACK back to where the INIT came from.]
 	 */
-	retval->transport = chunk->transport;
+	if (chunk->transport)
+		retval->transport =
+			sctp_assoc_lookup_paddr(asoc,
+						&chunk->transport->ipaddr);
 
 	retval->subh.init_hdr =
 		sctp_addto_chunk(retval, sizeof(initack), &initack);
@@ -642,8 +645,10 @@
 	 *
 	 * [COOKIE ACK back to where the COOKIE ECHO came from.]
 	 */
-	if (retval && chunk)
-		retval->transport = chunk->transport;
+	if (retval && chunk && chunk->transport)
+		retval->transport =
+			sctp_assoc_lookup_paddr(asoc,
+						&chunk->transport->ipaddr);
 
 	return retval;
 }
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 3892e76..80e0ae5 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -585,9 +585,9 @@
 	struct sctp_strreset_outreq *outreq = param.v;
 	struct sctp_stream *stream = &asoc->stream;
 	__u32 result = SCTP_STRRESET_DENIED;
-	__u16 i, nums, flags = 0;
 	__be16 *str_p = NULL;
 	__u32 request_seq;
+	__u16 i, nums;
 
 	request_seq = ntohl(outreq->request_seq);
 
@@ -615,6 +615,15 @@
 	if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ))
 		goto out;
 
+	nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16);
+	str_p = outreq->list_of_streams;
+	for (i = 0; i < nums; i++) {
+		if (ntohs(str_p[i]) >= stream->incnt) {
+			result = SCTP_STRRESET_ERR_WRONG_SSN;
+			goto out;
+		}
+	}
+
 	if (asoc->strreset_chunk) {
 		if (!sctp_chunk_lookup_strreset_param(
 				asoc, outreq->response_seq,
@@ -637,32 +646,19 @@
 			sctp_chunk_put(asoc->strreset_chunk);
 			asoc->strreset_chunk = NULL;
 		}
-
-		flags = SCTP_STREAM_RESET_INCOMING_SSN;
 	}
 
-	nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16);
-	if (nums) {
-		str_p = outreq->list_of_streams;
-		for (i = 0; i < nums; i++) {
-			if (ntohs(str_p[i]) >= stream->incnt) {
-				result = SCTP_STRRESET_ERR_WRONG_SSN;
-				goto out;
-			}
-		}
-
+	if (nums)
 		for (i = 0; i < nums; i++)
 			SCTP_SI(stream, ntohs(str_p[i]))->mid = 0;
-	} else {
+	else
 		for (i = 0; i < stream->incnt; i++)
 			SCTP_SI(stream, i)->mid = 0;
-	}
 
 	result = SCTP_STRRESET_PERFORMED;
 
 	*evp = sctp_ulpevent_make_stream_reset_event(asoc,
-		flags | SCTP_STREAM_RESET_OUTGOING_SSN, nums, str_p,
-		GFP_ATOMIC);
+		SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC);
 
 out:
 	sctp_update_strreset_result(asoc, result);
@@ -738,9 +734,6 @@
 
 	result = SCTP_STRRESET_PERFORMED;
 
-	*evp = sctp_ulpevent_make_stream_reset_event(asoc,
-		SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC);
-
 out:
 	sctp_update_strreset_result(asoc, result);
 err:
@@ -873,6 +866,14 @@
 	if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ))
 		goto out;
 
+	in = ntohs(addstrm->number_of_streams);
+	incnt = stream->incnt + in;
+	if (!in || incnt > SCTP_MAX_STREAM)
+		goto out;
+
+	if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC))
+		goto out;
+
 	if (asoc->strreset_chunk) {
 		if (!sctp_chunk_lookup_strreset_param(
 			asoc, 0, SCTP_PARAM_RESET_ADD_IN_STREAMS)) {
@@ -896,14 +897,6 @@
 		}
 	}
 
-	in = ntohs(addstrm->number_of_streams);
-	incnt = stream->incnt + in;
-	if (!in || incnt > SCTP_MAX_STREAM)
-		goto out;
-
-	if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC))
-		goto out;
-
 	stream->incnt = incnt;
 
 	result = SCTP_STRRESET_PERFORMED;
@@ -973,9 +966,6 @@
 
 	result = SCTP_STRRESET_PERFORMED;
 
-	*evp = sctp_ulpevent_make_stream_change_event(asoc,
-		0, 0, ntohs(addstrm->number_of_streams), GFP_ATOMIC);
-
 out:
 	sctp_update_strreset_result(asoc, result);
 err:
@@ -1036,10 +1026,10 @@
 					sout->mid_uo = 0;
 				}
 			}
-
-			flags = SCTP_STREAM_RESET_OUTGOING_SSN;
 		}
 
+		flags |= SCTP_STREAM_RESET_OUTGOING_SSN;
+
 		for (i = 0; i < stream->outcnt; i++)
 			SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
 
@@ -1058,6 +1048,8 @@
 		nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) /
 		       sizeof(__u16);
 
+		flags |= SCTP_STREAM_RESET_INCOMING_SSN;
+
 		*evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
 			nums, str_p, GFP_ATOMIC);
 	} else if (req->type == SCTP_PARAM_RESET_TSN_REQUEST) {
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 80e2119..e6e506b 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -144,9 +144,18 @@
 		sock_set_flag(sk, SOCK_DEAD);
 		sk->sk_shutdown |= SHUTDOWN_MASK;
 	}
+
+	sk->sk_prot->unhash(sk);
+
 	if (smc->clcsock) {
+		if (smc->use_fallback && sk->sk_state == SMC_LISTEN) {
+			/* wake up clcsock accept */
+			rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
+		}
+		mutex_lock(&smc->clcsock_release_lock);
 		sock_release(smc->clcsock);
 		smc->clcsock = NULL;
+		mutex_unlock(&smc->clcsock_release_lock);
 	}
 	if (smc->use_fallback) {
 		if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
@@ -162,7 +171,6 @@
 		smc_conn_free(&smc->conn);
 	release_sock(sk);
 
-	sk->sk_prot->unhash(sk);
 	sock_put(sk); /* final sock_put */
 out:
 	return rc;
@@ -203,6 +211,7 @@
 	spin_lock_init(&smc->conn.send_lock);
 	sk->sk_prot->hash(sk);
 	sk_refcnt_debug_inc(sk);
+	mutex_init(&smc->clcsock_release_lock);
 
 	return sk;
 }
@@ -818,7 +827,7 @@
 	struct socket *new_clcsock = NULL;
 	struct sock *lsk = &lsmc->sk;
 	struct sock *new_sk;
-	int rc;
+	int rc = -EINVAL;
 
 	release_sock(lsk);
 	new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
@@ -831,7 +840,10 @@
 	}
 	*new_smc = smc_sk(new_sk);
 
-	rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
+	mutex_lock(&lsmc->clcsock_release_lock);
+	if (lsmc->clcsock)
+		rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
+	mutex_unlock(&lsmc->clcsock_release_lock);
 	lock_sock(lsk);
 	if  (rc < 0)
 		lsk->sk_err = -rc;
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 08786ac..5721416 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -219,6 +219,10 @@
 						 * started, waiting for unsent
 						 * data to be sent
 						 */
+	struct mutex            clcsock_release_lock;
+						/* protects clcsock of a listen
+						 * socket
+						 */
 };
 
 static inline struct smc_sock *smc_sk(const struct sock *sk)
diff --git a/net/socket.c b/net/socket.c
index 390a8ec..e286674 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -115,6 +115,8 @@
 
 static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to);
 static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from);
+static BLOCKING_NOTIFIER_HEAD(sockev_notifier_list);
+
 static int sock_mmap(struct file *file, struct vm_area_struct *vma);
 
 static int sock_close(struct inode *inode, struct file *file);
@@ -163,6 +165,14 @@
 static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
 
 /*
+ * Socket Event framework helpers
+ */
+static void sockev_notify(unsigned long event, struct socket *sk)
+{
+	blocking_notifier_call_chain(&sockev_notifier_list, event, sk);
+}
+
+/*
  * Support routines.
  * Move socket addresses back and forth across the kernel/user
  * divide and look after the messy bits.
@@ -1348,6 +1358,9 @@
 	if (retval < 0)
 		return retval;
 
+	if (retval == 0)
+		sockev_notify(SOCKEV_SOCKET, sock);
+
 	return sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
 }
 
@@ -1485,6 +1498,8 @@
 						      &address, addrlen);
 		}
 		fput_light(sock->file, fput_needed);
+		if (!err)
+			sockev_notify(SOCKEV_BIND, sock);
 	}
 	return err;
 }
@@ -1517,6 +1532,8 @@
 			err = sock->ops->listen(sock, backlog);
 
 		fput_light(sock->file, fput_needed);
+		if (!err)
+			sockev_notify(SOCKEV_LISTEN, sock);
 	}
 	return err;
 }
@@ -1608,7 +1625,8 @@
 
 	fd_install(newfd, newfile);
 	err = newfd;
-
+	if (!err)
+		sockev_notify(SOCKEV_ACCEPT, sock);
 out_put:
 	fput_light(sock->file, fput_needed);
 out:
@@ -1663,6 +1681,8 @@
 
 	err = sock->ops->connect(sock, (struct sockaddr *)&address, addrlen,
 				 sock->file->f_flags);
+	if (!err)
+		sockev_notify(SOCKEV_CONNECT, sock);
 out_put:
 	fput_light(sock->file, fput_needed);
 out:
@@ -1961,6 +1981,7 @@
 
 	sock = sockfd_lookup_light(fd, &err, &fput_needed);
 	if (sock != NULL) {
+		sockev_notify(SOCKEV_SHUTDOWN, sock);
 		err = security_socket_shutdown(sock, how);
 		if (!err)
 			err = sock->ops->shutdown(sock, how);
@@ -3397,3 +3418,14 @@
 	}
 }
 EXPORT_SYMBOL(kernel_sock_ip_overhead);
+int sockev_register_notify(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_register(&sockev_notifier_list, nb);
+}
+EXPORT_SYMBOL(sockev_register_notify);
+
+int sockev_unregister_notify(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_unregister(&sockev_notifier_list, nb);
+}
+EXPORT_SYMBOL(sockev_unregister_notify);
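The sockev changes above add a blocking notifier chain that fires on socket(), bind(), listen(), accept(), connect() and shutdown(). A minimal sketch of a consumer module follows; the extern prototypes stand in for a sockev header that is not part of this diff, and the SOCKEV_* event values arrive in the notifier's event argument:

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/net.h>

/* Assumed to be exported via a header not present in this diff. */
extern int sockev_register_notify(struct notifier_block *nb);
extern int sockev_unregister_notify(struct notifier_block *nb);

/* Hypothetical consumer: logs every socket event on the chain. */
static int sockev_demo_cb(struct notifier_block *nb,
			  unsigned long event, void *data)
{
	struct socket *sock = data;	/* chain passes the struct socket */

	pr_info("sockev: event %lu on socket %p\n", event, sock);
	return NOTIFY_DONE;
}

static struct notifier_block sockev_demo_nb = {
	.notifier_call = sockev_demo_cb,
};

static int __init sockev_demo_init(void)
{
	return sockev_register_notify(&sockev_demo_nb);
}

static void __exit sockev_demo_exit(void)
{
	sockev_unregister_notify(&sockev_demo_nb);
}

module_init(sockev_demo_init);
module_exit(sockev_demo_exit);
MODULE_LICENSE("GPL v2");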
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 860f2a1b..1a65f88 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1122,7 +1122,7 @@
 	struct kvec *resv = &rqstp->rq_res.head[0];
 	struct rsi *rsip, rsikey;
 	int ret;
-	struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
+	struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
 
 	memset(&rsikey, 0, sizeof(rsikey));
 	ret = gss_read_verf(gc, argv, authp,
@@ -1233,7 +1233,7 @@
 	uint64_t handle;
 	int status;
 	int ret;
-	struct net *net = rqstp->rq_xprt->xpt_net;
+	struct net *net = SVC_NET(rqstp);
 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
 
 	memset(&ud, 0, sizeof(ud));
@@ -1424,7 +1424,7 @@
 	__be32		*rpcstart;
 	__be32		*reject_stat = resv->iov_base + resv->iov_len;
 	int		ret;
-	struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
+	struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
 
 	dprintk("RPC:       svcauth_gss: argv->iov_len = %zd\n",
 			argv->iov_len);
@@ -1714,7 +1714,7 @@
 	struct rpc_gss_wire_cred *gc = &gsd->clcred;
 	struct xdr_buf *resbuf = &rqstp->rq_res;
 	int stat = -EINVAL;
-	struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
+	struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
 
 	if (gc->gc_proc != RPC_GSS_PROC_DATA)
 		goto out;
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 109fbe5..b6e8ecc 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -54,6 +54,11 @@
 	h->last_refresh = now;
 }
 
+static void cache_fresh_locked(struct cache_head *head, time_t expiry,
+				struct cache_detail *detail);
+static void cache_fresh_unlocked(struct cache_head *head,
+				struct cache_detail *detail);
+
 struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
 				       struct cache_head *key, int hash)
 {
@@ -95,6 +100,7 @@
 			if (cache_is_expired(detail, tmp)) {
 				hlist_del_init(&tmp->cache_list);
 				detail->entries --;
+				cache_fresh_locked(tmp, 0, detail);
 				freeme = tmp;
 				break;
 			}
@@ -110,8 +116,10 @@
 	cache_get(new);
 	write_unlock(&detail->hash_lock);
 
-	if (freeme)
+	if (freeme) {
+		cache_fresh_unlocked(freeme, detail);
 		cache_put(freeme, detail);
+	}
 	return new;
 }
 EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index c7872bc..08b5fa4 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -771,6 +771,12 @@
 	case RPCBVERS_3:
 		map->r_netid = xprt->address_strings[RPC_DISPLAY_NETID];
 		map->r_addr = rpc_sockaddr2uaddr(sap, GFP_ATOMIC);
+		if (!map->r_addr) {
+			status = -ENOMEM;
+			dprintk("RPC: %5u %s: no memory available\n",
+				task->tk_pid, __func__);
+			goto bailout_free_args;
+		}
 		map->r_owner = "";
 		break;
 	case RPCBVERS_2:
@@ -793,6 +799,8 @@
 	rpc_put_task(child);
 	return;
 
+bailout_free_args:
+	kfree(map);
 bailout_release_client:
 	rpc_release_client(rpcb_clnt);
 bailout_nofree:
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index d13e05f..d65f8d3 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1144,6 +1144,8 @@
 static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {}
 #endif
 
+extern void svc_tcp_prep_reply_hdr(struct svc_rqst *);
+
 /*
  * Common routine for processing the RPC request.
  */
@@ -1172,7 +1174,8 @@
 	clear_bit(RQ_DROPME, &rqstp->rq_flags);
 
 	/* Setup reply header */
-	rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
+	if (rqstp->rq_prot == IPPROTO_TCP)
+		svc_tcp_prep_reply_hdr(rqstp);
 
 	svc_putu32(resv, rqstp->rq_xid);
 
@@ -1244,7 +1247,7 @@
 	 * for lower versions. RPC_PROG_MISMATCH seems to be the closest
 	 * fit.
 	 */
-	if (versp->vs_need_cong_ctrl &&
+	if (versp->vs_need_cong_ctrl && rqstp->rq_xprt &&
 	    !test_bit(XPT_CONG_CTRL, &rqstp->rq_xprt->xpt_flags))
 		goto err_bad_vers;
 
@@ -1336,7 +1339,7 @@
 	return 0;
 
  close:
-	if (test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
+	if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
 		svc_close_xprt(rqstp->rq_xprt);
 	dprintk("svc: svc_process close\n");
 	return 0;
@@ -1459,10 +1462,10 @@
 	dprintk("svc: %s(%p)\n", __func__, req);
 
 	/* Build the svc_rqst used by the common processing routine */
-	rqstp->rq_xprt = serv->sv_bc_xprt;
 	rqstp->rq_xid = req->rq_xid;
 	rqstp->rq_prot = req->rq_xprt->prot;
 	rqstp->rq_server = serv;
+	rqstp->rq_bc_net = req->rq_xprt->xprt_net;
 
 	rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
 	memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 83ccd02..6cf0fd3 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -469,10 +469,11 @@
  */
 void svc_reserve(struct svc_rqst *rqstp, int space)
 {
+	struct svc_xprt *xprt = rqstp->rq_xprt;
+
 	space += rqstp->rq_res.head[0].iov_len;
 
-	if (space < rqstp->rq_reserved) {
-		struct svc_xprt *xprt = rqstp->rq_xprt;
+	if (xprt && space < rqstp->rq_reserved) {
 		atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
 		rqstp->rq_reserved = space;
 
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 5445145..97a8282 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -574,7 +574,7 @@
 		/* Don't enable netstamp, sunrpc doesn't
 		   need that much accuracy */
 	}
-	svsk->sk_sk->sk_stamp = skb->tstamp;
+	sock_write_timestamp(svsk->sk_sk, skb->tstamp);
 	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
 
 	len  = skb->len;
@@ -1198,7 +1198,7 @@
 /*
  * Setup response header. TCP has a 4B record length field.
  */
-static void svc_tcp_prep_reply_hdr(struct svc_rqst *rqstp)
+void svc_tcp_prep_reply_hdr(struct svc_rqst *rqstp)
 {
 	struct kvec *resv = &rqstp->rq_res.head[0];
 
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 6b7539c..7d8cce1 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2244,8 +2244,8 @@
 	trace_rpc_socket_connect(xprt, sock, 0);
 	status = 0;
 out:
-	xprt_unlock_connect(xprt, transport);
 	xprt_clear_connecting(xprt);
+	xprt_unlock_connect(xprt, transport);
 	xprt_wake_pending_tasks(xprt, status);
 }
 
@@ -2480,8 +2480,8 @@
 	}
 	status = -EAGAIN;
 out:
-	xprt_unlock_connect(xprt, transport);
 	xprt_clear_connecting(xprt);
+	xprt_unlock_connect(xprt, transport);
 	xprt_wake_pending_tasks(xprt, status);
 }
 
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 645c160..2649a0a 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -317,7 +317,6 @@
 	res = tipc_disc_create(net, b, &b->bcast_addr, &skb);
 	if (res) {
 		bearer_disable(net, b);
-		kfree(b);
 		errstr = "failed to create discoverer";
 		goto rejected;
 	}
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 6376467..0b21187 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -87,6 +87,11 @@
 	return limit;
 }
 
+static inline int TLV_GET_DATA_LEN(struct tlv_desc *tlv)
+{
+	return TLV_GET_LEN(tlv) - TLV_SPACE(0);
+}
+
 static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
 {
 	struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb);
@@ -166,6 +171,11 @@
 	return buf;
 }
 
+static inline bool string_is_valid(char *s, int len)
+{
+	return memchr(s, '\0', len) ? true : false;
+}
+
 static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
 				   struct tipc_nl_compat_msg *msg,
 				   struct sk_buff *arg)
@@ -379,6 +389,7 @@
 	struct nlattr *prop;
 	struct nlattr *bearer;
 	struct tipc_bearer_config *b;
+	int len;
 
 	b = (struct tipc_bearer_config *)TLV_DATA(msg->req);
 
@@ -386,6 +397,10 @@
 	if (!bearer)
 		return -EMSGSIZE;
 
+	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
+	if (!string_is_valid(b->name, len))
+		return -EINVAL;
+
 	if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name))
 		return -EMSGSIZE;
 
@@ -411,6 +426,7 @@
 {
 	char *name;
 	struct nlattr *bearer;
+	int len;
 
 	name = (char *)TLV_DATA(msg->req);
 
@@ -418,6 +434,10 @@
 	if (!bearer)
 		return -EMSGSIZE;
 
+	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
+	if (!string_is_valid(name, len))
+		return -EINVAL;
+
 	if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name))
 		return -EMSGSIZE;
 
@@ -478,6 +498,7 @@
 	struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
 	struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
 	int err;
+	int len;
 
 	if (!attrs[TIPC_NLA_LINK])
 		return -EINVAL;
@@ -504,6 +525,11 @@
 		return err;
 
 	name = (char *)TLV_DATA(msg->req);
+
+	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+	if (!string_is_valid(name, len))
+		return -EINVAL;
+
 	if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
 		return 0;
 
@@ -644,6 +670,7 @@
 	struct nlattr *prop;
 	struct nlattr *media;
 	struct tipc_link_config *lc;
+	int len;
 
 	lc = (struct tipc_link_config *)TLV_DATA(msg->req);
 
@@ -651,6 +678,10 @@
 	if (!media)
 		return -EMSGSIZE;
 
+	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
+	if (!string_is_valid(lc->name, len))
+		return -EINVAL;
+
 	if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name))
 		return -EMSGSIZE;
 
@@ -671,6 +702,7 @@
 	struct nlattr *prop;
 	struct nlattr *bearer;
 	struct tipc_link_config *lc;
+	int len;
 
 	lc = (struct tipc_link_config *)TLV_DATA(msg->req);
 
@@ -678,6 +710,10 @@
 	if (!bearer)
 		return -EMSGSIZE;
 
+	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
+	if (!string_is_valid(lc->name, len))
+		return -EINVAL;
+
 	if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name))
 		return -EMSGSIZE;
 
@@ -726,9 +762,14 @@
 	struct tipc_link_config *lc;
 	struct tipc_bearer *bearer;
 	struct tipc_media *media;
+	int len;
 
 	lc = (struct tipc_link_config *)TLV_DATA(msg->req);
 
+	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+	if (!string_is_valid(lc->name, len))
+		return -EINVAL;
+
 	media = tipc_media_find(lc->name);
 	if (media) {
 		cmd->doit = &__tipc_nl_media_set;
@@ -750,6 +791,7 @@
 {
 	char *name;
 	struct nlattr *link;
+	int len;
 
 	name = (char *)TLV_DATA(msg->req);
 
@@ -757,6 +799,10 @@
 	if (!link)
 		return -EMSGSIZE;
 
+	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+	if (!string_is_valid(name, len))
+		return -EINVAL;
+
 	if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name))
 		return -EMSGSIZE;
 
@@ -778,6 +824,8 @@
 	};
 
 	ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
+	if (TLV_GET_DATA_LEN(msg->req) < sizeof(struct tipc_name_table_query))
+		return -EINVAL;
 
 	depth = ntohl(ntq->depth);
 
@@ -1201,7 +1249,7 @@
 	}
 
 	len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
-	if (len && !TLV_OK(msg.req, len)) {
+	if (!len || !TLV_OK(msg.req, len)) {
 		msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
 		err = -EOPNOTSUPP;
 		goto send;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 366ce0b..e1bdaf0 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -878,7 +878,6 @@
 	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
 	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
 	struct tipc_sock *tsk = tipc_sk(sk);
-	struct tipc_group *grp = tsk->group;
 	struct net *net = sock_net(sk);
 	struct tipc_member *mb = NULL;
 	u32 node, port;
@@ -892,7 +891,9 @@
 	/* Block or return if destination link or member is congested */
 	rc = tipc_wait_for_cond(sock, &timeout,
 				!tipc_dest_find(&tsk->cong_links, node, 0) &&
-				!tipc_group_cong(grp, node, port, blks, &mb));
+				tsk->group &&
+				!tipc_group_cong(tsk->group, node, port, blks,
+						 &mb));
 	if (unlikely(rc))
 		return rc;
 
@@ -922,7 +923,6 @@
 	struct tipc_sock *tsk = tipc_sk(sk);
 	struct list_head *cong_links = &tsk->cong_links;
 	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
-	struct tipc_group *grp = tsk->group;
 	struct tipc_msg *hdr = &tsk->phdr;
 	struct tipc_member *first = NULL;
 	struct tipc_member *mbr = NULL;
@@ -939,9 +939,10 @@
 	type = msg_nametype(hdr);
 	inst = dest->addr.name.name.instance;
 	scope = msg_lookup_scope(hdr);
-	exclude = tipc_group_exclude(grp);
 
 	while (++lookups < 4) {
+		exclude = tipc_group_exclude(tsk->group);
+
 		first = NULL;
 
 		/* Look for a non-congested destination member, if any */
@@ -950,7 +951,8 @@
 						 &dstcnt, exclude, false))
 				return -EHOSTUNREACH;
 			tipc_dest_pop(&dsts, &node, &port);
-			cong = tipc_group_cong(grp, node, port, blks, &mbr);
+			cong = tipc_group_cong(tsk->group, node, port, blks,
+					       &mbr);
 			if (!cong)
 				break;
 			if (mbr == first)
@@ -969,7 +971,8 @@
 		/* Block or return if destination link or member is congested */
 		rc = tipc_wait_for_cond(sock, &timeout,
 					!tipc_dest_find(cong_links, node, 0) &&
-					!tipc_group_cong(grp, node, port,
+					tsk->group &&
+					!tipc_group_cong(tsk->group, node, port,
 							 blks, &mbr));
 		if (unlikely(rc))
 			return rc;
@@ -1004,8 +1007,7 @@
 	struct sock *sk = sock->sk;
 	struct net *net = sock_net(sk);
 	struct tipc_sock *tsk = tipc_sk(sk);
-	struct tipc_group *grp = tsk->group;
-	struct tipc_nlist *dsts = tipc_group_dests(grp);
+	struct tipc_nlist *dsts;
 	struct tipc_mc_method *method = &tsk->mc_method;
 	bool ack = method->mandatory && method->rcast;
 	int blks = tsk_blocks(MCAST_H_SIZE + dlen);
@@ -1014,15 +1016,17 @@
 	struct sk_buff_head pkts;
 	int rc = -EHOSTUNREACH;
 
-	if (!dsts->local && !dsts->remote)
-		return -EHOSTUNREACH;
-
 	/* Block or return if any destination link or member is congested */
-	rc = tipc_wait_for_cond(sock, &timeout,	!tsk->cong_link_cnt &&
-				!tipc_group_bc_cong(grp, blks));
+	rc = tipc_wait_for_cond(sock, &timeout,
+				!tsk->cong_link_cnt && tsk->group &&
+				!tipc_group_bc_cong(tsk->group, blks));
 	if (unlikely(rc))
 		return rc;
 
+	dsts = tipc_group_dests(tsk->group);
+	if (!dsts->local && !dsts->remote)
+		return -EHOSTUNREACH;
+
 	/* Complete message header */
 	if (dest) {
 		msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
@@ -1034,7 +1038,7 @@
 	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
 	msg_set_destport(hdr, 0);
 	msg_set_destnode(hdr, 0);
-	msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(grp));
+	msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));
 
 	/* Avoid getting stuck with repeated forced replicasts */
 	msg_set_grp_bc_ack_req(hdr, ack);
@@ -2683,11 +2687,15 @@
 		rhashtable_walk_start(&iter);
 
 		while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
-			spin_lock_bh(&tsk->sk.sk_lock.slock);
+			sock_hold(&tsk->sk);
+			rhashtable_walk_stop(&iter);
+			lock_sock(&tsk->sk);
 			msg = &tsk->phdr;
 			msg_set_prevnode(msg, tipc_own_addr(net));
 			msg_set_orignode(msg, tipc_own_addr(net));
-			spin_unlock_bh(&tsk->sk.sk_lock.slock);
+			release_sock(&tsk->sk);
+			rhashtable_walk_start(&iter);
+			sock_put(&tsk->sk);
 		}
 
 		rhashtable_walk_stop(&iter);
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index b84c005..d65eed8 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -404,7 +404,7 @@
 	ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
 	if (ret == -EWOULDBLOCK)
 		return -EWOULDBLOCK;
-	if (ret > 0) {
+	if (ret == sizeof(s)) {
 		read_lock_bh(&sk->sk_callback_lock);
 		ret = tipc_conn_rcv_sub(srv, con, &s);
 		read_unlock_bh(&sk->sk_callback_lock);
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 9783101..da2d311 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -245,10 +245,8 @@
 		}
 
 		err = tipc_udp_xmit(net, _skb, ub, src, &rcast->addr);
-		if (err) {
-			kfree_skb(_skb);
+		if (err)
 			goto out;
-		}
 	}
 	err = 0;
 out:
@@ -680,6 +678,11 @@
 	if (err)
 		goto err;
 
+	if (remote.proto != local.proto) {
+		err = -EINVAL;
+		goto err;
+	}
+
 	/* Autoconfigure own node identity if needed */
 	if (!tipc_own_id(net)) {
 		memcpy(node_id, local.ipv6.in6_u.u6_addr8, 16);
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 523622d..a091c03 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -550,11 +550,14 @@
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tls_context *ctx;
 
-	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
 	if (!ctx)
 		return NULL;
 
 	icsk->icsk_ulp_data = ctx;
+	ctx->setsockopt = sk->sk_prot->setsockopt;
+	ctx->getsockopt = sk->sk_prot->getsockopt;
+	ctx->sk_proto_close = sk->sk_prot->close;
 	return ctx;
 }
 
@@ -685,9 +688,6 @@
 		rc = -ENOMEM;
 		goto out;
 	}
-	ctx->setsockopt = sk->sk_prot->setsockopt;
-	ctx->getsockopt = sk->sk_prot->getsockopt;
-	ctx->sk_proto_close = sk->sk_prot->close;
 
 	/* Build IPv6 TLS whenever the address of tcpv6	_prot changes */
 	if (ip_ver == TLSV6 &&
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index cb332ad..c361ce7 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -264,6 +264,31 @@
 }
 
 static int
+vmci_transport_alloc_send_control_pkt(struct sockaddr_vm *src,
+				      struct sockaddr_vm *dst,
+				      enum vmci_transport_packet_type type,
+				      u64 size,
+				      u64 mode,
+				      struct vmci_transport_waiting_info *wait,
+				      u16 proto,
+				      struct vmci_handle handle)
+{
+	struct vmci_transport_packet *pkt;
+	int err;
+
+	pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
+	if (!pkt)
+		return -ENOMEM;
+
+	err = __vmci_transport_send_control_pkt(pkt, src, dst, type, size,
+						mode, wait, proto, handle,
+						true);
+	kfree(pkt);
+
+	return err;
+}
+
+static int
 vmci_transport_send_control_pkt(struct sock *sk,
 				enum vmci_transport_packet_type type,
 				u64 size,
@@ -272,9 +297,7 @@
 				u16 proto,
 				struct vmci_handle handle)
 {
-	struct vmci_transport_packet *pkt;
 	struct vsock_sock *vsk;
-	int err;
 
 	vsk = vsock_sk(sk);
 
@@ -284,17 +307,10 @@
 	if (!vsock_addr_bound(&vsk->remote_addr))
 		return -EINVAL;
 
-	pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
-	if (!pkt)
-		return -ENOMEM;
-
-	err = __vmci_transport_send_control_pkt(pkt, &vsk->local_addr,
-						&vsk->remote_addr, type, size,
-						mode, wait, proto, handle,
-						true);
-	kfree(pkt);
-
-	return err;
+	return vmci_transport_alloc_send_control_pkt(&vsk->local_addr,
+						     &vsk->remote_addr,
+						     type, size, mode,
+						     wait, proto, handle);
 }
 
 static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
@@ -312,12 +328,29 @@
 static int vmci_transport_send_reset(struct sock *sk,
 				     struct vmci_transport_packet *pkt)
 {
+	struct sockaddr_vm *dst_ptr;
+	struct sockaddr_vm dst;
+	struct vsock_sock *vsk;
+
 	if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
 		return 0;
-	return vmci_transport_send_control_pkt(sk,
-					VMCI_TRANSPORT_PACKET_TYPE_RST,
-					0, 0, NULL, VSOCK_PROTO_INVALID,
-					VMCI_INVALID_HANDLE);
+
+	vsk = vsock_sk(sk);
+
+	if (!vsock_addr_bound(&vsk->local_addr))
+		return -EINVAL;
+
+	if (vsock_addr_bound(&vsk->remote_addr)) {
+		dst_ptr = &vsk->remote_addr;
+	} else {
+		vsock_addr_init(&dst, pkt->dg.src.context,
+				pkt->src_port);
+		dst_ptr = &dst;
+	}
+	return vmci_transport_alloc_send_control_pkt(&vsk->local_addr, dst_ptr,
+					     VMCI_TRANSPORT_PACKET_TYPE_RST,
+					     0, 0, NULL, VSOCK_PROTO_INVALID,
+					     VMCI_INVALID_HANDLE);
 }
 
 static int vmci_transport_send_negotiate(struct sock *sk, size_t size)
diff --git a/net/wireless/.gitignore b/net/wireless/.gitignore
index 61cbc30..4a84ec1 100644
--- a/net/wireless/.gitignore
+++ b/net/wireless/.gitignore
@@ -1,2 +1,3 @@
+regdb.c
 shipped-certs.c
 extra-certs.c
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 4172204..f5062ab 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -175,8 +175,30 @@
 
 	  If unsure, say N.
 
+config CFG80211_INTERNAL_REGDB
+	bool "use statically compiled regulatory rules database" if EXPERT
+	default n
+	depends on CFG80211
+	---help---
+	  This option generates an internal data structure representing
+	  the wireless regulatory rules described in net/wireless/db.txt
+	  and includes code to query that database. This is an alternative
+	  to using CRDA for defining regulatory rules for the kernel.
+
+	  Using this option requires parsing db.txt at build time. The
+	  parser will be kept up to date with the latest wireless-regdb
+	  updates, but older wireless-regdb formats will be ignored. The
+	  parser may later be replaced to avoid conflicts between
+	  wireless-regdb versions.
+
+	  For details see:
+
+	  http://wireless.kernel.org/en/developers/Regulatory
+
+	  Most distributions have a CRDA package. So if unsure, say N.
+
 config CFG80211_CRDA_SUPPORT
-	bool "support CRDA" if EXPERT
+	bool "support CRDA" if CFG80211_INTERNAL_REGDB
 	default y
 	help
 	  You should enable this option unless you know for sure you have no
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index 1d84f91..fab1c17 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -15,9 +15,16 @@
 cfg80211-$(CONFIG_OF) += of.o
 cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o
 cfg80211-$(CONFIG_CFG80211_WEXT) += wext-compat.o wext-sme.o
+cfg80211-$(CONFIG_CFG80211_INTERNAL_REGDB) += regdb.o
 
 CFLAGS_trace.o := -I$(src)
 
+clean-files += shipped-certs.c extra-certs.c
+$(obj)/regdb.c: $(src)/db.txt $(src)/genregdb.awk
+	@$(AWK) -f $(srctree)/$(src)/genregdb.awk < $< > $@
+
+clean-files += regdb.c
+
 cfg80211-$(CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS) += shipped-certs.o
 ifneq ($(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR),)
 cfg80211-y += extra-certs.o
@@ -55,4 +62,3 @@
 	      echo 'unsigned int extra_regdb_certs_len = sizeof(extra_regdb_certs);'; \
 	  ) > $@)
 
-clean-files += shipped-certs.c extra-certs.c
diff --git a/net/wireless/genregdb.awk b/net/wireless/genregdb.awk
new file mode 100644
index 0000000..baf2426
--- /dev/null
+++ b/net/wireless/genregdb.awk
@@ -0,0 +1,158 @@
+#!/usr/bin/awk -f
+#
+# genregdb.awk -- generate regdb.c from db.txt
+#
+# Actually, it reads from stdin (presumed to be db.txt) and writes
+# to stdout (presumed to be regdb.c), but close enough...
+#
+# Copyright 2009 John W. Linville <linville@tuxdriver.com>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+BEGIN {
+	active = 0
+	rules = 0;
+	print "/*"
+	print " * DO NOT EDIT -- file generated from data in db.txt"
+	print " */"
+	print ""
+	print "#include <linux/nl80211.h>"
+	print "#include <net/cfg80211.h>"
+	print "#include \"regdb.h\""
+	print ""
+	regdb = "const struct ieee80211_regdomain *reg_regdb[] = {\n"
+}
+
+function parse_country_head() {
+	country=$2
+	sub(/:/, "", country)
+	printf "static const struct ieee80211_regdomain regdom_%s = {\n", country
+	printf "\t.alpha2 = \"%s\",\n", country
+	if ($NF ~ /DFS-ETSI/)
+		printf "\t.dfs_region = NL80211_DFS_ETSI,\n"
+	else if ($NF ~ /DFS-FCC/)
+		printf "\t.dfs_region = NL80211_DFS_FCC,\n"
+	else if ($NF ~ /DFS-JP/)
+		printf "\t.dfs_region = NL80211_DFS_JP,\n"
+	printf "\t.reg_rules = {\n"
+	active = 1
+	regdb = regdb "\t&regdom_" country ",\n"
+}
+
+function parse_reg_rule()
+{
+	flag_starts_at = 7
+
+	start = $1
+	sub(/\(/, "", start)
+	end = $3
+	bw = $5
+	sub(/\),/, "", bw)
+	gain = 0
+	power = $6
+	# power might be in mW...
+	units = $7
+	dfs_cac = 0
+
+	sub(/\(/, "", power)
+	sub(/\),/, "", power)
+	sub(/\),/, "", units)
+	sub(/\)/, "", units)
+
+	if (units == "mW") {
+		flag_starts_at = 8
+		power = 10 * log(power)/log(10)
+		if ($8 ~ /[[:digit:]]/) {
+			flag_starts_at = 9
+			dfs_cac = $8
+		}
+	} else {
+		if ($7 ~ /[[:digit:]]/) {
+			flag_starts_at = 8
+			dfs_cac = $7
+		}
+	}
+	sub(/\(/, "", dfs_cac)
+	sub(/\),/, "", dfs_cac)
+	flagstr = ""
+	for (i=flag_starts_at; i<=NF; i++)
+		flagstr = flagstr $i
+	split(flagstr, flagarray, ",")
+	flags = ""
+	for (arg in flagarray) {
+		if (flagarray[arg] == "NO-OFDM") {
+			flags = flags "\n\t\t\tNL80211_RRF_NO_OFDM | "
+		} else if (flagarray[arg] == "NO-CCK") {
+			flags = flags "\n\t\t\tNL80211_RRF_NO_CCK | "
+		} else if (flagarray[arg] == "NO-INDOOR") {
+			flags = flags "\n\t\t\tNL80211_RRF_NO_INDOOR | "
+		} else if (flagarray[arg] == "NO-OUTDOOR") {
+			flags = flags "\n\t\t\tNL80211_RRF_NO_OUTDOOR | "
+		} else if (flagarray[arg] == "DFS") {
+			flags = flags "\n\t\t\tNL80211_RRF_DFS | "
+		} else if (flagarray[arg] == "PTP-ONLY") {
+			flags = flags "\n\t\t\tNL80211_RRF_PTP_ONLY | "
+		} else if (flagarray[arg] == "PTMP-ONLY") {
+			flags = flags "\n\t\t\tNL80211_RRF_PTMP_ONLY | "
+		} else if (flagarray[arg] == "PASSIVE-SCAN") {
+			flags = flags "\n\t\t\tNL80211_RRF_NO_IR | "
+		} else if (flagarray[arg] == "NO-IBSS") {
+			flags = flags "\n\t\t\tNL80211_RRF_NO_IR | "
+		} else if (flagarray[arg] == "NO-IR") {
+			flags = flags "\n\t\t\tNL80211_RRF_NO_IR | "
+		} else if (flagarray[arg] == "AUTO-BW") {
+			flags = flags "\n\t\t\tNL80211_RRF_AUTO_BW | "
+		}
+
+	}
+	flags = flags "0"
+	printf "\t\tREG_RULE_EXT(%d, %d, %d, %d, %.0f, %d, %s),\n", start, end, bw, gain, power, dfs_cac, flags
+	rules++
+}
+
+function print_tail_country()
+{
+	active = 0
+	printf "\t},\n"
+	printf "\t.n_reg_rules = %d\n", rules
+	printf "};\n\n"
+	rules = 0;
+}
+
+/^[ \t]*#/ {
+	# Ignore
+}
+
+!active && /^[ \t]*$/ {
+	# Ignore
+}
+
+!active && /country/ {
+	parse_country_head()
+}
+
+active && /^[ \t]*\(/ {
+	parse_reg_rule()
+}
+
+active && /^[ \t]*$/ {
+	print_tail_country()
+}
+
+END {
+	if (active)
+		print_tail_country()
+	print regdb "};"
+	print ""
+	print "int reg_regdb_size = ARRAY_SIZE(reg_regdb);"
+}
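To illustrate the transformation the script performs, here is a hypothetical db.txt country block in the usual wireless-regdb layout and, roughly, the regdb.c the script would generate from it:

/* Assumed db.txt input:
 *
 *   country US: DFS-FCC
 *           (2402 - 2472 @ 40), (30)
 *
 * Generated output (per the BEGIN, parse_*, and END blocks above): */
#include <linux/nl80211.h>
#include <net/cfg80211.h>
#include "regdb.h"

static const struct ieee80211_regdomain regdom_US = {
	.alpha2 = "US",
	.dfs_region = NL80211_DFS_FCC,
	.reg_rules = {
		REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0),
	},
	.n_reg_rules = 1
};

const struct ieee80211_regdomain *reg_regdb[] = {
	&regdom_US,
};

int reg_regdb_size = ARRAY_SIZE(reg_regdb);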
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index cf91be3..ff400d0 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -8997,8 +8997,10 @@
 	if (info->attrs[NL80211_ATTR_CONTROL_PORT_OVER_NL80211]) {
 		int r = validate_pae_over_nl80211(rdev, info);
 
-		if (r < 0)
+		if (r < 0) {
+			kzfree(connkeys);
 			return r;
+		}
 
 		ibss.control_port_over_nl80211 = true;
 	}
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 97c0e6b..d5d0f31 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -61,6 +61,7 @@
 #include "core.h"
 #include "reg.h"
 #include "rdev-ops.h"
+#include "regdb.h"
 #include "nl80211.h"
 
 /*
@@ -501,6 +502,38 @@
 	return 0;
 }
 
+#ifdef CONFIG_CFG80211_INTERNAL_REGDB
+static int reg_query_builtin(const char *alpha2)
+{
+	const struct ieee80211_regdomain *regdom = NULL;
+	unsigned int i;
+
+	for (i = 0; i < reg_regdb_size; i++) {
+		if (alpha2_equal(alpha2, reg_regdb[i]->alpha2)) {
+			regdom = reg_copy_regd(reg_regdb[i]);
+			break;
+		}
+	}
+	if (!regdom)
+		return -ENODATA;
+
+	return reg_schedule_apply(regdom);
+}
+
+/* Feel free to add any other sanity checks here */
+static void reg_regdb_size_check(void)
+{
+	/* We should ideally BUILD_BUG_ON() but then random builds would fail */
+	WARN_ONCE(!reg_regdb_size, "db.txt is empty, you should update it...");
+}
+#else
+static inline void reg_regdb_size_check(void) {}
+static inline int reg_query_builtin(const char *alpha2)
+{
+	return -ENODATA;
+}
+#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
+
 #ifdef CONFIG_CFG80211_CRDA_SUPPORT
 /* Max number of consecutive attempts to communicate with CRDA  */
 #define REG_MAX_CRDA_TIMEOUTS 10
@@ -1097,6 +1130,10 @@
 
 static bool reg_query_database(struct regulatory_request *request)
 {
+	/* query internal regulatory database (if it exists) */
+	if (reg_query_builtin(request->alpha2) == 0)
+		return true;
+
 	if (query_regdb_file(request->alpha2) == 0)
 		return true;
 
@@ -3880,6 +3917,8 @@
 	spin_lock_init(&reg_pending_beacons_lock);
 	spin_lock_init(&reg_indoor_lock);
 
+	reg_regdb_size_check();
+
 	rcu_assign_pointer(cfg80211_regdomain, cfg80211_world_regdom);
 
 	user_alpha2[0] = '9';
diff --git a/net/wireless/regdb.h b/net/wireless/regdb.h
new file mode 100644
index 0000000..3279cfc
--- /dev/null
+++ b/net/wireless/regdb.h
@@ -0,0 +1,23 @@
+#ifndef __REGDB_H__
+#define __REGDB_H__
+
+/*
+ * Copyright 2009 John W. Linville <linville@tuxdriver.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+extern const struct ieee80211_regdomain *reg_regdb[];
+extern int reg_regdb_size;
+
+#endif /* __REGDB_H__ */
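
The regdb.h declarations are satisfied by the generated net/wireless/regdb.c, whose tail (per the END block of the script above) looks roughly like this for a two-country database (entries illustrative):

	const struct ieee80211_regdomain *reg_regdb[] = {
		&regdom_00,
		&regdom_US,
	};

	int reg_regdb_size = ARRAY_SIZE(reg_regdb);

With that in place, reg_query_database() tries the built-in table first, then the regulatory.db firmware file, and only then falls back to the CRDA user-space helper.
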
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index be3520e..790b514 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -346,6 +346,12 @@
 
 		skb->sp->xvec[skb->sp->len++] = x;
 
+		skb_dst_force(skb);
+		if (!skb_dst(skb)) {
+			XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
+			goto drop;
+		}
+
 lock:
 		spin_lock(&x->lock);
 
@@ -385,7 +391,6 @@
 		XFRM_SKB_CB(skb)->seq.input.low = seq;
 		XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
 
-		skb_dst_force(skb);
 		dev_hold(skb->dev);
 
 		if (crypto_done)
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 261995d3..6d20fbc 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -102,6 +102,7 @@
 		skb_dst_force(skb);
 		if (!skb_dst(skb)) {
 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
+			err = -EHOSTUNREACH;
 			goto error_nolock;
 		}
 
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 119a427..6ea8036 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1628,7 +1628,10 @@
 		dst_copy_metrics(dst1, dst);
 
 		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
-			__u32 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
+			__u32 mark = 0;
+
+			if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
+				mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
 
 			family = xfrm[i]->props.family;
 			dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index b669262..cc0203e 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -426,6 +426,12 @@
 	module_put(mode->owner);
 }
 
+void xfrm_state_free(struct xfrm_state *x)
+{
+	kmem_cache_free(xfrm_state_cache, x);
+}
+EXPORT_SYMBOL(xfrm_state_free);
+
 static void xfrm_state_gc_destroy(struct xfrm_state *x)
 {
 	tasklet_hrtimer_cancel(&x->mtimer);
@@ -452,7 +458,7 @@
 	}
 	xfrm_dev_state_free(x);
 	security_xfrm_state_free(x);
-	kmem_cache_free(xfrm_state_cache, x);
+	xfrm_state_free(x);
 }
 
 static void xfrm_state_gc_task(struct work_struct *work)
@@ -788,7 +794,7 @@
 {
 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
 	si->sadcnt = net->xfrm.state_num;
-	si->sadhcnt = net->xfrm.state_hmask;
+	si->sadhcnt = net->xfrm.state_hmask + 1;
 	si->sadhmcnt = xfrm_state_hashmax;
 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
 }
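
struct xfrm_state objects are carved out of a dedicated slab cache (xfrm_state_cache), so they must be returned with kmem_cache_free() on that same cache; the new xfrm_state_free() helper wraps this, and the xfrm_user.c hunk below switches two plain kfree() calls over to it. The general pairing rule, as a sketch (foo and foo_cache are hypothetical):

	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
	if (!f)
		return -ENOMEM;
	/* ... use f ... */
	kmem_cache_free(foo_cache, f);	/* matches the allocation */
	/* kfree(f) would lean on slab internals and is not guaranteed safe */
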
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index df7ca2d..5669198 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2288,13 +2288,13 @@
 
 	}
 
-	kfree(x);
+	xfrm_state_free(x);
 	kfree(xp);
 
 	return 0;
 
 free_state:
-	kfree(x);
+	xfrm_state_free(x);
 nomem:
 	return err;
 }
diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
index 904e775..cf40a82 100644
--- a/samples/bpf/bpf_load.c
+++ b/samples/bpf/bpf_load.c
@@ -55,6 +55,25 @@
 	return 0;
 }
 
+static int write_kprobe_events(const char *val)
+{
+	int fd, ret, flags;
+
+	if ((val != NULL) && (val[0] == '\0'))
+		flags = O_WRONLY | O_TRUNC;
+	else
+		flags = O_WRONLY | O_APPEND;
+
+	fd = open("/sys/kernel/debug/tracing/kprobe_events", flags);
+	if (fd < 0)
+		return fd;
+
+	ret = write(fd, val, strlen(val));
+	close(fd);
+
+	return ret;
+}
+
 static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
 {
 	bool is_socket = strncmp(event, "socket", 6) == 0;
@@ -166,10 +183,9 @@
 
 #ifdef __x86_64__
 		if (strncmp(event, "sys_", 4) == 0) {
-			snprintf(buf, sizeof(buf),
-				 "echo '%c:__x64_%s __x64_%s' >> /sys/kernel/debug/tracing/kprobe_events",
-				 is_kprobe ? 'p' : 'r', event, event);
-			err = system(buf);
+			snprintf(buf, sizeof(buf), "%c:__x64_%s __x64_%s",
+				is_kprobe ? 'p' : 'r', event, event);
+			err = write_kprobe_events(buf);
 			if (err >= 0) {
 				need_normal_check = false;
 				event_prefix = "__x64_";
@@ -177,10 +193,9 @@
 		}
 #endif
 		if (need_normal_check) {
-			snprintf(buf, sizeof(buf),
-				 "echo '%c:%s %s' >> /sys/kernel/debug/tracing/kprobe_events",
-				 is_kprobe ? 'p' : 'r', event, event);
-			err = system(buf);
+			snprintf(buf, sizeof(buf), "%c:%s %s",
+				is_kprobe ? 'p' : 'r', event, event);
+			err = write_kprobe_events(buf);
 			if (err < 0) {
 				printf("failed to create kprobe '%s' error '%s'\n",
 				       event, strerror(errno));
@@ -520,7 +535,7 @@
 		return 1;
 
 	/* clear all kprobes */
-	i = system("echo \"\" > /sys/kernel/debug/tracing/kprobe_events");
+	i = write_kprobe_events("");
 
 	/* scan over all elf sections to get license and map info */
 	for (i = 1; i < ehdr.e_shnum; i++) {
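
The helper writes the tracefs kprobe_events control file directly instead of spawning a shell: "p:EVENT SYMBOL" adds a kprobe, "r:EVENT SYMBOL" a kretprobe, and an empty write with O_TRUNC clears all dynamic probes. A minimal stand-alone sketch of the same interface (my_open is a hypothetical event name; error handling trimmed):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char *probe = "p:my_open do_sys_open";
		int fd = open("/sys/kernel/debug/tracing/kprobe_events",
			      O_WRONLY | O_APPEND);

		if (fd < 0)
			return 1;
		write(fd, probe, strlen(probe));	/* register the kprobe */
		close(fd);
		return 0;
	}
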
diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl
index 8081b6c..34414c6 100755
--- a/scripts/checkstack.pl
+++ b/scripts/checkstack.pl
@@ -47,8 +47,8 @@
 	$xs	= "[0-9a-f ]";	# hex character or space
 	$funcre = qr/^$x* <(.*)>:$/;
 	if ($arch eq 'aarch64') {
-		#ffffffc0006325cc:       a9bb7bfd        stp     x29, x30, [sp,#-80]!
-		$re = qr/^.*stp.*sp,\#-([0-9]{1,8})\]\!/o;
+		#ffffffc0006325cc:       a9bb7bfd        stp     x29, x30, [sp, #-80]!
+		$re = qr/^.*stp.*sp, \#-([0-9]{1,8})\]\!/o;
 	} elsif ($arch eq 'arm') {
 		#c0008ffc:	e24dd064	sub	sp, sp, #100	; 0x64
 		$re = qr/.*sub.*sp, sp, #(([0-9]{2}|[3-9])[0-9]{2})/o;
diff --git a/scripts/kconfig/zconf.l b/scripts/kconfig/zconf.l
index 25bd2b8..c2f577d 100644
--- a/scripts/kconfig/zconf.l
+++ b/scripts/kconfig/zconf.l
@@ -73,7 +73,7 @@
 {
 	fprintf(stderr,
 	        "%s:%d:warning: ignoring unsupported character '%c'\n",
-	        zconf_curname(), zconf_lineno(), chr);
+	        current_file->name, yylineno, chr);
 }
 %}
 
@@ -221,6 +221,8 @@
 	}
 	<<EOF>>	{
 		BEGIN(INITIAL);
+		yylval.string = text;
+		return T_WORD_QUOTE;
 	}
 }
 
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 0d998c54..5a5b378 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -2157,7 +2157,7 @@
 /* Cannot check for assembler */
 static void add_retpoline(struct buffer *b)
 {
-	buf_printf(b, "\n#ifdef RETPOLINE\n");
+	buf_printf(b, "\n#ifdef CONFIG_RETPOLINE\n");
 	buf_printf(b, "MODULE_INFO(retpoline, \"Y\");\n");
 	buf_printf(b, "#endif\n");
 }
diff --git a/security/security.c b/security/security.c
index 957be34..f71c586 100644
--- a/security/security.c
+++ b/security/security.c
@@ -1004,6 +1004,13 @@
 
 void security_cred_free(struct cred *cred)
 {
+	/*
+	 * There is a failure case in prepare_creds() that
+	 * may result in a call here with ->security being NULL.
+	 */
+	if (unlikely(cred->security == NULL))
+		return;
+
 	call_void_hook(cred_free, cred);
 }
 
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index fe251c6..3c3878f 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2934,7 +2934,7 @@
 		return rc;
 
 	/* Allow all mounts performed by the kernel */
-	if (flags & MS_KERNMOUNT)
+	if (flags & (MS_KERNMOUNT | MS_SUBMOUNT))
 		return 0;
 
 	ad.type = LSM_AUDIT_DATA_DENTRY;
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index f4eadd3..d31a52e 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -732,7 +732,8 @@
 	kfree(key);
 	if (datum) {
 		levdatum = datum;
-		ebitmap_destroy(&levdatum->level->cat);
+		if (levdatum->level)
+			ebitmap_destroy(&levdatum->level->cat);
 		kfree(levdatum->level);
 	}
 	kfree(datum);
@@ -2108,6 +2109,7 @@
 {
 	int i, j, rc;
 	u32 nel, len;
+	__be64 prefixbuf[1];
 	__le32 buf[3];
 	struct ocontext *l, *c;
 	u32 nodebuf[8];
@@ -2217,21 +2219,30 @@
 					goto out;
 				break;
 			}
-			case OCON_IBPKEY:
-				rc = next_entry(nodebuf, fp, sizeof(u32) * 4);
+			case OCON_IBPKEY: {
+				u32 pkey_lo, pkey_hi;
+
+				rc = next_entry(prefixbuf, fp, sizeof(u64));
 				if (rc)
 					goto out;
 
-				c->u.ibpkey.subnet_prefix = be64_to_cpu(*((__be64 *)nodebuf));
+				/* we need to have subnet_prefix in CPU order */
+				c->u.ibpkey.subnet_prefix = be64_to_cpu(prefixbuf[0]);
 
-				if (nodebuf[2] > 0xffff ||
-				    nodebuf[3] > 0xffff) {
+				rc = next_entry(buf, fp, sizeof(u32) * 2);
+				if (rc)
+					goto out;
+
+				pkey_lo = le32_to_cpu(buf[0]);
+				pkey_hi = le32_to_cpu(buf[1]);
+
+				if (pkey_lo > U16_MAX || pkey_hi > U16_MAX) {
 					rc = -EINVAL;
 					goto out;
 				}
 
-				c->u.ibpkey.low_pkey = le32_to_cpu(nodebuf[2]);
-				c->u.ibpkey.high_pkey = le32_to_cpu(nodebuf[3]);
+				c->u.ibpkey.low_pkey  = pkey_lo;
+				c->u.ibpkey.high_pkey = pkey_hi;
 
 				rc = context_read_and_validate(&c->context[0],
 							       p,
@@ -2239,7 +2250,10 @@
 				if (rc)
 					goto out;
 				break;
-			case OCON_IBENDPORT:
+			}
+			case OCON_IBENDPORT: {
+				u32 port;
+
 				rc = next_entry(buf, fp, sizeof(u32) * 2);
 				if (rc)
 					goto out;
@@ -2249,12 +2263,13 @@
 				if (rc)
 					goto out;
 
-				if (buf[1] > 0xff || buf[1] == 0) {
+				port = le32_to_cpu(buf[1]);
+				if (port > U8_MAX || port == 0) {
 					rc = -EINVAL;
 					goto out;
 				}
 
-				c->u.ibendport.port = le32_to_cpu(buf[1]);
+				c->u.ibendport.port = port;
 
 				rc = context_read_and_validate(&c->context[0],
 							       p,
@@ -2262,7 +2277,8 @@
 				if (rc)
 					goto out;
 				break;
-			}
+			} /* end case */
+			} /* end switch */
 		}
 	}
 	rc = 0;
@@ -3105,6 +3121,7 @@
 {
 	unsigned int i, j, rc;
 	size_t nel, len;
+	__be64 prefixbuf[1];
 	__le32 buf[3];
 	u32 nodebuf[8];
 	struct ocontext *c;
@@ -3192,12 +3209,17 @@
 					return rc;
 				break;
 			case OCON_IBPKEY:
-				*((__be64 *)nodebuf) = cpu_to_be64(c->u.ibpkey.subnet_prefix);
+				/* subnet_prefix is in CPU order */
+				prefixbuf[0] = cpu_to_be64(c->u.ibpkey.subnet_prefix);
 
-				nodebuf[2] = cpu_to_le32(c->u.ibpkey.low_pkey);
-				nodebuf[3] = cpu_to_le32(c->u.ibpkey.high_pkey);
+				rc = put_entry(prefixbuf, sizeof(u64), 1, fp);
+				if (rc)
+					return rc;
 
-				rc = put_entry(nodebuf, sizeof(u32), 4, fp);
+				buf[0] = cpu_to_le32(c->u.ibpkey.low_pkey);
+				buf[1] = cpu_to_le32(c->u.ibpkey.high_pkey);
+
+				rc = put_entry(buf, sizeof(u32), 2, fp);
 				if (rc)
 					return rc;
 				rc = context_write(p, &c->context[0], fp);
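
The old OCON_IBPKEY code read four u32s into nodebuf and type-punned the first two as a __be64, mixing the on-disk byte orders (the subnet prefix is stored big-endian, the pkey pair little-endian) and risking an unaligned 64-bit access. The fix reads each field with its proper type; condensed sketch of the read side (error paths trimmed):

	__be64 prefix;
	__le32 pair[2];
	u32 lo, hi;

	rc = next_entry(&prefix, fp, sizeof(prefix));
	subnet_prefix = be64_to_cpu(prefix);	/* big-endian on disk */

	rc = next_entry(pair, fp, sizeof(pair));
	lo = le32_to_cpu(pair[0]);		/* little-endian on disk */
	hi = le32_to_cpu(pair[1]);
	if (lo > U16_MAX || hi > U16_MAX)
		return -EINVAL;
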
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index ffda91a..02514fe 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -368,7 +368,9 @@
 			break;
 		case YAMA_SCOPE_RELATIONAL:
 			rcu_read_lock();
-			if (!task_is_descendant(current, child) &&
+			if (!pid_alive(child))
+				rc = -EPERM;
+			if (!rc && !task_is_descendant(current, child) &&
 			    !ptracer_exception_found(current, child) &&
 			    !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
 				rc = -EPERM;
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 1214d88..0477c5d 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -25,6 +25,7 @@
 #include <linux/time.h>
 #include <linux/mutex.h>
 #include <linux/device.h>
+#include <linux/nospec.h>
 #include <sound/core.h>
 #include <sound/minors.h>
 #include <sound/pcm.h>
@@ -129,6 +130,7 @@
 				return -EFAULT;
 			if (stream < 0 || stream > 1)
 				return -EINVAL;
+			stream = array_index_nospec(stream, 2);
 			if (get_user(subdevice, &info->subdevice))
 				return -EFAULT;
 			mutex_lock(&register_mutex);
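
This is the Spectre variant 1 hardening idiom that recurs below in emufx.c, emux_hwdep.c and hdsp.c: after the architectural bounds check, the index is clamped so that even a mispredicted (speculative) path cannot use an out-of-range value to steer a dependent load. Shape of the pattern (idx, NR_ITEMS and table are placeholders):

	#include <linux/nospec.h>

	if (idx < 0 || idx >= NR_ITEMS)
		return -EINVAL;
	idx = array_index_nospec(idx, NR_ITEMS);	/* clamped even under speculation */
	val = table[idx];
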
diff --git a/sound/firewire/Kconfig b/sound/firewire/Kconfig
index 529d9f4..0cb65d0 100644
--- a/sound/firewire/Kconfig
+++ b/sound/firewire/Kconfig
@@ -41,6 +41,7 @@
 	   * Mackie(Loud) U.420/U.420d
 	   * TASCAM FireOne
 	   * Stanton Controllers & Systems 1 Deck/Mixer
+	   * APOGEE duet FireWire
 
 	  To compile this driver as a module, choose M here: the module
 	  will be called snd-oxfw.
diff --git a/sound/firewire/amdtp-stream-trace.h b/sound/firewire/amdtp-stream-trace.h
index 54cdd4f..ac20acf 100644
--- a/sound/firewire/amdtp-stream-trace.h
+++ b/sound/firewire/amdtp-stream-trace.h
@@ -131,7 +131,7 @@
 		__entry->index = index;
 	),
 	TP_printk(
-		"%02u %04u %04x %04x %02d %03u %3u %3u %02u %01u %02u",
+		"%02u %04u %04x %04x %02d %03u %02u %03u %02u %01u %02u",
 		__entry->second,
 		__entry->cycle,
 		__entry->src,
@@ -169,7 +169,7 @@
 		__entry->dest = fw_parent_device(s->unit)->node_id;
 		__entry->payload_quadlets = payload_length / 4;
 		__entry->data_blocks = data_blocks,
-		__entry->data_blocks = s->data_block_counter,
+		__entry->data_block_counter = s->data_block_counter,
 		__entry->packet_index = s->packet_index;
 		__entry->irq = !!in_interrupt();
 		__entry->index = index;
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index cb9acfe..293933f 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -629,15 +629,17 @@
 }
 
 static int handle_in_packet_without_header(struct amdtp_stream *s,
-			unsigned int payload_quadlets, unsigned int cycle,
+			unsigned int payload_length, unsigned int cycle,
 			unsigned int index)
 {
 	__be32 *buffer;
+	unsigned int payload_quadlets;
 	unsigned int data_blocks;
 	struct snd_pcm_substream *pcm;
 	unsigned int pcm_frames;
 
 	buffer = s->buffer.packets[s->packet_index].buffer;
+	payload_quadlets = payload_length / 4;
 	data_blocks = payload_quadlets / s->data_block_quadlets;
 
 	trace_in_packet_without_header(s, cycle, payload_quadlets, data_blocks,
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
index 9367635..de4af8a 100644
--- a/sound/firewire/bebob/bebob.c
+++ b/sound/firewire/bebob/bebob.c
@@ -434,7 +434,7 @@
 	/* Apogee Electronics, DA/AD/DD-16X (X-FireWire card) */
 	SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x00010048, &spec_normal),
 	/* Apogee Electronics, Ensemble */
-	SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x00001eee, &spec_normal),
+	SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x01eeee, &spec_normal),
 	/* ESI, Quatafire610 */
 	SND_BEBOB_DEV_ENTRY(VEN_ESI, 0x00010064, &spec_normal),
 	/* AcousticReality, eARMasterOne */
diff --git a/sound/firewire/fireface/ff-protocol-ff400.c b/sound/firewire/fireface/ff-protocol-ff400.c
index 654a503..4d19117 100644
--- a/sound/firewire/fireface/ff-protocol-ff400.c
+++ b/sound/firewire/fireface/ff-protocol-ff400.c
@@ -152,7 +152,7 @@
 	if (reg == NULL)
 		return -ENOMEM;
 
-	if (enable) {
+	if (!enable) {
 		/*
 		 * Each quadlet is corresponding to data channels in a data
 		 * blocks in reverse order. Precisely, quadlets for available
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
index 2ea8be6..5f82a37 100644
--- a/sound/firewire/oxfw/oxfw.c
+++ b/sound/firewire/oxfw/oxfw.c
@@ -20,6 +20,7 @@
 #define VENDOR_LACIE		0x00d04b
 #define VENDOR_TASCAM		0x00022e
 #define OUI_STANTON		0x001260
+#define OUI_APOGEE		0x0003db
 
 #define MODEL_SATELLITE		0x00200f
 
@@ -436,6 +437,13 @@
 		.vendor_id	= OUI_STANTON,
 		.model_id	= 0x002000,
 	},
+	// APOGEE, duet FireWire
+	{
+		.match_flags	= IEEE1394_MATCH_VENDOR_ID |
+				  IEEE1394_MATCH_MODEL_ID,
+		.vendor_id	= OUI_APOGEE,
+		.model_id	= 0x01dddd,
+	},
 	{ }
 };
 MODULE_DEVICE_TABLE(ieee1394, oxfw_id_table);
diff --git a/sound/pci/cs46xx/dsp_spos.c b/sound/pci/cs46xx/dsp_spos.c
index 598d140..5fc497c 100644
--- a/sound/pci/cs46xx/dsp_spos.c
+++ b/sound/pci/cs46xx/dsp_spos.c
@@ -903,6 +903,9 @@
 	struct dsp_spos_instance * ins = chip->dsp_spos_instance;
 	int i;
 
+	if (!ins)
+		return 0;
+
 	snd_info_free_entry(ins->proc_sym_info_entry);
 	ins->proc_sym_info_entry = NULL;
 
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index 6ebe817..1f25e6d 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -36,6 +36,7 @@
 #include <linux/init.h>
 #include <linux/mutex.h>
 #include <linux/moduleparam.h>
+#include <linux/nospec.h>
 
 #include <sound/core.h>
 #include <sound/tlv.h>
@@ -1026,6 +1027,8 @@
 
 	if (ipcm->substream >= EMU10K1_FX8010_PCM_COUNT)
 		return -EINVAL;
+	ipcm->substream = array_index_nospec(ipcm->substream,
+					     EMU10K1_FX8010_PCM_COUNT);
 	if (ipcm->channels > 32)
 		return -EINVAL;
 	pcm = &emu->fx8010.pcm[ipcm->substream];
@@ -1072,6 +1075,8 @@
 
 	if (ipcm->substream >= EMU10K1_FX8010_PCM_COUNT)
 		return -EINVAL;
+	ipcm->substream = array_index_nospec(ipcm->substream,
+					     EMU10K1_FX8010_PCM_COUNT);
 	pcm = &emu->fx8010.pcm[ipcm->substream];
 	mutex_lock(&emu->fx8010.lock);
 	spin_lock_irq(&emu->reg_lock);
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
index 0621920..e85fb04 100644
--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -249,10 +249,12 @@
 	struct snd_card *card = dev_get_drvdata(dev);
 	struct azx *chip = card->private_data;
 	struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip);
+	struct hdac_bus *bus = azx_bus(chip);
 
 	snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
 
 	azx_stop_chip(chip);
+	synchronize_irq(bus->irq);
 	azx_enter_link_reset(chip);
 	hda_tegra_disable_clocks(hda);
 
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 3c5f2a6..31a84a5 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -923,6 +923,7 @@
 	SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
 	SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
 	SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
+	SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
 	SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
 	SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
 	SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
@@ -930,6 +931,7 @@
 	SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
 	SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
 	SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
+	SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
 	SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 8a3d069..dbb38fe 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -117,6 +117,7 @@
 	int codec_variant;	/* flag for other variants */
 	unsigned int has_alc5505_dsp:1;
 	unsigned int no_depop_delay:1;
+	unsigned int done_hp_init:1;
 
 	/* for PLL fix */
 	hda_nid_t pll_nid;
@@ -3372,6 +3373,48 @@
 	snd_hda_shutup_pins(codec);
 }
 
+static void alc294_hp_init(struct hda_codec *codec)
+{
+	struct alc_spec *spec = codec->spec;
+	hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+	int i, val;
+
+	if (!hp_pin)
+		return;
+
+	snd_hda_codec_write(codec, hp_pin, 0,
+			    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+
+	msleep(100);
+
+	snd_hda_codec_write(codec, hp_pin, 0,
+			    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
+	alc_update_coef_idx(codec, 0x6f, 0x000f, 0); /* Set HP depop to manual mode */
+	alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
+
+	/* Wait for the depop procedure to finish */
+	val = alc_read_coefex_idx(codec, 0x58, 0x01);
+	for (i = 0; i < 20 && val & 0x0080; i++) {
+		msleep(50);
+		val = alc_read_coefex_idx(codec, 0x58, 0x01);
+	}
+	/* Set HP depop to auto mode */
+	alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
+	msleep(50);
+}
+
+static void alc294_init(struct hda_codec *codec)
+{
+	struct alc_spec *spec = codec->spec;
+
+	if (!spec->done_hp_init) {
+		alc294_hp_init(codec);
+		spec->done_hp_init = true;
+	}
+	alc_default_init(codec);
+}
+
 static void alc5505_coef_set(struct hda_codec *codec, unsigned int index_reg,
 			     unsigned int val)
 {
@@ -4102,6 +4145,7 @@
 	case 0x10ec0295:
 	case 0x10ec0289:
 	case 0x10ec0299:
+		alc_process_coef_fw(codec, alc225_pre_hsmode);
 		alc_process_coef_fw(codec, coef0225);
 		break;
 	case 0x10ec0867:
@@ -5380,6 +5424,13 @@
 	snd_hda_override_wcaps(codec, 0x03, 0);
 }
 
+static void alc_fixup_disable_mic_vref(struct hda_codec *codec,
+				  const struct hda_fixup *fix, int action)
+{
+	if (action == HDA_FIXUP_ACT_PRE_PROBE)
+		snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ);
+}
+
 /* for hda_fixup_thinkpad_acpi() */
 #include "thinkpad_helper.c"
 
@@ -5492,6 +5543,7 @@
 	ALC293_FIXUP_LENOVO_SPK_NOISE,
 	ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
 	ALC255_FIXUP_DELL_SPK_NOISE,
+	ALC225_FIXUP_DISABLE_MIC_VREF,
 	ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
 	ALC295_FIXUP_DISABLE_DAC3,
 	ALC280_FIXUP_HP_HEADSET_MIC,
@@ -6191,6 +6243,12 @@
 		.chained = true,
 		.chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
 	},
+	[ALC225_FIXUP_DISABLE_MIC_VREF] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc_fixup_disable_mic_vref,
+		.chained = true,
+		.chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
+	},
 	[ALC225_FIXUP_DELL1_MIC_NO_PRESENCE] = {
 		.type = HDA_FIXUP_VERBS,
 		.v.verbs = (const struct hda_verb[]) {
@@ -6200,7 +6258,7 @@
 			{}
 		},
 		.chained = true,
-		.chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
+		.chain_id = ALC225_FIXUP_DISABLE_MIC_VREF
 	},
 	[ALC280_FIXUP_HP_HEADSET_MIC] = {
 		.type = HDA_FIXUP_FUNC,
@@ -6424,7 +6482,7 @@
 	[ALC294_FIXUP_ASUS_HEADSET_MIC] = {
 		.type = HDA_FIXUP_PINS,
 		.v.pins = (const struct hda_pintbl[]) {
-			{ 0x19, 0x01a1113c }, /* use as headset mic, without its own jack detect */
+			{ 0x19, 0x01a1103c }, /* use as headset mic */
 			{ }
 		},
 		.chained = true,
@@ -6503,6 +6561,7 @@
 	SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
 	SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
 	SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB),
+	SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -6573,6 +6632,7 @@
 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+	SND_PCI_QUIRK(0x1043, 0x10a1, "ASUS UX391UA", ALC294_FIXUP_ASUS_SPK),
 	SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
 	SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -6825,7 +6885,7 @@
 	{.id = ALC293_FIXUP_LENOVO_SPK_NOISE, .name = "lenovo-spk-noise"},
 	{.id = ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, .name = "lenovo-hotkey"},
 	{.id = ALC255_FIXUP_DELL_SPK_NOISE, .name = "dell-spk-noise"},
-	{.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc255-dell1"},
+	{.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc225-dell1"},
 	{.id = ALC295_FIXUP_DISABLE_DAC3, .name = "alc295-disable-dac3"},
 	{.id = ALC280_FIXUP_HP_HEADSET_MIC, .name = "alc280-hp-headset"},
 	{.id = ALC221_FIXUP_HP_FRONT_MIC, .name = "alc221-hp-mic"},
@@ -7271,37 +7331,6 @@
 	alc_update_coef_idx(codec, 0x4, 0, 1<<11);
 }
 
-static void alc294_hp_init(struct hda_codec *codec)
-{
-	struct alc_spec *spec = codec->spec;
-	hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
-	int i, val;
-
-	if (!hp_pin)
-		return;
-
-	snd_hda_codec_write(codec, hp_pin, 0,
-			    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
-
-	msleep(100);
-
-	snd_hda_codec_write(codec, hp_pin, 0,
-			    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
-
-	alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
-	alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
-
-	/* Wait for depop procedure finish  */
-	val = alc_read_coefex_idx(codec, 0x58, 0x01);
-	for (i = 0; i < 20 && val & 0x0080; i++) {
-		msleep(50);
-		val = alc_read_coefex_idx(codec, 0x58, 0x01);
-	}
-	/* Set HP depop to auto mode */
-	alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
-	msleep(50);
-}
-
 /*
  */
 static int patch_alc269(struct hda_codec *codec)
@@ -7427,7 +7456,7 @@
 		spec->codec_variant = ALC269_TYPE_ALC294;
 		spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */
 		alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */
-		alc294_hp_init(codec);
+		spec->init_hook = alc294_init;
 		break;
 	case 0x10ec0300:
 		spec->codec_variant = ALC269_TYPE_ALC300;
@@ -7439,7 +7468,7 @@
 		spec->codec_variant = ALC269_TYPE_ALC700;
 		spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
 		alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
-		alc294_hp_init(codec);
+		spec->init_hook = alc294_init;
 		break;
 
 	}
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index 1bff4b1..ba99ff0 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -30,6 +30,7 @@
 #include <linux/math64.h>
 #include <linux/vmalloc.h>
 #include <linux/io.h>
+#include <linux/nospec.h>
 
 #include <sound/core.h>
 #include <sound/control.h>
@@ -4092,15 +4093,16 @@
 				    struct snd_pcm_channel_info *info)
 {
 	struct hdsp *hdsp = snd_pcm_substream_chip(substream);
-	int mapped_channel;
+	unsigned int channel = info->channel;
 
-	if (snd_BUG_ON(info->channel >= hdsp->max_channels))
+	if (snd_BUG_ON(channel >= hdsp->max_channels))
+		return -EINVAL;
+	channel = array_index_nospec(channel, hdsp->max_channels);
+
+	if (hdsp->channel_map[channel] < 0)
 		return -EINVAL;
 
-	if ((mapped_channel = hdsp->channel_map[info->channel]) < 0)
-		return -EINVAL;
-
-	info->offset = mapped_channel * HDSP_CHANNEL_BUFFER_BYTES;
+	info->offset = hdsp->channel_map[channel] * HDSP_CHANNEL_BUFFER_BYTES;
 	info->first = 0;
 	info->step = 32;
 	return 0;
diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
index 3135e9e..7f376b6 100644
--- a/sound/soc/amd/acp-pcm-dma.c
+++ b/sound/soc/amd/acp-pcm-dma.c
@@ -1147,18 +1147,21 @@
 	struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd,
 								    DRV_NAME);
 	struct audio_drv_data *adata = dev_get_drvdata(component->dev);
+	struct device *parent = component->dev->parent;
 
 	switch (adata->asic_type) {
 	case CHIP_STONEY:
 		ret = snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
 							    SNDRV_DMA_TYPE_DEV,
-							    NULL, ST_MIN_BUFFER,
+							    parent,
+							    ST_MIN_BUFFER,
 							    ST_MAX_BUFFER);
 		break;
 	default:
 		ret = snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
 							    SNDRV_DMA_TYPE_DEV,
-							    NULL, MIN_BUFFER,
+							    parent,
+							    MIN_BUFFER,
 							    MAX_BUFFER);
 		break;
 	}
diff --git a/sound/soc/codecs/pcm3168a.c b/sound/soc/codecs/pcm3168a.c
index 3356c91..e3de1ff 100644
--- a/sound/soc/codecs/pcm3168a.c
+++ b/sound/soc/codecs/pcm3168a.c
@@ -688,15 +688,22 @@
 }
 EXPORT_SYMBOL_GPL(pcm3168a_probe);
 
-void pcm3168a_remove(struct device *dev)
+static void pcm3168a_disable(struct device *dev)
 {
 	struct pcm3168a_priv *pcm3168a = dev_get_drvdata(dev);
 
-	pm_runtime_disable(dev);
 	regulator_bulk_disable(ARRAY_SIZE(pcm3168a->supplies),
-				pcm3168a->supplies);
+			       pcm3168a->supplies);
 	clk_disable_unprepare(pcm3168a->scki);
 }
+
+void pcm3168a_remove(struct device *dev)
+{
+	pm_runtime_disable(dev);
+#ifndef CONFIG_PM
+	pcm3168a_disable(dev);
+#endif
+}
 EXPORT_SYMBOL_GPL(pcm3168a_remove);
 
 #ifdef CONFIG_PM
@@ -751,10 +758,7 @@
 
 	regcache_cache_only(pcm3168a->regmap, true);
 
-	regulator_bulk_disable(ARRAY_SIZE(pcm3168a->supplies),
-			       pcm3168a->supplies);
-
-	clk_disable_unprepare(pcm3168a->scki);
+	pcm3168a_disable(dev);
 
 	return 0;
 }
diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c
index 6478d10..cdb1f40 100644
--- a/sound/soc/codecs/rt5514-spi.c
+++ b/sound/soc/codecs/rt5514-spi.c
@@ -278,6 +278,8 @@
 
 	rt5514_dsp = devm_kzalloc(component->dev, sizeof(*rt5514_dsp),
 			GFP_KERNEL);
+	if (!rt5514_dsp)
+		return -ENOMEM;
 
 	rt5514_dsp->dev = &rt5514_spi->dev;
 	mutex_init(&rt5514_dsp->dma_lock);
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index e2b5a11..f03195d 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -822,6 +822,10 @@
 	case SND_SOC_BIAS_PREPARE:
 		break;
 	case SND_SOC_BIAS_STANDBY:
+		/* Initial cold start */
+		if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF)
+			break;
+
 		/* Switch off BCLK_N Divider */
 		snd_soc_component_update_bits(component, AIC32X4_BCLKN,
 				    AIC32X4_BCLKEN, 0);
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
index 6c36da5..e662400 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
@@ -399,7 +399,12 @@
 				struct snd_pcm_hw_params *params,
 				struct snd_soc_dai *dai)
 {
-	snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
+	int ret;
+
+	ret = snd_pcm_lib_malloc_pages(substream,
+				       params_buffer_bytes(params));
+	if (ret)
+		return ret;
 	memset(substream->runtime->dma_area, 0, params_buffer_bytes(params));
 	return 0;
 }
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
index 9d9f6e4..08a5152 100644
--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
@@ -390,6 +390,20 @@
 
 static const struct dmi_system_id cht_max98090_quirk_table[] = {
 	{
+		/* Clapper model Chromebook */
+		.matches = {
+			DMI_MATCH(DMI_PRODUCT_NAME, "Clapper"),
+		},
+		.driver_data = (void *)QUIRK_PMC_PLT_CLK_0,
+	},
+	{
+		/* Gnawty model Chromebook (Acer Chromebook CB3-111) */
+		.matches = {
+			DMI_MATCH(DMI_PRODUCT_NAME, "Gnawty"),
+		},
+		.driver_data = (void *)QUIRK_PMC_PLT_CLK_0,
+	},
+	{
 		/* Swanky model Chromebook (Toshiba Chromebook 2) */
 		.matches = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Swanky"),
diff --git a/sound/synth/emux/emux_hwdep.c b/sound/synth/emux/emux_hwdep.c
index e557946..d9fcae0 100644
--- a/sound/synth/emux/emux_hwdep.c
+++ b/sound/synth/emux/emux_hwdep.c
@@ -22,9 +22,9 @@
 #include <sound/core.h>
 #include <sound/hwdep.h>
 #include <linux/uaccess.h>
+#include <linux/nospec.h>
 #include "emux_voice.h"
 
-
 #define TMP_CLIENT_ID	0x1001
 
 /*
@@ -66,13 +66,16 @@
 		return -EFAULT;
 	if (info.mode < 0 || info.mode >= EMUX_MD_END)
 		return -EINVAL;
+	info.mode = array_index_nospec(info.mode, EMUX_MD_END);
 
 	if (info.port < 0) {
 		for (i = 0; i < emu->num_ports; i++)
 			emu->portptrs[i]->ctrls[info.mode] = info.value;
 	} else {
-		if (info.port < emu->num_ports)
+		if (info.port < emu->num_ports) {
+			info.port = array_index_nospec(info.port, emu->num_ports);
 			emu->portptrs[info.port]->ctrls[info.mode] = info.value;
+		}
 	}
 	return 0;
 }
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 8ecf5bb..e5b52a6 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -324,7 +324,7 @@
 		h1 = snd_usb_find_csint_desc(host_iface->extra,
 							 host_iface->extralen,
 							 NULL, UAC_HEADER);
-		if (!h1) {
+		if (!h1 || h1->bLength < sizeof(*h1)) {
 			dev_err(&dev->dev, "cannot find UAC_HEADER\n");
 			return -EINVAL;
 		}
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index d7389ed..2ea0519 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -753,8 +753,9 @@
 				       struct uac_mixer_unit_descriptor *desc)
 {
 	int mu_channels;
+	void *c;
 
-	if (desc->bLength < 11)
+	if (desc->bLength < sizeof(*desc))
 		return -EINVAL;
 	if (!desc->bNrInPins)
 		return -EINVAL;
@@ -763,6 +764,8 @@
 	case UAC_VERSION_1:
 	case UAC_VERSION_2:
 	default:
+		if (desc->bLength < sizeof(*desc) + desc->bNrInPins + 1)
+			return 0; /* no bmControls -> skip */
 		mu_channels = uac_mixer_unit_bNrChannels(desc);
 		break;
 	case UAC_VERSION_3:
@@ -772,7 +775,11 @@
 	}
 
 	if (!mu_channels)
-		return -EINVAL;
+		return 0;
+
+	c = uac_mixer_unit_bmControls(desc, state->mixer->protocol);
+	if (c - (void *)desc + (mu_channels - 1) / 8 >= desc->bLength)
+		return 0; /* no bmControls -> skip */
 
 	return mu_channels;
 }
@@ -944,7 +951,7 @@
 				struct uac_mixer_unit_descriptor *d = p1;
 
 				err = uac_mixer_unit_get_channels(state, d);
-				if (err < 0)
+				if (err <= 0)
 					return err;
 
 				term->channels = err;
@@ -2070,11 +2077,15 @@
 
 	if (state->mixer->protocol == UAC_VERSION_2) {
 		struct uac2_input_terminal_descriptor *d_v2 = raw_desc;
+		if (d_v2->bLength < sizeof(*d_v2))
+			return -EINVAL;
 		control = UAC2_TE_CONNECTOR;
 		term_id = d_v2->bTerminalID;
 		bmctls = le16_to_cpu(d_v2->bmControls);
 	} else if (state->mixer->protocol == UAC_VERSION_3) {
 		struct uac3_input_terminal_descriptor *d_v3 = raw_desc;
+		if (d_v3->bLength < sizeof(*d_v3))
+			return -EINVAL;
 		control = UAC3_TE_INSERTION;
 		term_id = d_v3->bTerminalID;
 		bmctls = le32_to_cpu(d_v3->bmControls);
@@ -2120,7 +2131,7 @@
 		if (err < 0)
 			continue;
 		/* no bmControls field (e.g. Maya44) -> ignore */
-		if (desc->bLength <= 10 + input_pins)
+		if (!num_outs)
 			continue;
 		err = check_input_term(state, desc->baSourceID[pin], &iterm);
 		if (err < 0)
@@ -2316,7 +2327,7 @@
 				char *name)
 {
 	struct uac_processing_unit_descriptor *desc = raw_desc;
-	int num_ins = desc->bNrInPins;
+	int num_ins;
 	struct usb_mixer_elem_info *cval;
 	struct snd_kcontrol *kctl;
 	int i, err, nameid, type, len;
@@ -2331,7 +2342,13 @@
 		0, NULL, default_value_info
 	};
 
-	if (desc->bLength < 13 || desc->bLength < 13 + num_ins ||
+	if (desc->bLength < 13) {
+		usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid);
+		return -EINVAL;
+	}
+
+	num_ins = desc->bNrInPins;
+	if (desc->bLength < 13 + num_ins ||
 	    desc->bLength < num_ins + uac_processing_unit_bControlSize(desc, state->mixer->protocol)) {
 		usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid);
 		return -EINVAL;
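
Common thread in the USB-audio hunks (card.c above, mixer.c here, stream.c below): every descriptor field past the fixed header must be covered by the device-supplied bLength before it is dereferenced, since a malicious or broken device chooses that value. Sketch of the check with a made-up descriptor layout:

	struct my_unit_desc {
		__u8 bLength;
		__u8 bDescriptorType;
		__u8 bDescriptorSubtype;
		__u8 bNrInPins;
		__u8 baSourceID[];	/* bNrInPins entries, then more fields */
	};

	static int check_unit(const struct my_unit_desc *d)
	{
		if (d->bLength < sizeof(*d))			/* fixed part present? */
			return -EINVAL;
		if (d->bLength < sizeof(*d) + d->bNrInPins)	/* variable tail too? */
			return -EINVAL;
		return 0;
	}
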
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 1c73b9e..57c6209 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -3326,6 +3326,9 @@
 					}
 				}
 			},
+			{
+				.ifnum = -1
+			},
 		}
 	}
 },
@@ -3374,6 +3377,9 @@
 					}
 				}
 			},
+			{
+				.ifnum = -1
+			},
 		}
 	}
 },
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 6623caf..7e93686 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1373,6 +1373,7 @@
 			return SNDRV_PCM_FMTBIT_DSD_U32_BE;
 		break;
 
+	case USB_ID(0x10cb, 0x0103): /* The Bit Opus #3; with fp->dsd_raw */
 	case USB_ID(0x152a, 0x85de): /* SMSL D1 DAC */
 	case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
 	case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index b843015..f6ce6d5 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -601,12 +601,8 @@
 		csep = snd_usb_find_desc(alts->extra, alts->extralen, NULL, USB_DT_CS_ENDPOINT);
 
 	if (!csep || csep->bLength < 7 ||
-	    csep->bDescriptorSubtype != UAC_EP_GENERAL) {
-		usb_audio_warn(chip,
-			       "%u:%d : no or invalid class specific endpoint descriptor\n",
-			       iface_no, altsd->bAlternateSetting);
-		return 0;
-	}
+	    csep->bDescriptorSubtype != UAC_EP_GENERAL)
+		goto error;
 
 	if (protocol == UAC_VERSION_1) {
 		attributes = csep->bmAttributes;
@@ -614,6 +610,8 @@
 		struct uac2_iso_endpoint_descriptor *csep2 =
 			(struct uac2_iso_endpoint_descriptor *) csep;
 
+		if (csep2->bLength < sizeof(*csep2))
+			goto error;
 		attributes = csep->bmAttributes & UAC_EP_CS_ATTR_FILL_MAX;
 
 		/* emulate the endpoint attributes of a v1 device */
@@ -623,12 +621,20 @@
 		struct uac3_iso_endpoint_descriptor *csep3 =
 			(struct uac3_iso_endpoint_descriptor *) csep;
 
+		if (csep3->bLength < sizeof(*csep3))
+			goto error;
 		/* emulate the endpoint attributes of a v1 device */
 		if (le32_to_cpu(csep3->bmControls) & UAC2_CONTROL_PITCH)
 			attributes |= UAC_EP_CS_ATTR_PITCH_CONTROL;
 	}
 
 	return attributes;
+
+ error:
+	usb_audio_warn(chip,
+		       "%u:%d : no or invalid class specific endpoint descriptor\n",
+		       iface_no, altsd->bAlternateSetting);
+	return 0;
 }
 
 /* find an input terminal descriptor (either UAC1 or UAC2) with the given
@@ -636,13 +642,17 @@
  */
 static void *
 snd_usb_find_input_terminal_descriptor(struct usb_host_interface *ctrl_iface,
-					       int terminal_id)
+				       int terminal_id, bool uac23)
 {
 	struct uac2_input_terminal_descriptor *term = NULL;
+	size_t minlen = uac23 ? sizeof(struct uac2_input_terminal_descriptor) :
+		sizeof(struct uac_input_terminal_descriptor);
 
 	while ((term = snd_usb_find_csint_desc(ctrl_iface->extra,
 					       ctrl_iface->extralen,
 					       term, UAC_INPUT_TERMINAL))) {
+		if (term->bLength < minlen)
+			continue;
 		if (term->bTerminalID == terminal_id)
 			return term;
 	}
@@ -660,7 +670,8 @@
 	while ((term = snd_usb_find_csint_desc(ctrl_iface->extra,
 					       ctrl_iface->extralen,
 					       term, UAC_OUTPUT_TERMINAL))) {
-		if (term->bTerminalID == terminal_id)
+		if (term->bLength >= sizeof(*term) &&
+		    term->bTerminalID == terminal_id)
 			return term;
 	}
 
@@ -734,7 +745,8 @@
 		format = le16_to_cpu(as->wFormatTag); /* remember the format value */
 
 		iterm = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
-							     as->bTerminalLink);
+							       as->bTerminalLink,
+							       false);
 		if (iterm) {
 			num_channels = iterm->bNrChannels;
 			chconfig = le16_to_cpu(iterm->wChannelConfig);
@@ -769,7 +781,8 @@
 		 * to extract the clock
 		 */
 		input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
-								    as->bTerminalLink);
+								    as->bTerminalLink,
+								    true);
 		if (input_term) {
 			clock = input_term->bCSourceID;
 			if (!chconfig && (num_channels == input_term->bNrChannels))
@@ -1003,7 +1016,8 @@
 	 * to extract the clock
 	 */
 	input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
-							    as->bTerminalLink);
+							    as->bTerminalLink,
+							    true);
 	if (input_term) {
 		clock = input_term->bCSourceID;
 		goto found_clock;
diff --git a/tools/lib/subcmd/Makefile b/tools/lib/subcmd/Makefile
index 95563b8..ed61fb3 100644
--- a/tools/lib/subcmd/Makefile
+++ b/tools/lib/subcmd/Makefile
@@ -36,8 +36,6 @@
 CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
 
 CFLAGS += -I$(srctree)/tools/include/
-CFLAGS += -I$(srctree)/include/uapi
-CFLAGS += -I$(srctree)/include
 
 SUBCMD_IN := $(OUTPUT)libsubcmd-in.o
 
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index ce1e202..75de355 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -4968,6 +4968,7 @@
 
 				if (arg->type == PRINT_BSTRING) {
 					trace_seq_puts(s, arg->string.string);
+					arg = arg->next;
 					break;
 				}
 
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index e30d20f..f00ea77 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -294,6 +294,8 @@
   $(call feature_check,bionic)
   ifeq ($(feature-bionic), 1)
     BIONIC := 1
+    CFLAGS += -DLACKS_SIGQUEUE_PROTOTYPE
+    CFLAGS += -DLACKS_OPEN_MEMSTREAM_PROTOTYPE
     EXTLIBS := $(filter-out -lrt,$(EXTLIBS))
     EXTLIBS := $(filter-out -lpthread,$(EXTLIBS))
   endif
diff --git a/tools/perf/arch/common.c b/tools/perf/arch/common.c
index 82657c0..5f69fd0 100644
--- a/tools/perf/arch/common.c
+++ b/tools/perf/arch/common.c
@@ -200,3 +200,13 @@
 
 	return perf_env__lookup_binutils_path(env, "objdump", path);
 }
+
+/*
+ * Some architectures have a single address space for kernel and user addresses,
+ * which makes it possible to determine if an address is in kernel space or user
+ * space.
+ */
+bool perf_env__single_address_space(struct perf_env *env)
+{
+	return strcmp(perf_env__arch(env), "sparc");
+}
diff --git a/tools/perf/arch/common.h b/tools/perf/arch/common.h
index 2167001..c298a44 100644
--- a/tools/perf/arch/common.h
+++ b/tools/perf/arch/common.h
@@ -5,5 +5,6 @@
 #include "../util/env.h"
 
 int perf_env__lookup_objdump(struct perf_env *env, const char **path);
+bool perf_env__single_address_space(struct perf_env *env);
 
 #endif /* ARCH_PERF_COMMON_H */
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index db0ba8c..ba8ecaf 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -524,10 +524,21 @@
 				    struct perf_evsel *evsel)
 {
 	int err;
+	char c;
 
 	if (!evsel)
 		return 0;
 
+	/*
+	 * If supported, force pass-through config term (pt=1) even if user
+	 * sets pt=0, which avoids senseless kernel errors.
+	 */
+	if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 &&
+	    !(evsel->attr.config & 1)) {
+		pr_warning("pt=0 doesn't make sense, forcing pt=1\n");
+		evsel->attr.config |= 1;
+	}
+
 	err = intel_pt_val_config_term(intel_pt_pmu, "caps/cycle_thresholds",
 				       "cyc_thresh", "caps/psb_cyc",
 				       evsel->attr.config);
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index ba481d7..6c1e7ce 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -727,8 +727,8 @@
 		if (PRINT_FIELD(DSO)) {
 			memset(&alf, 0, sizeof(alf));
 			memset(&alt, 0, sizeof(alt));
-			thread__find_map(thread, sample->cpumode, from, &alf);
-			thread__find_map(thread, sample->cpumode, to, &alt);
+			thread__find_map_fb(thread, sample->cpumode, from, &alf);
+			thread__find_map_fb(thread, sample->cpumode, to, &alt);
 		}
 
 		printed += fprintf(fp, " 0x%"PRIx64, from);
@@ -774,8 +774,8 @@
 		from = br->entries[i].from;
 		to   = br->entries[i].to;
 
-		thread__find_symbol(thread, sample->cpumode, from, &alf);
-		thread__find_symbol(thread, sample->cpumode, to, &alt);
+		thread__find_symbol_fb(thread, sample->cpumode, from, &alf);
+		thread__find_symbol_fb(thread, sample->cpumode, to, &alt);
 
 		printed += symbol__fprintf_symname_offs(alf.sym, &alf, fp);
 		if (PRINT_FIELD(DSO)) {
@@ -819,11 +819,11 @@
 		from = br->entries[i].from;
 		to   = br->entries[i].to;
 
-		if (thread__find_map(thread, sample->cpumode, from, &alf) &&
+		if (thread__find_map_fb(thread, sample->cpumode, from, &alf) &&
 		    !alf.map->dso->adjust_symbols)
 			from = map__map_ip(alf.map, from);
 
-		if (thread__find_map(thread, sample->cpumode, to, &alt) &&
+		if (thread__find_map_fb(thread, sample->cpumode, to, &alt) &&
 		    !alt.map->dso->adjust_symbols)
 			to = map__map_ip(alt.map, to);
 
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index d097b5b4..4072015 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -1961,7 +1961,7 @@
 	return metricgroup__parse_groups(opt, str, &metric_events);
 }
 
-static const struct option stat_options[] = {
+static struct option stat_options[] = {
 	OPT_BOOLEAN('T', "transaction", &transaction_run,
 		    "hardware transaction statistics"),
 	OPT_CALLBACK('e', "event", &evsel_list, "event",
@@ -2847,6 +2847,12 @@
 		return -ENOMEM;
 
 	parse_events__shrink_config_terms();
+
+	/* String-parsing callback-based options would segfault when negated */
+	set_option_flag(stat_options, 'e', "event", PARSE_OPT_NONEG);
+	set_option_flag(stat_options, 'M', "metrics", PARSE_OPT_NONEG);
+	set_option_flag(stat_options, 'G', "cgroup", PARSE_OPT_NONEG);
+
 	argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
 					(const char **) stat_usage,
 					PARSE_OPT_STOP_AT_NON_OPTION);
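
OPT_CALLBACK options such as -e/--event hand the argument string to a parser; when the user negates one (--no-event), libsubcmd invokes the callback with a NULL argument, which these parsers dereference. Rather than teaching every callback to cope, the options are flagged PARSE_OPT_NONEG at startup (which is why stat_options loses its const). A callback that did want to survive negation would look roughly like this (shape of the libsubcmd callback; body hypothetical):

	static int parse_foo(const struct option *opt, const char *str, int unset)
	{
		if (unset || !str)	/* negated: nothing to parse */
			return 0;
		return do_parse(opt->value, str);
	}
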
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index a827919..775b998 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -43,6 +43,10 @@
 #include "util/data.h"
 #include "util/debug.h"
 
+#ifdef LACKS_OPEN_MEMSTREAM_PROTOTYPE
+FILE *open_memstream(char **ptr, size_t *sizeloc);
+#endif
+
 #define SUPPORT_OLD_POWER_EVENTS 1
 #define PWR_EVENT_EXIT -1
 
diff --git a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
index 36c903f..71e9737 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
@@ -73,7 +73,7 @@
     },
     {
         "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
-        "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS_PS + MEM_LOAD_RETIRED.FB_HIT_PS )",
+        "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
         "MetricGroup": "Memory_Bound;Memory_Lat",
         "MetricName": "Load_Miss_Real_Latency"
     },
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
index 36c903f..71e9737 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
@@ -73,7 +73,7 @@
     },
     {
         "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
-        "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS_PS + MEM_LOAD_RETIRED.FB_HIT_PS )",
+        "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
         "MetricGroup": "Memory_Bound;Memory_Lat",
         "MetricName": "Load_Miss_Real_Latency"
     },
diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
index a467615..910e25e 100644
--- a/tools/perf/tests/bp_signal.c
+++ b/tools/perf/tests/bp_signal.c
@@ -291,12 +291,20 @@
 
 bool test__bp_signal_is_supported(void)
 {
-/*
- * The powerpc so far does not have support to even create
- * instruction breakpoint using the perf event interface.
- * Once it's there we can release this.
- */
-#if defined(__powerpc__) || defined(__s390x__)
+	/*
+	 * PowerPC and S390 do not support creation of instruction
+	 * breakpoints using the perf_event interface.
+	 *
+	 * ARM requires explicit rounding down of the instruction
+	 * pointer in Thumb mode, and then requires the single-step
+	 * to be handled explicitly in the overflow handler to avoid
+	 * stepping into the SIGIO handler and getting stuck on the
+	 * breakpointed instruction.
+	 *
+	 * Just disable the test for these architectures until these
+	 * issues are resolved.
+	 */
+#if defined(__powerpc__) || defined(__s390x__) || defined(__arm__)
 	return false;
 #else
 	return true;
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index ca57765..7b5e15c 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -1005,7 +1005,7 @@
 	}
 
 swap_packet:
-	if (etmq->etm->synth_opts.last_branch) {
+	if (etm->sample_branches || etm->synth_opts.last_branch) {
 		/*
 		 * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
 		 * the next incoming packet.
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 59f38c7..4c23779 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -166,7 +166,7 @@
 	struct utsname uts;
 	char *arch_name;
 
-	if (!env) { /* Assume local operation */
+	if (!env || !env->arch) { /* Assume local operation */
 		if (uname(&uts) < 0)
 			return NULL;
 		arch_name = uts.machine;
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index bc64618..aa9c7df 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -1576,6 +1576,24 @@
 	return al->map;
 }
 
+/*
+ * For branch stacks or branch samples, the sample cpumode might not be correct
+ * because it applies only to the sample 'ip' and not necessary to 'addr' or
+ * branch stack addresses. If possible, use a fallback to deal with those cases.
+ */
+struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
+				struct addr_location *al)
+{
+	struct map *map = thread__find_map(thread, cpumode, addr, al);
+	struct machine *machine = thread->mg->machine;
+	u8 addr_cpumode = machine__addr_cpumode(machine, cpumode, addr);
+
+	if (map || addr_cpumode == cpumode)
+		return map;
+
+	return thread__find_map(thread, addr_cpumode, addr, al);
+}
+
 struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
 				   u64 addr, struct addr_location *al)
 {
@@ -1585,6 +1603,15 @@
 	return al->sym;
 }
 
+struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
+				      u64 addr, struct addr_location *al)
+{
+	al->sym = NULL;
+	if (thread__find_map_fb(thread, cpumode, addr, al))
+		al->sym = map__find_symbol(al->map, al->addr);
+	return al->sym;
+}
+
 /*
  * Callers need to drop the reference to al->thread, obtained in
  * machine__findnew_thread()
@@ -1678,7 +1705,7 @@
 void thread__resolve(struct thread *thread, struct addr_location *al,
 		     struct perf_sample *sample)
 {
-	thread__find_map(thread, sample->cpumode, sample->addr, al);
+	thread__find_map_fb(thread, sample->cpumode, sample->addr, al);
 
 	al->cpu = sample->cpu;
 	al->sym = NULL;
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index be440df..819aa44 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -34,6 +34,10 @@
 #include <linux/log2.h>
 #include <linux/err.h>
 
+#ifdef LACKS_SIGQUEUE_PROTOTYPE
+int sigqueue(pid_t pid, int sig, const union sigval value);
+#endif
+
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
 #define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
 
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 8ee8ab3..d7403d1 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -2575,6 +2575,33 @@
 	return err;
 }
 
+u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
+{
+	u8 addr_cpumode = cpumode;
+	bool kernel_ip;
+
+	if (!machine->single_address_space)
+		goto out;
+
+	kernel_ip = machine__kernel_ip(machine, addr);
+	switch (cpumode) {
+	case PERF_RECORD_MISC_KERNEL:
+	case PERF_RECORD_MISC_USER:
+		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
+					   PERF_RECORD_MISC_USER;
+		break;
+	case PERF_RECORD_MISC_GUEST_KERNEL:
+	case PERF_RECORD_MISC_GUEST_USER:
+		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
+					   PERF_RECORD_MISC_GUEST_USER;
+		break;
+	default:
+		break;
+	}
+out:
+	return addr_cpumode;
+}
+
 struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
 {
 	return dsos__findnew(&machine->dsos, filename);
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index d856b85..ebde3ea 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -42,6 +42,7 @@
 	u16		  id_hdr_size;
 	bool		  comm_exec;
 	bool		  kptr_restrict_warned;
+	bool		  single_address_space;
 	char		  *root_dir;
 	char		  *mmap_name;
 	struct threads    threads[THREADS__TABLE_SIZE];
@@ -99,6 +100,8 @@
 	return ip >= kernel_start;
 }
 
+u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr);
+
 struct thread *machine__find_thread(struct machine *machine, pid_t pid,
 				    pid_t tid);
 struct comm *machine__thread_exec_comm(struct machine *machine,
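
On architectures where kernel and user share one address space, an address by itself reveals which side it belongs to, while the sample cpumode only describes the sample ip, not branch-stack or data addresses. machine__addr_cpumode() corrects the mode by comparing the address against kernel_start, and the new *_fb ("fallback") helpers in event.c retry the map lookup with that corrected mode. Condensed restatement of thread__find_map_fb():

	map = thread__find_map(thread, cpumode, addr, al);
	if (!map) {
		u8 addr_cpumode = machine__addr_cpumode(machine, cpumode, addr);

		if (addr_cpumode != cpumode)
			map = thread__find_map(thread, addr_cpumode, addr, al);
	}
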
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index f8cd3e7..ebb18a9 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -2454,7 +2454,7 @@
 		if (!name_only && strlen(syms->alias))
 			snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
 		else
-			strncpy(name, syms->symbol, MAX_NAME_LEN);
+			strlcpy(name, syms->symbol, MAX_NAME_LEN);
 
 		evt_list[evt_i] = strdup(name);
 		if (evt_list[evt_i] == NULL)
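
strncpy() does not NUL-terminate the destination when the source is at least as long as the buffer; strlcpy() always terminates (truncating if needed), which is why it replaces strncpy here and in svghelper.c further down. A small user-space illustration:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char dst[4];

		strncpy(dst, "abcdef", sizeof(dst));	/* fills dst, writes no NUL */
		/* reading dst as a string here would run past the array */

		dst[sizeof(dst) - 1] = '\0';		/* what strlcpy guarantees */
		printf("%s\n", dst);			/* prints "abc" */
		return 0;
	}
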
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 7e49baa..7348eea 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -145,7 +145,7 @@
 	int fd, ret = -1;
 	char path[PATH_MAX];
 
-	snprintf(path, PATH_MAX, "%s/%s.scale", dir, name);
+	scnprintf(path, PATH_MAX, "%s/%s.scale", dir, name);
 
 	fd = open(path, O_RDONLY);
 	if (fd == -1)
@@ -175,7 +175,7 @@
 	ssize_t sret;
 	int fd;
 
-	snprintf(path, PATH_MAX, "%s/%s.unit", dir, name);
+	scnprintf(path, PATH_MAX, "%s/%s.unit", dir, name);
 
 	fd = open(path, O_RDONLY);
 	if (fd == -1)
@@ -205,7 +205,7 @@
 	char path[PATH_MAX];
 	int fd;
 
-	snprintf(path, PATH_MAX, "%s/%s.per-pkg", dir, name);
+	scnprintf(path, PATH_MAX, "%s/%s.per-pkg", dir, name);
 
 	fd = open(path, O_RDONLY);
 	if (fd == -1)
@@ -223,7 +223,7 @@
 	char path[PATH_MAX];
 	int fd;
 
-	snprintf(path, PATH_MAX, "%s/%s.snapshot", dir, name);
+	scnprintf(path, PATH_MAX, "%s/%s.snapshot", dir, name);
 
 	fd = open(path, O_RDONLY);
 	if (fd == -1)
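
snprintf() returns the length the output would have had, which can exceed the buffer size (and trips gcc's -Wformat-truncation); the kernel-style scnprintf() carried in tools/ returns what was actually stored. The return values are unused here, so this is about the safer convention; illustration of the difference:

	char buf[8];
	int n;

	n = snprintf(buf, sizeof(buf), "%s", "a-long-string");
	/* n == 13, the wanted length; buf holds "a-long-" */

	n = scnprintf(buf, sizeof(buf), "%s", "a-long-string");
	/* n == 7, what actually landed in buf, safe to use as an offset */
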
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index dfc6093..05d95de 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -494,14 +494,14 @@
 		pydict_set_item_string_decref(pyelem, "cycles",
 		    PyLong_FromUnsignedLongLong(br->entries[i].flags.cycles));
 
-		thread__find_map(thread, sample->cpumode,
-				 br->entries[i].from, &al);
+		thread__find_map_fb(thread, sample->cpumode,
+				    br->entries[i].from, &al);
 		dsoname = get_dsoname(al.map);
 		pydict_set_item_string_decref(pyelem, "from_dsoname",
 					      _PyUnicode_FromString(dsoname));
 
-		thread__find_map(thread, sample->cpumode,
-				 br->entries[i].to, &al);
+		thread__find_map_fb(thread, sample->cpumode,
+				    br->entries[i].to, &al);
 		dsoname = get_dsoname(al.map);
 		pydict_set_item_string_decref(pyelem, "to_dsoname",
 					      _PyUnicode_FromString(dsoname));
@@ -576,14 +576,14 @@
 		if (!pyelem)
 			Py_FatalError("couldn't create Python dictionary");
 
-		thread__find_symbol(thread, sample->cpumode,
-				    br->entries[i].from, &al);
+		thread__find_symbol_fb(thread, sample->cpumode,
+				       br->entries[i].from, &al);
 		get_symoff(al.sym, &al, true, bf, sizeof(bf));
 		pydict_set_item_string_decref(pyelem, "from",
 					      _PyUnicode_FromString(bf));
 
-		thread__find_symbol(thread, sample->cpumode,
-				    br->entries[i].to, &al);
+		thread__find_symbol_fb(thread, sample->cpumode,
+				       br->entries[i].to, &al);
 		get_symoff(al.sym, &al, true, bf, sizeof(bf));
 		pydict_set_item_string_decref(pyelem, "to",
 					      _PyUnicode_FromString(bf));
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 8b93693..1108609 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -24,6 +24,7 @@
 #include "thread.h"
 #include "thread-stack.h"
 #include "stat.h"
+#include "arch/common.h"
 
 static int perf_session__deliver_event(struct perf_session *session,
 				       union perf_event *event,
@@ -150,6 +151,9 @@
 		session->machines.host.env = &perf_env;
 	}
 
+	session->machines.host.single_address_space =
+		perf_env__single_address_space(session->machines.host.env);
+
 	if (!data || perf_data__is_write(data)) {
 		/*
 		 * In O_RDONLY mode this will be performed when reading the
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index 1cbada2..f735ee0 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -334,7 +334,7 @@
 	if (file) {
 		while (fgets(buf, 255, file)) {
 			if (strstr(buf, "model name")) {
-				strncpy(cpu_m, &buf[13], 255);
+				strlcpy(cpu_m, &buf[13], 255);
 				break;
 			}
 		}
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 07606aa..4e2c3cb 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -94,9 +94,13 @@
 
 struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
 			     struct addr_location *al);
+struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
+				struct addr_location *al);
 
 struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
 				   u64 addr, struct addr_location *al);
+struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
+				      u64 addr, struct addr_location *al);
 
 void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
 					struct addr_location *al);
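
The new _fb ("fallback") lookups pair with the single_address_space flag set in the session.c hunk above: on architectures where user and kernel share one address space, the sample cpumode alone cannot pin a branch address to the right map, so a failed lookup is presumably retried with a cpumode derived from the address itself. A hypothetical sketch of that shape (the name and the SKETCH_KERNEL_START threshold are illustrative, not the real implementation):

/* Hypothetical fallback wrapper; the real helper may differ. */
struct map *sketch_find_map_fb(struct thread *thread, u8 cpumode,
			       u64 addr, struct addr_location *al)
{
	struct map *map = thread__find_map(thread, cpumode, addr, al);

	if (map)
		return map;

	/* Assumed fallback: infer user vs. kernel from the address,
	 * useful for branch entries that cross the boundary. */
	return thread__find_map(thread,
				addr >= SKETCH_KERNEL_START ?
					PERF_RECORD_MISC_KERNEL :
					PERF_RECORD_MISC_USER,
				addr, al);
}
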
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
index ff9d3a5..c6635fe 100644
--- a/tools/testing/nvdimm/test/iomap.c
+++ b/tools/testing/nvdimm/test/iomap.c
@@ -104,16 +104,29 @@
 }
 EXPORT_SYMBOL(__wrap_devm_memremap);
 
+static void nfit_test_kill(void *_pgmap)
+{
+	struct dev_pagemap *pgmap = _pgmap;
+
+	pgmap->kill(pgmap->ref);
+}
+
 void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 {
 	resource_size_t offset = pgmap->res.start;
 	struct nfit_test_resource *nfit_res = get_nfit_res(offset);
 
-	if (nfit_res)
+	if (nfit_res) {
+		int rc;
+
+		rc = devm_add_action_or_reset(dev, nfit_test_kill, pgmap);
+		if (rc)
+			return ERR_PTR(rc);
 		return nfit_res->buf + offset - nfit_res->res.start;
+	}
 	return devm_memremap_pages(dev, pgmap);
 }
-EXPORT_SYMBOL(__wrap_devm_memremap_pages);
+EXPORT_SYMBOL_GPL(__wrap_devm_memremap_pages);
 
 pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
 {
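
The iomap.c change mirrors a core refactor in which devm_memremap_pages() took over killing the pagemap reference at device teardown; the test wrapper reproduces that with devm_add_action_or_reset(), which registers a release callback and, if registration fails, runs it immediately. The pattern, sketched with hypothetical names (kernel context, illustrative only):

static void sketch_release(void *data)
{
	/* undo whatever sketch_setup() acquired */
}

static int sketch_setup(struct device *dev, void *res)
{
	int rc;

	/* ... acquire the resource ... */

	rc = devm_add_action_or_reset(dev, sketch_release, res);
	if (rc)
		return rc;	/* sketch_release() has already run */

	return 0;		/* sketch_release() runs on detach */
}
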
diff --git a/tools/testing/selftests/android/Makefile b/tools/testing/selftests/android/Makefile
index d9a7254..72c25a3 100644
--- a/tools/testing/selftests/android/Makefile
+++ b/tools/testing/selftests/android/Makefile
@@ -6,7 +6,7 @@
 
 include ../lib.mk
 
-all: khdr
+all:
 	@for DIR in $(SUBDIRS); do		\
 		BUILD_TARGET=$(OUTPUT)/$$DIR;	\
 		mkdir $$BUILD_TARGET  -p;	\
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index fff7fb1..f3f874b 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -124,6 +124,16 @@
 endif
 endif
 
+# Have one program compiled without "-target bpf" to test whether libbpf loads
+# it successfully
+$(OUTPUT)/test_xdp.o: test_xdp.c
+	$(CLANG) $(CLANG_FLAGS) \
+		-O2 -emit-llvm -c $< -o - | \
+	$(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
+ifeq ($(DWARF2BTF),y)
+	$(BTF_PAHOLE) -J $@
+endif
+
 $(OUTPUT)/%.o: %.c
 	$(CLANG) $(CLANG_FLAGS) \
 		 -O2 -target bpf -emit-llvm -c $< -o - |      \
diff --git a/tools/testing/selftests/bpf/test_libbpf.sh b/tools/testing/selftests/bpf/test_libbpf.sh
index d97dc91..8b1bc96 100755
--- a/tools/testing/selftests/bpf/test_libbpf.sh
+++ b/tools/testing/selftests/bpf/test_libbpf.sh
@@ -33,17 +33,11 @@
 
 libbpf_open_file test_l4lb.o
 
-# TODO: fix libbpf to load noinline functions
-# [warning] libbpf: incorrect bpf_call opcode
-#libbpf_open_file test_l4lb_noinline.o
+# Load a program with BPF-to-BPF calls
+libbpf_open_file test_l4lb_noinline.o
 
-# TODO: fix test_xdp_meta.c to load with libbpf
-# [warning] libbpf: test_xdp_meta.o doesn't provide kernel version
-#libbpf_open_file test_xdp_meta.o
-
-# TODO: fix libbpf to handle .eh_frame
-# [warning] libbpf: relocation failed: no section(10)
-#libbpf_open_file ../../../../samples/bpf/tracex3_kern.o
+# Load a program compiled without the "-target bpf" flag
+libbpf_open_file test_xdp.o
 
 # Success
 exit 0
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index e436b67f..9db5a73 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -2749,6 +2749,19 @@
 		.result = ACCEPT,
 	},
 	{
+		"alu32: mov u32 const",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_7, 0),
+			BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1),
+			BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 0,
+	},
+	{
 		"unpriv: partial copy of pointer",
 		.insns = {
 			BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
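
The new "alu32: mov u32 const" case checks constant tracking across 32-bit ALU and MOV instructions: r7 is provably 0 after the AND, the 32-bit register move must preserve that knowledge, so the JEQ is always taken and the load through r7 (a scalar, not a pointer) is dead code the verifier must not reject. Read as C, roughly (illustrative only):

/* Illustrative C reading of the BPF sequence above. */
static unsigned long long alu32_mov_const(void)
{
	unsigned int r7 = 0;
	unsigned long long r0;

	r7 &= 1;		/* provably still 0 */
	r0 = r7;		/* BPF_MOV32_REG: must stay "known 0" */
	if (r0 == 0)
		return 0;	/* always taken */
	/* unreachable: would otherwise be an invalid memory access */
	return *(unsigned long long *)(unsigned long)r7;
}
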
diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
index ad1eeb1..30996306 100644
--- a/tools/testing/selftests/futex/functional/Makefile
+++ b/tools/testing/selftests/futex/functional/Makefile
@@ -19,6 +19,7 @@
 TEST_PROGS := run.sh
 
 top_srcdir = ../../../../..
+KSFT_KHDR_INSTALL := 1
 include ../../lib.mk
 
 $(TEST_GEN_FILES): $(HEADERS)
diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile
index 4665cdb..59ea4c46 100644
--- a/tools/testing/selftests/gpio/Makefile
+++ b/tools/testing/selftests/gpio/Makefile
@@ -9,6 +9,7 @@
 EXTRA_OBJS += ../gpiogpio-hammer-in.o ../gpiogpio-utils.o ../gpiolsgpio-in.o
 EXTRA_OBJS += ../gpiolsgpio.o
 
+KSFT_KHDR_INSTALL := 1
 include ../lib.mk
 
 all: $(BINARIES)
diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
index 6ae3730..76d654e 100644
--- a/tools/testing/selftests/kselftest_harness.h
+++ b/tools/testing/selftests/kselftest_harness.h
@@ -354,7 +354,7 @@
  * ASSERT_EQ(expected, measured): expected == measured
  */
 #define ASSERT_EQ(expected, seen) \
-	__EXPECT(expected, seen, ==, 1)
+	__EXPECT(expected, #expected, seen, #seen, ==, 1)
 
 /**
  * ASSERT_NE(expected, seen)
@@ -365,7 +365,7 @@
  * ASSERT_NE(expected, measured): expected != measured
  */
 #define ASSERT_NE(expected, seen) \
-	__EXPECT(expected, seen, !=, 1)
+	__EXPECT(expected, #expected, seen, #seen, !=, 1)
 
 /**
  * ASSERT_LT(expected, seen)
@@ -376,7 +376,7 @@
  * ASSERT_LT(expected, measured): expected < measured
  */
 #define ASSERT_LT(expected, seen) \
-	__EXPECT(expected, seen, <, 1)
+	__EXPECT(expected, #expected, seen, #seen, <, 1)
 
 /**
  * ASSERT_LE(expected, seen)
@@ -387,7 +387,7 @@
  * ASSERT_LE(expected, measured): expected <= measured
  */
 #define ASSERT_LE(expected, seen) \
-	__EXPECT(expected, seen, <=, 1)
+	__EXPECT(expected, #expected, seen, #seen, <=, 1)
 
 /**
  * ASSERT_GT(expected, seen)
@@ -398,7 +398,7 @@
  * ASSERT_GT(expected, measured): expected > measured
  */
 #define ASSERT_GT(expected, seen) \
-	__EXPECT(expected, seen, >, 1)
+	__EXPECT(expected, #expected, seen, #seen, >, 1)
 
 /**
  * ASSERT_GE(expected, seen)
@@ -409,7 +409,7 @@
  * ASSERT_GE(expected, measured): expected >= measured
  */
 #define ASSERT_GE(expected, seen) \
-	__EXPECT(expected, seen, >=, 1)
+	__EXPECT(expected, #expected, seen, #seen, >=, 1)
 
 /**
  * ASSERT_NULL(seen)
@@ -419,7 +419,7 @@
  * ASSERT_NULL(measured): NULL == measured
  */
 #define ASSERT_NULL(seen) \
-	__EXPECT(NULL, seen, ==, 1)
+	__EXPECT(NULL, "NULL", seen, #seen, ==, 1)
 
 /**
  * ASSERT_TRUE(seen)
@@ -429,7 +429,7 @@
  * ASSERT_TRUE(measured): measured != 0
  */
 #define ASSERT_TRUE(seen) \
-	ASSERT_NE(0, seen)
+	__EXPECT(0, "0", seen, #seen, !=, 1)
 
 /**
  * ASSERT_FALSE(seen)
@@ -439,7 +439,7 @@
  * ASSERT_FALSE(measured): measured == 0
  */
 #define ASSERT_FALSE(seen) \
-	ASSERT_EQ(0, seen)
+	__EXPECT(0, "0", seen, #seen, ==, 1)
 
 /**
  * ASSERT_STREQ(expected, seen)
@@ -472,7 +472,7 @@
  * EXPECT_EQ(expected, measured): expected == measured
  */
 #define EXPECT_EQ(expected, seen) \
-	__EXPECT(expected, seen, ==, 0)
+	__EXPECT(expected, #expected, seen, #seen, ==, 0)
 
 /**
  * EXPECT_NE(expected, seen)
@@ -483,7 +483,7 @@
  * EXPECT_NE(expected, measured): expected != measured
  */
 #define EXPECT_NE(expected, seen) \
-	__EXPECT(expected, seen, !=, 0)
+	__EXPECT(expected, #expected, seen, #seen, !=, 0)
 
 /**
  * EXPECT_LT(expected, seen)
@@ -494,7 +494,7 @@
  * EXPECT_LT(expected, measured): expected < measured
  */
 #define EXPECT_LT(expected, seen) \
-	__EXPECT(expected, seen, <, 0)
+	__EXPECT(expected, #expected, seen, #seen, <, 0)
 
 /**
  * EXPECT_LE(expected, seen)
@@ -505,7 +505,7 @@
  * EXPECT_LE(expected, measured): expected <= measured
  */
 #define EXPECT_LE(expected, seen) \
-	__EXPECT(expected, seen, <=, 0)
+	__EXPECT(expected, #expected, seen, #seen, <=, 0)
 
 /**
  * EXPECT_GT(expected, seen)
@@ -516,7 +516,7 @@
  * EXPECT_GT(expected, measured): expected > measured
  */
 #define EXPECT_GT(expected, seen) \
-	__EXPECT(expected, seen, >, 0)
+	__EXPECT(expected, #expected, seen, #seen, >, 0)
 
 /**
  * EXPECT_GE(expected, seen)
@@ -527,7 +527,7 @@
  * EXPECT_GE(expected, measured): expected >= measured
  */
 #define EXPECT_GE(expected, seen) \
-	__EXPECT(expected, seen, >=, 0)
+	__EXPECT(expected, #expected, seen, #seen, >=, 0)
 
 /**
  * EXPECT_NULL(seen)
@@ -537,7 +537,7 @@
  * EXPECT_NULL(measured): NULL == measured
  */
 #define EXPECT_NULL(seen) \
-	__EXPECT(NULL, seen, ==, 0)
+	__EXPECT(NULL, "NULL", seen, #seen, ==, 0)
 
 /**
  * EXPECT_TRUE(seen)
@@ -547,7 +547,7 @@
  * EXPECT_TRUE(measured): 0 != measured
  */
 #define EXPECT_TRUE(seen) \
-	EXPECT_NE(0, seen)
+	__EXPECT(0, "0", seen, #seen, !=, 0)
 
 /**
  * EXPECT_FALSE(seen)
@@ -557,7 +557,7 @@
  * EXPECT_FALSE(measured): 0 == measured
  */
 #define EXPECT_FALSE(seen) \
-	EXPECT_EQ(0, seen)
+	__EXPECT(0, "0", seen, #seen, ==, 0)
 
 /**
  * EXPECT_STREQ(expected, seen)
@@ -597,7 +597,7 @@
 	if (_metadata->passed && _metadata->step < 255) \
 		_metadata->step++;
 
-#define __EXPECT(_expected, _seen, _t, _assert) do { \
+#define __EXPECT(_expected, _expected_str, _seen, _seen_str, _t, _assert) do { \
 	/* Avoid multiple evaluation of the cases */ \
 	__typeof__(_expected) __exp = (_expected); \
 	__typeof__(_seen) __seen = (_seen); \
@@ -606,8 +606,8 @@
 		unsigned long long __exp_print = (uintptr_t)__exp; \
 		unsigned long long __seen_print = (uintptr_t)__seen; \
 		__TH_LOG("Expected %s (%llu) %s %s (%llu)", \
-			 #_expected, __exp_print, #_t, \
-			 #_seen, __seen_print); \
+			 _expected_str, __exp_print, #_t, \
+			 _seen_str, __seen_print); \
 		_metadata->passed = 0; \
 		/* Ensure the optional handler is triggered */ \
 		_metadata->trigger = 1; \
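
The harness rework threads the stringized expressions down from the outermost ASSERT_*/EXPECT_* macro because # captures an argument's spelling only at the macro level that first receives it: once ASSERT_TRUE(seen) forwarded through ASSERT_NE into __EXPECT, any macros inside `seen` were already expanded, and failure messages printed the expansion instead of the source expression. A standalone demonstration with hypothetical macros:

#include <stdio.h>

#define IS_READY(x)	((x) == 1)

/* Stringizing at an inner level sees pre-expanded tokens... */
#define INNER(cond)		puts(#cond)
#define OUTER_BAD(cond)		INNER(cond)

/* ...while stringizing at the outermost level preserves what the
 * caller actually wrote, as the harness now does. */
#define OUTER_GOOD(cond)	INNER_STR(cond, #cond)
#define INNER_STR(cond, cond_str)	puts(cond_str)

int main(void)
{
	OUTER_BAD(IS_READY(0));		/* prints "((0) == 1)" */
	OUTER_GOOD(IS_READY(0));	/* prints "IS_READY(0)" */
	return 0;
}
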
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index ec32dad..cc83e2f 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -1,6 +1,7 @@
 all:
 
 top_srcdir = ../../../../
+KSFT_KHDR_INSTALL := 1
 UNAME_M := $(shell uname -m)
 
 LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c
@@ -40,4 +41,3 @@
 
 all: $(STATIC_LIBS)
 $(TEST_GEN_PROGS): $(STATIC_LIBS)
-$(STATIC_LIBS):| khdr
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 0a8e758..8b0f164 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -16,18 +16,18 @@
 TEST_GEN_PROGS_EXTENDED := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS_EXTENDED))
 TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES))
 
+ifdef KSFT_KHDR_INSTALL
 top_srcdir ?= ../../../..
 include $(top_srcdir)/scripts/subarch.include
 ARCH		?= $(SUBARCH)
 
-all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
-
 .PHONY: khdr
 khdr:
 	make ARCH=$(ARCH) -C $(top_srcdir) headers_install
 
-ifdef KSFT_KHDR_INSTALL
-$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES):| khdr
+all: khdr $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
+else
+all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
 endif
 
 .ONESHELL:
diff --git a/tools/testing/selftests/memfd/memfd_test.c b/tools/testing/selftests/memfd/memfd_test.c
index 10baa16..c67d32e 100644
--- a/tools/testing/selftests/memfd/memfd_test.c
+++ b/tools/testing/selftests/memfd/memfd_test.c
@@ -54,6 +54,22 @@
 	return fd;
 }
 
+static int mfd_assert_reopen_fd(int fd_in)
+{
+	int r, fd;
+	char path[100];
+
+	sprintf(path, "/proc/self/fd/%d", fd_in);
+
+	fd = open(path, O_RDWR);
+	if (fd < 0) {
+		printf("re-open of existing fd %d failed\n", fd_in);
+		abort();
+	}
+
+	return fd;
+}
+
 static void mfd_fail_new(const char *name, unsigned int flags)
 {
 	int r;
@@ -255,6 +271,25 @@
 	munmap(p, mfd_def_size);
 }
 
+/* Test that PROT_READ + MAP_SHARED mappings work. */
+static void mfd_assert_read_shared(int fd)
+{
+	void *p;
+
+	/* verify that PROT_READ with MAP_SHARED *is* allowed */
+	p = mmap(NULL,
+		 mfd_def_size,
+		 PROT_READ,
+		 MAP_SHARED,
+		 fd,
+		 0);
+	if (p == MAP_FAILED) {
+		printf("mmap() failed: %m\n");
+		abort();
+	}
+	munmap(p, mfd_def_size);
+}
+
 static void mfd_assert_write(int fd)
 {
 	ssize_t l;
@@ -693,6 +728,44 @@
 }
 
 /*
+ * Test SEAL_FUTURE_WRITE
+ * Test whether SEAL_FUTURE_WRITE actually prevents modifications.
+ */
+static void test_seal_future_write(void)
+{
+	int fd, fd2;
+	void *p;
+
+	printf("%s SEAL-FUTURE-WRITE\n", memfd_str);
+
+	fd = mfd_assert_new("kern_memfd_seal_future_write",
+			    mfd_def_size,
+			    MFD_CLOEXEC | MFD_ALLOW_SEALING);
+
+	p = mfd_assert_mmap_shared(fd);
+
+	mfd_assert_has_seals(fd, 0);
+
+	mfd_assert_add_seals(fd, F_SEAL_FUTURE_WRITE);
+	mfd_assert_has_seals(fd, F_SEAL_FUTURE_WRITE);
+
+	/* read should pass, writes should fail */
+	mfd_assert_read(fd);
+	mfd_assert_read_shared(fd);
+	mfd_fail_write(fd);
+
+	fd2 = mfd_assert_reopen_fd(fd);
+	/* read should pass, writes should still fail */
+	mfd_assert_read(fd2);
+	mfd_assert_read_shared(fd2);
+	mfd_fail_write(fd2);
+
+	munmap(p, mfd_def_size);
+	close(fd2);
+	close(fd);
+}
+
+/*
  * Test SEAL_SHRINK
  * Test whether SEAL_SHRINK actually prevents shrinking
  */
@@ -945,6 +1018,7 @@
 	test_basic();
 
 	test_seal_write();
+	test_seal_future_write();
 	test_seal_shrink();
 	test_seal_grow();
 	test_seal_resize();
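
F_SEAL_FUTURE_WRITE, exercised by the new test, forbids *new* write access (write(), PROT_WRITE mappings) while leaving reads and any writable mapping created before sealing intact; F_SEAL_WRITE, by contrast, cannot even be taken while a writable mapping exists. A usage sketch, assuming a kernel that supports the seal:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef F_SEAL_FUTURE_WRITE
#define F_SEAL_FUTURE_WRITE 0x0010	/* include/uapi/linux/fcntl.h */
#endif

int main(void)
{
	int fd = memfd_create("demo", MFD_CLOEXEC | MFD_ALLOW_SEALING);
	char *p;

	if (fd < 0 || ftruncate(fd, 4096) < 0)
		return 1;

	/* A writable mapping taken *before* sealing keeps working. */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	if (fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE) < 0)
		return 1;	/* kernel lacks the seal */

	strcpy(p, "pre-seal mapping still writable");

	if (write(fd, "x", 1) < 0)
		perror("write");		/* EPERM */
	if (mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		 MAP_SHARED, fd, 0) == MAP_FAILED)
		perror("mmap PROT_WRITE");	/* refused */
	return 0;
}
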
diff --git a/tools/testing/selftests/networking/timestamping/Makefile b/tools/testing/selftests/networking/timestamping/Makefile
index 14cfcf0..c46c0ee 100644
--- a/tools/testing/selftests/networking/timestamping/Makefile
+++ b/tools/testing/selftests/networking/timestamping/Makefile
@@ -6,6 +6,7 @@
 all: $(TEST_PROGS)
 
 top_srcdir = ../../../../..
+KSFT_KHDR_INSTALL := 1
 include ../../lib.mk
 
 clean:
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index e147323..83057fa 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -1563,7 +1563,16 @@
 #ifdef SYSCALL_NUM_RET_SHARE_REG
 # define EXPECT_SYSCALL_RETURN(val, action)	EXPECT_EQ(-1, action)
 #else
-# define EXPECT_SYSCALL_RETURN(val, action)	EXPECT_EQ(val, action)
+# define EXPECT_SYSCALL_RETURN(val, action)		\
+	do {						\
+		errno = 0;				\
+		if (val < 0) {				\
+			EXPECT_EQ(-1, action);		\
+			EXPECT_EQ(-(val), errno);	\
+		} else {				\
+			EXPECT_EQ(val, action);		\
+		}					\
+	} while (0)
 #endif
 
 /* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for
@@ -1602,7 +1611,7 @@
 
 /* Architecture-specific syscall changing routine. */
 void change_syscall(struct __test_metadata *_metadata,
-		    pid_t tracee, int syscall)
+		    pid_t tracee, int syscall, int result)
 {
 	int ret;
 	ARCH_REGS regs;
@@ -1661,7 +1670,7 @@
 #ifdef SYSCALL_NUM_RET_SHARE_REG
 		TH_LOG("Can't modify syscall return on this architecture");
 #else
-		regs.SYSCALL_RET = EPERM;
+		regs.SYSCALL_RET = result;
 #endif
 
 #ifdef HAVE_GETREGS
@@ -1689,14 +1698,19 @@
 	case 0x1002:
 		/* change getpid to getppid. */
 		EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee));
-		change_syscall(_metadata, tracee, __NR_getppid);
+		change_syscall(_metadata, tracee, __NR_getppid, 0);
 		break;
 	case 0x1003:
-		/* skip gettid. */
+		/* skip gettid with a valid return code. */
 		EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee));
-		change_syscall(_metadata, tracee, -1);
+		change_syscall(_metadata, tracee, -1, 45000);
 		break;
 	case 0x1004:
+		/* skip openat with an error. */
+		EXPECT_EQ(__NR_openat, get_syscall(_metadata, tracee));
+		change_syscall(_metadata, tracee, -1, -ESRCH);
+		break;
+	case 0x1005:
 		/* do nothing (allow getppid) */
 		EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee));
 		break;
@@ -1729,9 +1743,11 @@
 	nr = get_syscall(_metadata, tracee);
 
 	if (nr == __NR_getpid)
-		change_syscall(_metadata, tracee, __NR_getppid);
+		change_syscall(_metadata, tracee, __NR_getppid, 0);
+	if (nr == __NR_gettid)
+		change_syscall(_metadata, tracee, -1, 45000);
 	if (nr == __NR_openat)
-		change_syscall(_metadata, tracee, -1);
+		change_syscall(_metadata, tracee, -1, -ESRCH);
 }
 
 FIXTURE_DATA(TRACE_syscall) {
@@ -1748,8 +1764,10 @@
 		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002),
 		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1),
 		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003),
-		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
+		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_openat, 0, 1),
 		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004),
+		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
+		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1005),
 		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
 	};
 
@@ -1797,15 +1815,26 @@
 	EXPECT_NE(self->mypid, syscall(__NR_getpid));
 }
 
-TEST_F(TRACE_syscall, ptrace_syscall_dropped)
+TEST_F(TRACE_syscall, ptrace_syscall_errno)
 {
 	/* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
 	teardown_trace_fixture(_metadata, self->tracer);
 	self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
 					   true);
 
-	/* Tracer should skip the open syscall, resulting in EPERM. */
-	EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_openat));
+	/* Tracer should skip the open syscall, resulting in ESRCH. */
+	EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
+}
+
+TEST_F(TRACE_syscall, ptrace_syscall_faked)
+{
+	/* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
+	teardown_trace_fixture(_metadata, self->tracer);
+	self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
+					   true);
+
+	/* Tracer should skip the gettid syscall, resulting in a fake pid. */
+	EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
 }
 
 TEST_F(TRACE_syscall, syscall_allowed)
@@ -1838,7 +1867,21 @@
 	EXPECT_NE(self->mypid, syscall(__NR_getpid));
 }
 
-TEST_F(TRACE_syscall, syscall_dropped)
+TEST_F(TRACE_syscall, syscall_errno)
+{
+	long ret;
+
+	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+	ASSERT_EQ(0, ret);
+
+	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
+	ASSERT_EQ(0, ret);
+
+	/* openat has been skipped and an errno returned. */
+	EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
+}
+
+TEST_F(TRACE_syscall, syscall_faked)
 {
 	long ret;
 
@@ -1849,8 +1892,7 @@
 	ASSERT_EQ(0, ret);
 
 	/* gettid has been skipped and an altered return value stored. */
-	EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_gettid));
-	EXPECT_NE(self->mytid, syscall(__NR_gettid));
+	EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
 }
 
 TEST_F(TRACE_syscall, skip_after_RET_TRACE)
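
The reworked EXPECT_SYSCALL_RETURN() distinguishes the two skip flavors the tracer tests now use: a negative value fakes an error, which libc presents as a -1 return with errno set to the negated value, while a positive value (the 45000 "pid") is returned as a faked success. The mapping libc applies to a raw register-level return, sketched:

#include <errno.h>

/* Sketch: Linux reserves return values in [-4095, -1] for errors;
 * libc folds those into -1 + errno, everything else passes through. */
static long libc_view(long raw_ret, int *err)
{
	if (raw_ret < 0 && raw_ret >= -4095) {
		*err = (int)-raw_ret;	/* e.g. -ESRCH -> errno = ESRCH */
		return -1;
	}
	*err = 0;
	return raw_ret;			/* e.g. the faked 45000 */
}
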
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index e94b7b1..dc68340 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -24,6 +24,7 @@
 
 TEST_PROGS := run_vmtests
 
+KSFT_KHDR_INSTALL := 1
 include ../lib.mk
 
 $(OUTPUT)/userfaultfd: LDLIBS += -lpthread
diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
index 460b4bd..5d546dc 100644
--- a/tools/testing/selftests/x86/protection_keys.c
+++ b/tools/testing/selftests/x86/protection_keys.c
@@ -1133,6 +1133,21 @@
 	pkey_assert(err);
 }
 
+void become_child(void)
+{
+	pid_t forkret;
+
+	forkret = fork();
+	pkey_assert(forkret >= 0);
+	dprintf3("[%d] fork() ret: %d\n", getpid(), forkret);
+
+	if (!forkret) {
+		/* in the child */
+		return;
+	}
+	exit(0);
+}
+
 /* Assumes that all pkeys other than 'pkey' are unallocated */
 void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
 {
@@ -1141,7 +1156,7 @@
 	int nr_allocated_pkeys = 0;
 	int i;
 
-	for (i = 0; i < NR_PKEYS*2; i++) {
+	for (i = 0; i < NR_PKEYS*3; i++) {
 		int new_pkey;
 		dprintf1("%s() alloc loop: %d\n", __func__, i);
 		new_pkey = alloc_pkey();
@@ -1152,21 +1167,27 @@
 		if ((new_pkey == -1) && (errno == ENOSPC)) {
 			dprintf2("%s() failed to allocate pkey after %d tries\n",
 				__func__, nr_allocated_pkeys);
-			break;
+		} else {
+			/*
+			 * Ensure the number of successes never
+			 * exceeds the number of keys supported
+			 * in the hardware.
+			 */
+			pkey_assert(nr_allocated_pkeys < NR_PKEYS);
+			allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
 		}
-		pkey_assert(nr_allocated_pkeys < NR_PKEYS);
-		allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
+
+		/*
+		 * Make sure that allocation state is properly
+		 * preserved across fork().
+		 */
+		if (i == NR_PKEYS*2)
+			become_child();
 	}
 
 	dprintf3("%s()::%d\n", __func__, __LINE__);
 
 	/*
-	 * ensure it did not reach the end of the loop without
-	 * failure:
-	 */
-	pkey_assert(i < NR_PKEYS*2);
-
-	/*
 	 * There are 16 pkeys supported in hardware.  Three are
 	 * allocated by the time we get here:
 	 *   1. The default key (0)
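
The extended loop (3x NR_PKEYS, with a fork() at 2x NR_PKEYS) verifies that protection-key allocation state survives fork(): the child inherits the parent's exhausted pkey map, so alloc_pkey() must keep failing with ENOSPC there rather than handing out keys already in use. The inherited-state property in isolation, as a hedged sketch (raw syscall, x86 with pkeys support assumed; the call fails with ENOSYS/ENOSPC otherwise):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static int sketch_pkey_alloc(void)
{
	/* pkey_alloc(flags = 0, init_access_rights = 0) */
	return (int)syscall(SYS_pkey_alloc, 0, 0);
}

int main(void)
{
	int pkey = sketch_pkey_alloc();

	printf("parent allocated pkey %d\n", pkey);
	if (fork() == 0) {
		/* The child inherits the allocation map, so it must
		 * not be handed 'pkey' a second time. */
		printf("child allocated pkey %d\n", sketch_pkey_alloc());
		_exit(0);
	}
	return 0;
}
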
diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
index fb22bcc..7ef45a4 100644
--- a/tools/virtio/linux/kernel.h
+++ b/tools/virtio/linux/kernel.h
@@ -23,6 +23,10 @@
 #define PAGE_MASK (~(PAGE_SIZE-1))
 #define PAGE_ALIGN(x) ((x + PAGE_SIZE - 1) & PAGE_MASK)
 
+/* generic data direction definitions */
+#define READ                    0
+#define WRITE                   1
+
 typedef unsigned long long phys_addr_t;
 typedef unsigned long long dma_addr_t;
 typedef size_t __kernel_size_t;
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 8fb31a7..9149504 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -66,7 +66,7 @@
 static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
 static u32 kvm_next_vmid;
 static unsigned int kvm_vmid_bits __read_mostly;
-static DEFINE_RWLOCK(kvm_vmid_lock);
+static DEFINE_SPINLOCK(kvm_vmid_lock);
 
 static bool vgic_present;
 
@@ -482,7 +482,9 @@
  */
 static bool need_new_vmid_gen(struct kvm *kvm)
 {
-	return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
+	u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
+	smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
+	return unlikely(READ_ONCE(kvm->arch.vmid_gen) != current_vmid_gen);
 }
 
 /**
@@ -497,16 +499,11 @@
 {
 	phys_addr_t pgd_phys;
 	u64 vmid;
-	bool new_gen;
 
-	read_lock(&kvm_vmid_lock);
-	new_gen = need_new_vmid_gen(kvm);
-	read_unlock(&kvm_vmid_lock);
-
-	if (!new_gen)
+	if (!need_new_vmid_gen(kvm))
 		return;
 
-	write_lock(&kvm_vmid_lock);
+	spin_lock(&kvm_vmid_lock);
 
 	/*
 	 * We need to re-check the vmid_gen here to ensure that if another vcpu
@@ -514,7 +511,7 @@
 	 * use the same vmid.
 	 */
 	if (!need_new_vmid_gen(kvm)) {
-		write_unlock(&kvm_vmid_lock);
+		spin_unlock(&kvm_vmid_lock);
 		return;
 	}
 
@@ -537,7 +534,6 @@
 		kvm_call_hyp(__kvm_flush_vm_context);
 	}
 
-	kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
 	kvm->arch.vmid = kvm_next_vmid;
 	kvm_next_vmid++;
 	kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
@@ -548,7 +544,10 @@
 	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
 	kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid;
 
-	write_unlock(&kvm_vmid_lock);
+	smp_wmb();
+	WRITE_ONCE(kvm->arch.vmid_gen, atomic64_read(&kvm_vmid_gen));
+
+	spin_unlock(&kvm_vmid_lock);
 }
 
 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
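
The VMID rework replaces the rwlock fast path with a lockless generation check built on a publish/subscribe barrier pairing: the updater writes kvm->arch.vmid (and the VTTBR) first, then smp_wmb(), then publishes vmid_gen; the reader loads vmid_gen, then smp_rmb(), and only on a generation match trusts the vmid fields written before publication. The generic pattern, sketched with C11 atomics standing in for the kernel barriers:

#include <stdatomic.h>

struct state {
	unsigned long long payload;	/* plays the role of arch.vmid */
	_Atomic unsigned long long gen;	/* plays the role of vmid_gen */
};

static void publish(struct state *s, unsigned long long v,
		    unsigned long long g)
{
	s->payload = v;
	/* release store: payload ordered before gen, like smp_wmb() */
	atomic_store_explicit(&s->gen, g, memory_order_release);
}

static int up_to_date(struct state *s, unsigned long long cur_gen)
{
	/* acquire load: gen ordered before later payload reads,
	 * like the smp_rmb() in need_new_vmid_gen() */
	return atomic_load_explicit(&s->gen, memory_order_acquire) == cur_gen;
}
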
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index f56ff1c..ceeda7e 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -313,36 +313,30 @@
 
 	spin_lock_irqsave(&irq->irq_lock, flags);
 
-	/*
-	 * If this virtual IRQ was written into a list register, we
-	 * have to make sure the CPU that runs the VCPU thread has
-	 * synced back the LR state to the struct vgic_irq.
-	 *
-	 * As long as the conditions below are true, we know the VCPU thread
-	 * may be on its way back from the guest (we kicked the VCPU thread in
-	 * vgic_change_active_prepare)  and still has to sync back this IRQ,
-	 * so we release and re-acquire the spin_lock to let the other thread
-	 * sync back the IRQ.
-	 *
-	 * When accessing VGIC state from user space, requester_vcpu is
-	 * NULL, which is fine, because we guarantee that no VCPUs are running
-	 * when accessing VGIC state from user space so irq->vcpu->cpu is
-	 * always -1.
-	 */
-	while (irq->vcpu && /* IRQ may have state in an LR somewhere */
-	       irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
-	       irq->vcpu->cpu != -1) /* VCPU thread is running */
-		cond_resched_lock(&irq->irq_lock);
-
 	if (irq->hw) {
 		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
 	} else {
 		u32 model = vcpu->kvm->arch.vgic.vgic_model;
+		u8 active_source;
 
 		irq->active = active;
+
+		/*
+		 * The GICv2 architecture indicates that the source CPUID for
+		 * an SGI should be provided during an EOI which implies that
+		 * the active state is stored somewhere, but at the same time
+		 * this state is not architecturally exposed anywhere and we
+		 * have no way of knowing the right source.
+		 *
+		 * This may lead to a VCPU not being able to receive
+		 * additional instances of a particular SGI after migration
+		 * for a GICv2 VM on some GIC implementations.  Oh well.
+		 */
+		active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0;
+
 		if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
 		    active && vgic_irq_is_sgi(irq->intid))
-			irq->active_source = requester_vcpu->vcpu_id;
+			irq->active_source = active_source;
 	}
 
 	if (irq->active)
@@ -368,14 +362,16 @@
  */
 static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
 {
-	if (intid > VGIC_NR_PRIVATE_IRQS)
+	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
+	    intid > VGIC_NR_PRIVATE_IRQS)
 		kvm_arm_halt_guest(vcpu->kvm);
 }
 
 /* See vgic_change_active_prepare */
 static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
 {
-	if (intid > VGIC_NR_PRIVATE_IRQS)
+	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
+	    intid > VGIC_NR_PRIVATE_IRQS)
 		kvm_arm_resume_guest(vcpu->kvm);
 }
 
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 7cfdfbc..f884a54 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -103,13 +103,13 @@
 {
 	/* SGIs and PPIs */
 	if (intid <= VGIC_MAX_PRIVATE) {
-		intid = array_index_nospec(intid, VGIC_MAX_PRIVATE);
+		intid = array_index_nospec(intid, VGIC_MAX_PRIVATE + 1);
 		return &vcpu->arch.vgic_cpu.private_irqs[intid];
 	}
 
 	/* SPIs */
-	if (intid <= VGIC_MAX_SPI) {
-		intid = array_index_nospec(intid, VGIC_MAX_SPI);
+	if (intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) {
+		intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS);
 		return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
 	}
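
The bound fix matters because array_index_nospec(index, size) treats its second argument as the array *size*: any index >= size is forced to 0, even architecturally. With the old call, the largest legal private intid (VGIC_MAX_PRIVATE, reachable because the check is <=) was silently remapped to IRQ 0. A simplified model of the contract (the real helper is branchless, so the clamp also holds under speculation):

/* Simplified model of array_index_nospec(); illustrative only. */
static unsigned int sketch_index_nospec(unsigned int index,
					unsigned int size)
{
	/* valid indices are 0 .. size - 1; anything else becomes 0 */
	return index < size ? index : 0;
}

/*
 * Old call:  sketch_index_nospec(VGIC_MAX_PRIVATE, VGIC_MAX_PRIVATE)
 *            == 0, mangling the largest legal intid.
 * Fixed:     sketch_index_nospec(VGIC_MAX_PRIVATE, VGIC_MAX_PRIVATE + 1)
 *            == VGIC_MAX_PRIVATE.
 */
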