Merge tag 'linux-can-next-for-4.16-20180116' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next

Marc Kleine-Budde says:

====================
pull-request: can-next 2018-01-16

this is a pull request for net-next/master consisting of 9 patches.

This is a series of patches, some of them initially written by Franklin
S Cooper Jr and later picked up by Faiz Abbas, who added further patches
while working on the series; I contributed one as well.

The first two patches add support to the CAN device infrastructure for
limiting the bitrate of a CAN adapter when the attached CAN transceiver
has a maximum bitrate.

The remaining patches improve the m_can driver. They add support for
bitrate limiting to the driver, clean up the driver and add support for
runtime PM.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/Documentation/ABI/testing/devlink-resource-mlxsw b/Documentation/ABI/testing/devlink-resource-mlxsw
new file mode 100644
index 0000000..259ed29
--- /dev/null
+++ b/Documentation/ABI/testing/devlink-resource-mlxsw
@@ -0,0 +1,33 @@
+What: 		/kvd/
+Date:		08-Jan-2018
+KernelVersion:	v4.16
+Contact:	mlxsw@mellanox.com
+Description:	The main database in the Spectrum device is a centralized
+		KVD database that backs many of the tables used to configure
+		the chip, including L2 FDB, L3 LPM, ECMP and more. The KVD
+		is divided into two sections: the first is a hash-based
+		table and the second is a linear access table. The division
+		between the linear and hash-based sections is static and
+		requires a reload before the changes take effect.
+
+What: 		/kvd/linear
+Date:		08-Jan-2018
+KernelVersion:	v4.16
+Contact:	mlxsw@mellanox.com
+Description:	The linear section of the KVD is managed by software as a
+		flat memory accessed using an index.
+
+What: 		/kvd/hash_single
+Date:		08-Jan-2018
+KernelVersion:	v4.16
+Contact:	mlxsw@mellanox.com
+		The hash-based section of the KVD is managed by the switch
+		device. It is used when the key size is smaller than or
+		equal to 64 bits.
+
+What: 		/kvd/hash_double
+Date:		08-Jan-2018
+KernelVersion:	v4.16
+Contact:	mlxsw@mellanox.com
+		The hash-based section of the KVD is managed by the switch
+		device. It is used when the key size is larger than 64 bits.
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index d6d862d..bfd29bc 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -375,3 +375,19 @@
 Description:	information about CPUs heterogeneity.
 
 		cpu_capacity: capacity of cpu#.
+
+What:		/sys/devices/system/cpu/vulnerabilities
+		/sys/devices/system/cpu/vulnerabilities/meltdown
+		/sys/devices/system/cpu/vulnerabilities/spectre_v1
+		/sys/devices/system/cpu/vulnerabilities/spectre_v2
+Date:		January 2018
+Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:	Information about CPU vulnerabilities
+
+		The files are named after the code names of CPU
+		vulnerabilities. The output of those files reflects the
+		state of the CPUs in the system. Possible output values:
+
+		"Not affected"	  CPU is not affected by the vulnerability
+		"Vulnerable"	  CPU is affected and no mitigation in effect
+		"Mitigation: $M"  CPU is affected and mitigation $M is in effect
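
As an aside, consuming this ABI from user space needs nothing beyond
the documented paths. A minimal, hypothetical C sketch (not part of
this patch) that dumps the three files and treats a missing file as
non-fatal:

#include <stdio.h>

int main(void)
{
	static const char *files[] = {
		"/sys/devices/system/cpu/vulnerabilities/meltdown",
		"/sys/devices/system/cpu/vulnerabilities/spectre_v1",
		"/sys/devices/system/cpu/vulnerabilities/spectre_v2",
	};
	char line[256];

	for (unsigned int i = 0; i < sizeof(files) / sizeof(files[0]); i++) {
		FILE *f = fopen(files[i], "r");

		if (!f) {
			/* Older kernel without this ABI */
			printf("%s: <not available>\n", files[i]);
			continue;
		}
		if (fgets(line, sizeof(line), f))
			printf("%s: %s", files[i], line);
		fclose(f);
	}
	return 0;
}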
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index af7104a..46b26bf 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -713,9 +713,6 @@
 			It will be ignored when crashkernel=X,high is not used
 			or memory reserved is below 4G.
 
-	crossrelease_fullstack
-			[KNL] Allow to record full stack trace in cross-release
-
 	cryptomgr.notests
                         [KNL] Disable crypto self-tests
 
@@ -2626,6 +2623,11 @@
 	nosmt		[KNL,S390] Disable symmetric multithreading (SMT).
 			Equivalent to smt=1.
 
+	nospectre_v2	[X86] Disable all mitigations for the Spectre variant 2
+			(indirect branch prediction) vulnerability. System may
+			allow data leaks with this option, which is equivalent
+			to spectre_v2=off.
+
 	noxsave		[BUGS=X86] Disables x86 extended register state save
 			and restore using xsave. The kernel will fallback to
 			enabling legacy floating-point and sse state.
@@ -2712,8 +2714,6 @@
 			steal time is computed, but won't influence scheduler
 			behaviour
 
-	nopti		[X86-64] Disable kernel page table isolation
-
 	nolapic		[X86-32,APIC] Do not enable or use the local APIC.
 
 	nolapic_timer	[X86-32,APIC] Do not use the local APIC timer.
@@ -3100,6 +3100,12 @@
 		pcie_scan_all	Scan all possible PCIe devices.  Otherwise we
 				only look for one device below a PCIe downstream
 				port.
+		big_root_window	Try to add a big 64bit memory window to the PCIe
+				root complex on AMD CPUs. Some GFX hardware
+				can resize a BAR to allow access to all VRAM.
+				Adding the window is slightly risky (it may
+				conflict with unreported devices), so this
+				taints the kernel.
 
 	pcie_aspm=	[PCIE] Forcibly enable or disable PCIe Active State Power
 			Management.
@@ -3288,11 +3294,20 @@
 	pt.		[PARIDE]
 			See Documentation/blockdev/paride.txt.
 
-	pti=		[X86_64]
-			Control user/kernel address space isolation:
-			on - enable
-			off - disable
-			auto - default setting
+	pti=		[X86_64] Control Page Table Isolation of user and
+			kernel address spaces.  Disabling this feature
+			removes hardening, but improves performance of
+			system calls and interrupts.
+
+			on   - unconditionally enable
+			off  - unconditionally disable
+			auto - kernel detects whether your CPU model is
+			       vulnerable to issues that PTI mitigates
+
+			Not specifying this option is equivalent to pti=auto.
+
+	nopti		[X86_64]
+			Equivalent to pti=off
 
 	pty.legacy_count=
 			[KNL] Number of legacy pty's. Overwrites compiled-in
@@ -3943,6 +3958,29 @@
 	sonypi.*=	[HW] Sony Programmable I/O Control Device driver
 			See Documentation/laptops/sonypi.txt
 
+	spectre_v2=	[X86] Control mitigation of Spectre variant 2
+			(indirect branch speculation) vulnerability.
+
+			on   - unconditionally enable
+			off  - unconditionally disable
+			auto - kernel detects whether your CPU model is
+			       vulnerable
+
+			Selecting 'on' will, and 'auto' may, choose a
+			mitigation method at run time according to the
+			CPU, the available microcode, the setting of the
+			CONFIG_RETPOLINE configuration option, and the
+			compiler with which the kernel was built.
+
+			Specific mitigations can also be selected manually:
+
+			retpoline	  - replace indirect branches
+			retpoline,generic - Google's original retpoline
+			retpoline,amd     - AMD-specific minimal thunk
+
+			Not specifying this option is equivalent to
+			spectre_v2=auto.
+
 	spia_io_base=	[HW,MTD]
 	spia_fio_base=
 	spia_pedr=
diff --git a/Documentation/fault-injection/fault-injection.txt b/Documentation/fault-injection/fault-injection.txt
index 918972b..f4a3246 100644
--- a/Documentation/fault-injection/fault-injection.txt
+++ b/Documentation/fault-injection/fault-injection.txt
@@ -30,6 +30,12 @@
   injects MMC data errors on devices permitted by setting
   debugfs entries under /sys/kernel/debug/mmc0/fail_mmc_request
 
+o fail_function
+
+  injects error returns into specific functions, which are marked
+  by the ALLOW_ERROR_INJECTION() macro, by setting debugfs entries
+  under /sys/kernel/debug/fail_function. No boot option is supported.
+
 Configure fault-injection capabilities behavior
 -----------------------------------------------
 
@@ -123,6 +129,29 @@
 	default is 'N', setting it to 'Y' will disable failure injections
 	when dealing with private (address space) futexes.
 
+- /sys/kernel/debug/fail_function/inject:
+
+	Format: { 'function-name' | '!function-name' | '' }
+	specifies the target function of error injection by name.
+	If the function name is prefixed with '!', the given function
+	is removed from the injection list. If nothing is specified
+	(''), the injection list is cleared.
+
+- /sys/kernel/debug/fail_function/injectable:
+
+	(read only) shows the error-injectable functions and what type
+	of error value can be specified for each. The error type is
+	one of the following:
+	- NULL:	retval must be 0.
+	- ERRNO: retval must be -1 to -MAX_ERRNO (-4096).
+	- ERR_NULL: retval must be 0 or -1 to -MAX_ERRNO (-4096).
+
+- /sys/kernel/debug/fail_function/<function-name>/retval:
+
+	specifies the "error" return value to inject for the given
+	function. This file is created when the user adds a new
+	injection entry.
+
 o Boot option
 
 In order to inject faults while debugfs is not available (early boot time),
@@ -268,6 +297,45 @@
 echo "Injecting errors into the module $module... (interrupt to stop)"
 sleep 1000000
 
+------------------------------------------------------------------------------
+
+o Inject open_ctree error while btrfs mount
+
+#!/bin/bash
+
+rm -f testfile.img
+dd if=/dev/zero of=testfile.img bs=1M seek=1000 count=1
+DEVICE=$(losetup --show -f testfile.img)
+mkfs.btrfs -f $DEVICE
+mkdir -p tmpmnt
+
+FAILTYPE=fail_function
+FAILFUNC=open_ctree
+echo $FAILFUNC > /sys/kernel/debug/$FAILTYPE/inject
+echo -12 > /sys/kernel/debug/$FAILTYPE/$FAILFUNC/retval
+echo N > /sys/kernel/debug/$FAILTYPE/task-filter
+echo 100 > /sys/kernel/debug/$FAILTYPE/probability
+echo 0 > /sys/kernel/debug/$FAILTYPE/interval
+echo -1 > /sys/kernel/debug/$FAILTYPE/times
+echo 0 > /sys/kernel/debug/$FAILTYPE/space
+echo 1 > /sys/kernel/debug/$FAILTYPE/verbose
+
+mount -t btrfs $DEVICE tmpmnt
+if [ $? -ne 0 ]
+then
+	echo "SUCCESS!"
+else
+	echo "FAILED!"
+	umount tmpmnt
+fi
+
+echo > /sys/kernel/debug/$FAILTYPE/inject
+
+rmdir tmpmnt
+losetup -d $DEVICE
+rm testfile.img
+
+
 Tool to run command with failslab or fail_page_alloc
 ----------------------------------------------------
 In order to make it easier to accomplish the tasks mentioned above, we can use
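
For context on the ALLOW_ERROR_INJECTION() annotation mentioned above,
here is a hedged sketch of how a kernel function might be marked as
injectable. The function and struct names are hypothetical; the macro
and its ERRNO type follow the description above, and the exact header
providing the macro varies by kernel version:

#include <linux/error-injection.h>	/* assumed location of the macro */

struct my_resource {
	int id;
};

/* Hypothetical kernel function made targetable by fail_function. */
int my_prepare_resource(struct my_resource *res)
{
	if (!res)
		return -EINVAL;
	/* ... real work ... */
	return 0;
}

/* Declare that an ERRNO-style value (-1 .. -MAX_ERRNO) may be
 * injected as this function's return value via debugfs. */
ALLOW_ERROR_INJECTION(my_prepare_resource, ERRNO);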
diff --git a/Documentation/filesystems/nilfs2.txt b/Documentation/filesystems/nilfs2.txt
index c0727dc..f2f3f85 100644
--- a/Documentation/filesystems/nilfs2.txt
+++ b/Documentation/filesystems/nilfs2.txt
@@ -25,8 +25,8 @@
 cleaner or garbage collector) are required.  Details on the tools are
 described in the man pages included in the package.
 
-Project web page:    http://nilfs.sourceforge.net/
-Download page:       http://nilfs.sourceforge.net/en/download.html
+Project web page:    https://nilfs.sourceforge.io/
+Download page:       https://nilfs.sourceforge.io/en/download.html
 List info:           http://vger.kernel.org/vger-lists.html#linux-nilfs
 
 Caveats
diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt
index 262722d..c4a293a 100644
--- a/Documentation/kbuild/kconfig-language.txt
+++ b/Documentation/kbuild/kconfig-language.txt
@@ -200,10 +200,14 @@
 <expr> ::= <symbol>                             (1)
            <symbol> '=' <symbol>                (2)
            <symbol> '!=' <symbol>               (3)
-           '(' <expr> ')'                       (4)
-           '!' <expr>                           (5)
-           <expr> '&&' <expr>                   (6)
-           <expr> '||' <expr>                   (7)
+           <symbol1> '<' <symbol2>              (4)
+           <symbol1> '>' <symbol2>              (4)
+           <symbol1> '<=' <symbol2>             (4)
+           <symbol1> '>=' <symbol2>             (4)
+           '(' <expr> ')'                       (5)
+           '!' <expr>                           (6)
+           <expr> '&&' <expr>                   (7)
+           <expr> '||' <expr>                   (8)
 
 Expressions are listed in decreasing order of precedence. 
 
@@ -214,10 +218,13 @@
     otherwise 'n'.
 (3) If the values of both symbols are equal, it returns 'n',
     otherwise 'y'.
-(4) Returns the value of the expression. Used to override precedence.
-(5) Returns the result of (2-/expr/).
-(6) Returns the result of min(/expr/, /expr/).
-(7) Returns the result of max(/expr/, /expr/).
+(4) If the value of <symbol1> is respectively lower than, greater than,
+    lower-or-equal to, or greater-or-equal to the value of <symbol2>,
+    it returns 'y', otherwise 'n'.
+(5) Returns the value of the expression. Used to override precedence.
+(6) Returns the result of (2-/expr/).
+(7) Returns the result of min(/expr/, /expr/).
+(8) Returns the result of max(/expr/, /expr/).
 
 An expression can have a value of 'n', 'm' or 'y' (or 0, 1, 2
 respectively for calculations). A menu entry becomes visible when its
diff --git a/Documentation/usb/gadget-testing.txt b/Documentation/usb/gadget-testing.txt
index 441a4b9..5908a21 100644
--- a/Documentation/usb/gadget-testing.txt
+++ b/Documentation/usb/gadget-testing.txt
@@ -693,7 +693,7 @@
 in each line. The rules stated above are best illustrated with an example:
 
 # mkdir functions/uvc.usb0/control/header/h
-# cd functions/uvc.usb0/control/header/h
+# cd functions/uvc.usb0/control/
 # ln -s header/h class/fs
 # ln -s header/h class/ss
 # mkdir -p functions/uvc.usb0/streaming/uncompressed/u/360p
diff --git a/Documentation/x86/pti.txt b/Documentation/x86/pti.txt
new file mode 100644
index 0000000..d11eff6
--- /dev/null
+++ b/Documentation/x86/pti.txt
@@ -0,0 +1,186 @@
+Overview
+========
+
+Page Table Isolation (pti, previously known as KAISER[1]) is a
+countermeasure against attacks on the shared user/kernel address
+space such as the "Meltdown" approach[2].
+
+To mitigate this class of attacks, we create an independent set of
+page tables for use only when running userspace applications.  When
+the kernel is entered via syscalls, interrupts or exceptions, the
+page tables are switched to the full "kernel" copy.  When the system
+switches back to user mode, the user copy is used again.
+
+The userspace page tables contain only a minimal amount of kernel
+data: only what is needed to enter/exit the kernel such as the
+entry/exit functions themselves and the interrupt descriptor table
+(IDT).  There are a few strictly unnecessary things that get mapped
+such as the first C function when entering an interrupt (see
+comments in pti.c).
+
+This approach helps to ensure that side-channel attacks leveraging
+the paging structures do not function when PTI is enabled.  It can be
+enabled by setting CONFIG_PAGE_TABLE_ISOLATION=y at compile time.
+Once enabled at compile-time, it can be disabled at boot with the
+'nopti' or 'pti=' kernel parameters (see kernel-parameters.txt).
+
+Page Table Management
+=====================
+
+When PTI is enabled, the kernel manages two sets of page tables.
+The first set is very similar to the single set which is present in
+kernels without PTI.  This includes a complete mapping of userspace
+that the kernel can use for things like copy_to_user().
+
+Although _complete_, the user portion of the kernel page tables is
+crippled by setting the NX bit in the top level.  This ensures
+that any missed kernel->user CR3 switch will immediately crash
+userspace upon executing its first instruction.
+
+The userspace page tables map only the kernel data needed to enter
+and exit the kernel.  This data is entirely contained in the 'struct
+cpu_entry_area' structure which is placed in the fixmap which gives
+each CPU's copy of the area a compile-time-fixed virtual address.
+
+For new userspace mappings, the kernel makes the entries in its
+page tables like normal.  The only difference is when the kernel
+makes entries in the top (PGD) level.  In addition to setting the
+entry in the main kernel PGD, a copy of the entry is made in the
+userspace page tables' PGD.
+
+This sharing at the PGD level also inherently shares all the lower
+layers of the page tables.  This leaves a single, shared set of
+userspace page tables to manage.  One PTE to lock, one set of
+accessed bits, dirty bits, etc...
+
+Overhead
+========
+
+Protection against side-channel attacks is important.  But,
+this protection comes at a cost:
+
+1. Increased Memory Use
+  a. Each process now needs an order-1 PGD instead of order-0.
+     (Consumes an additional 4k per process).
+  b. The 'cpu_entry_area' structure must be 2MB in size and 2MB
+     aligned so that it can be mapped by setting a single PMD
+     entry.  This consumes nearly 2MB of RAM once the kernel
+     is decompressed, but no space in the kernel image itself.
+
+2. Runtime Cost
+  a. CR3 manipulation to switch between the page table copies
+     must be done at interrupt, syscall, and exception entry
+     and exit (it can be skipped when the kernel is interrupted,
+     though.)  Moves to CR3 are on the order of a hundred
+     cycles, and are required at every entry and exit.
+  b. A "trampoline" must be used for SYSCALL entry.  This
+     trampoline depends on a smaller set of resources than the
+     non-PTI SYSCALL entry code, so requires mapping fewer
+     things into the userspace page tables.  The downside is
+     that stacks must be switched at entry time.
+  c. Global pages are disabled for all kernel structures not
+     mapped into both kernel and userspace page tables.  This
+     feature of the MMU allows different processes to share TLB
+     entries mapping the kernel.  Losing the feature means more
+     TLB misses after a context switch.  The actual loss of
+     performance is very small, however, never exceeding 1%.
+  d. Process Context IDentifiers (PCID) is a CPU feature that
+     allows us to skip flushing the entire TLB when switching page
+     tables by setting a special bit in CR3 when the page tables
+     are changed.  This makes switching the page tables (at context
+     switch, or kernel entry/exit) cheaper.  But, on systems with
+     PCID support, the context switch code must flush both the user
+     and kernel entries out of the TLB.  The user PCID TLB flush is
+     deferred until the exit to userspace, minimizing the cost.
+     See intel.com/sdm for the gory PCID/INVPCID details.
+  e. The userspace page tables must be populated for each new
+     process.  Even without PTI, the shared kernel mappings
+     are created by copying top-level (PGD) entries into each
+     new process.  But, with PTI, there are now *two* kernel
+     mappings: one in the kernel page tables that maps everything
+     and one for the entry/exit structures.  At fork(), we need to
+     copy both.
+  f. In addition to the fork()-time copying, there must also
+     be an update to the userspace PGD any time a set_pgd() is done
+     on a PGD used to map userspace.  This ensures that the kernel
+     and userspace copies always map the same userspace
+     memory.
+  g. On systems without PCID support, each CR3 write flushes
+     the entire TLB.  That means that each syscall, interrupt
+     or exception flushes the TLB.
+  h. INVPCID is a TLB-flushing instruction which allows flushing
+     of TLB entries for non-current PCIDs.  Some systems support
+     PCIDs, but do not support INVPCID.  On these systems, addresses
+     can only be flushed from the TLB for the current PCID.  When
+     flushing a kernel address, we need to flush all PCIDs, so a
+     single kernel address flush will require a TLB-flushing CR3
+     write upon the next use of every PCID.
+
+Possible Future Work
+====================
+1. We can be more careful about not actually writing to CR3
+   unless its value is actually changed.
+2. Allow PTI to be enabled/disabled at runtime in addition to the
+   boot-time switching.
+
+Testing
+========
+
+To test stability of PTI, the following test procedure is recommended,
+ideally doing all of these in parallel:
+
+1. Set CONFIG_DEBUG_ENTRY=y
+2. Run several copies of all of the tools/testing/selftests/x86/ tests
+   (excluding MPX and protection_keys) in a loop on multiple CPUs for
+   several minutes.  These tests frequently uncover corner cases in the
+   kernel entry code.  In general, old kernels might cause these tests
+   themselves to crash, but they should never crash the kernel.
+3. Run the 'perf' tool in a mode (top or record) that generates many
+   frequent performance monitoring non-maskable interrupts (see "NMI"
+   in /proc/interrupts).  This exercises the NMI entry/exit code which
+   is known to trigger bugs in code paths that did not expect to be
+   interrupted, including nested NMIs.  Using "-c" boosts the rate of
+   NMIs, and using two -c options with separate counters encourages
+   nested NMIs and less deterministic behavior.
+
+	while true; do perf record -c 10000 -e instructions,cycles -a sleep 10; done
+
+4. Launch a KVM virtual machine.
+5. Run 32-bit binaries on systems supporting the SYSCALL instruction.
+   This has been a lightly-tested code path and needs extra scrutiny.
+
+Debugging
+=========
+
+Bugs in PTI cause a few different signatures of crashes
+that are worth noting here.
+
+ * Failures of the selftests/x86 code.  Usually a bug in one of the
+   more obscure corners of entry_64.S
+ * Crashes in early boot, especially around CPU bringup.  Bugs
+   in the trampoline code or mappings cause these.
+ * Crashes at the first interrupt.  Caused by bugs in entry_64.S,
+   like screwing up a page table switch.  Also caused by
+   incorrectly mapping the IRQ handler entry code.
+ * Crashes at the first NMI.  The NMI code is separate from main
+   interrupt handlers and can have bugs that do not affect
+   normal interrupts.  Also caused by incorrectly mapping NMI
+   code.  NMIs that interrupt the entry code must be very
+   careful and can be the cause of crashes that show up when
+   running perf.
+ * Kernel crashes at the first exit to userspace.  entry_64.S
+   bugs, or failing to map some of the exit code.
+ * Crashes at first interrupt that interrupts userspace. The paths
+   in entry_64.S that return to userspace are sometimes separate
+   from the ones that return to the kernel.
+ * Double faults: overflowing the kernel stack because of page
+   faults upon page faults.  Caused by touching non-pti-mapped
+   data in the entry code, or forgetting to switch to kernel
+   CR3 before calling into C functions which are not pti-mapped.
+ * Userspace segfaults early in boot, sometimes manifesting
+   as mount(8) failing to mount the rootfs.  These have
+   tended to be TLB invalidation issues.  Usually invalidating
+   the wrong PCID, or otherwise missing an invalidation.
+
+1. https://gruss.cc/files/kaiser.pdf
+2. https://meltdownattack.com/meltdown.pdf
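
The syscall-entry cost described in the Overhead section is easy to
observe from user space. A small, hypothetical benchmark (not part of
the test plan above) that times raw getpid() syscalls; the absolute
numbers and the size of any PTI effect depend entirely on the CPU:

#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>

#define ITERS 1000000L

int main(void)
{
	struct timespec start, end;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (long i = 0; i < ITERS; i++)
		syscall(SYS_getpid);	/* force a real kernel entry/exit */
	clock_gettime(CLOCK_MONOTONIC, &end);

	double ns = (end.tv_sec - start.tv_sec) * 1e9 +
		    (end.tv_nsec - start.tv_nsec);
	printf("%.1f ns per syscall\n", ns / ITERS);
	return 0;
}

Comparing a run with the default boot against one with pti=off (or
nopti) gives a rough feel for the per-entry cost on a given machine.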
diff --git a/MAINTAINERS b/MAINTAINERS
index c7f40d3..3a28cee 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9660,8 +9660,8 @@
 NILFS2 FILESYSTEM
 M:	Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
 L:	linux-nilfs@vger.kernel.org
-W:	http://nilfs.sourceforge.net/
-W:	http://nilfs.osdn.jp/
+W:	https://nilfs.sourceforge.io/
+W:	https://nilfs.osdn.jp/
 T:	git git://github.com/konis/nilfs2.git
 S:	Supported
 F:	Documentation/filesystems/nilfs2.txt
diff --git a/Makefile b/Makefile
index eb59638..bf5b8cb 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -484,26 +484,6 @@
 endif
 KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
 KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
-KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
-KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
-KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
-KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
-KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
-# Quiet clang warning: comparison of unsigned expression < 0 is always false
-KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
-# Clang uses _MergedGlobals as an optimization, but this breaks modpost, as the
-# source of a reference will be _MergedGlobals and not one of the whitelisted names.
-# See modpost pattern 2
-KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
-KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
-KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
-KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
-else
-
-# These warnings generated too much noise in a regular build.
-# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
-KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
-KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
 endif
 
 ifeq ($(config-targets),1)
@@ -716,6 +696,29 @@
 endif
 KBUILD_CFLAGS += $(stackp-flag)
 
+ifeq ($(cc-name),clang)
+KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
+KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
+KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
+KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
+# Quiet clang warning: comparison of unsigned expression < 0 is always false
+KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
+# Clang uses _MergedGlobals as an optimization, but this breaks modpost, as the
+# source of a reference will be _MergedGlobals and not one of the whitelisted names.
+# See modpost pattern 2
+KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
+KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
+KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
+KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
+else
+
+# These warnings generated too much noise in a regular build.
+# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
+endif
+
 ifdef CONFIG_FRAME_POINTER
 KBUILD_CFLAGS	+= -fno-omit-frame-pointer -fno-optimize-sibling-calls
 else
diff --git a/arch/Kconfig b/arch/Kconfig
index d3f4aaf..97376ac 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -196,7 +196,7 @@
 config HAVE_KPROBES_ON_FTRACE
 	bool
 
-config HAVE_KPROBE_OVERRIDE
+config HAVE_FUNCTION_ERROR_INJECTION
 	bool
 
 config HAVE_NMI
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index c6ecb97..9025699 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -88,7 +88,7 @@ void vtime_flush(struct task_struct *tsk)
 	}
 
 	if (ti->softirq_time) {
-		delta = cycle_to_nsec(ti->softirq_time));
+		delta = cycle_to_nsec(ti->softirq_time);
 		account_system_index_time(tsk, delta, CPUTIME_SOFTIRQ);
 	}
 
diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h
index a703452..555e22d 100644
--- a/arch/powerpc/include/asm/exception-64e.h
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -209,5 +209,11 @@ exc_##label##_book3e:
 	ori	r3,r3,vector_offset@l;		\
 	mtspr	SPRN_IVOR##vector_number,r3;
 
+#define RFI_TO_KERNEL							\
+	rfi
+
+#define RFI_TO_USER							\
+	rfi
+
 #endif /* _ASM_POWERPC_EXCEPTION_64E_H */
 
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index b272052..7197b17 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -74,6 +74,59 @@
  */
 #define EX_R3		EX_DAR
 
+/*
+ * Macros for annotating the expected destination of (h)rfid
+ *
+ * The nop instructions allow us to insert one or more instructions to flush the
+ * L1-D cache when returning to userspace or a guest.
+ */
+#define RFI_FLUSH_SLOT							\
+	RFI_FLUSH_FIXUP_SECTION;					\
+	nop;								\
+	nop;								\
+	nop
+
+#define RFI_TO_KERNEL							\
+	rfid
+
+#define RFI_TO_USER							\
+	RFI_FLUSH_SLOT;							\
+	rfid;								\
+	b	rfi_flush_fallback
+
+#define RFI_TO_USER_OR_KERNEL						\
+	RFI_FLUSH_SLOT;							\
+	rfid;								\
+	b	rfi_flush_fallback
+
+#define RFI_TO_GUEST							\
+	RFI_FLUSH_SLOT;							\
+	rfid;								\
+	b	rfi_flush_fallback
+
+#define HRFI_TO_KERNEL							\
+	hrfid
+
+#define HRFI_TO_USER							\
+	RFI_FLUSH_SLOT;							\
+	hrfid;								\
+	b	hrfi_flush_fallback
+
+#define HRFI_TO_USER_OR_KERNEL						\
+	RFI_FLUSH_SLOT;							\
+	hrfid;								\
+	b	hrfi_flush_fallback
+
+#define HRFI_TO_GUEST							\
+	RFI_FLUSH_SLOT;							\
+	hrfid;								\
+	b	hrfi_flush_fallback
+
+#define HRFI_TO_UNKNOWN							\
+	RFI_FLUSH_SLOT;							\
+	hrfid;								\
+	b	hrfi_flush_fallback
+
 #ifdef CONFIG_RELOCATABLE
 #define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h)			\
 	mfspr	r11,SPRN_##h##SRR0;	/* save SRR0 */			\
@@ -218,7 +271,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 	mtspr	SPRN_##h##SRR0,r12;					\
 	mfspr	r12,SPRN_##h##SRR1;	/* and SRR1 */			\
 	mtspr	SPRN_##h##SRR1,r10;					\
-	h##rfid;							\
+	h##RFI_TO_KERNEL;						\
 	b	.	/* prevent speculative execution */
 #define EXCEPTION_PROLOG_PSERIES_1(label, h)				\
 	__EXCEPTION_PROLOG_PSERIES_1(label, h)
@@ -232,7 +285,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 	mtspr	SPRN_##h##SRR0,r12;					\
 	mfspr	r12,SPRN_##h##SRR1;	/* and SRR1 */			\
 	mtspr	SPRN_##h##SRR1,r10;					\
-	h##rfid;							\
+	h##RFI_TO_KERNEL;						\
 	b	.	/* prevent speculative execution */
 
 #define EXCEPTION_PROLOG_PSERIES_1_NORI(label, h)			\
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index 8f88f77..1e82eb3 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -187,7 +187,20 @@ label##3:					       	\
 	FTR_ENTRY_OFFSET label##1b-label##3b;		\
 	.popsection;
 
+#define RFI_FLUSH_FIXUP_SECTION				\
+951:							\
+	.pushsection __rfi_flush_fixup,"a";		\
+	.align 2;					\
+952:							\
+	FTR_ENTRY_OFFSET 951b-952b;			\
+	.popsection;
+
+
 #ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
+
 void apply_feature_fixups(void);
 void setup_feature_keys(void);
 #endif
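
To make the offset bookkeeping concrete: FTR_ENTRY_OFFSET stores, for
each fixup entry, the distance from the entry to its patch site
(951b-952b above), so the table needs no relocation. A rough
user-space C model of the recover-and-patch step; all names here are
hypothetical, and the kernel performs the real patching with
patch_instruction() in do_rfi_flush_fixups() later in this series:

#include <stdio.h>

/* Two "nop slots" standing in for RFI_FLUSH_SLOT instances. */
static unsigned int slot_a = 0x60000000;	/* nop */
static unsigned int slot_b = 0x60000000;	/* nop */

/* Fixup table: each entry holds (site - entry), like 951b-952b. */
static long fixup[2];

int main(void)
{
	/* The assembler/linker computes these differences at build time;
	 * a demo has to do it at run time. */
	fixup[0] = (char *)&slot_a - (char *)&fixup[0];
	fixup[1] = (char *)&slot_b - (char *)&fixup[1];

	for (long *entry = fixup; entry < fixup + 2; entry++) {
		/* Same recovery as the kernel: site = entry + *entry */
		unsigned int *dest = (unsigned int *)((char *)entry + *entry);

		*dest = 0x48000010;	/* "patch in" b .+16 */
		printf("patched slot at %p\n", (void *)dest);
	}
	return 0;
}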
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index a409177..f046161 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -241,6 +241,7 @@
 #define H_GET_HCA_INFO          0x1B8
 #define H_GET_PERF_COUNT        0x1BC
 #define H_MANAGE_TRACE          0x1C0
+#define H_GET_CPU_CHARACTERISTICS 0x1C8
 #define H_FREE_LOGICAL_LAN_BUFFER 0x1D4
 #define H_QUERY_INT_STATE       0x1E4
 #define H_POLL_PENDING		0x1D8
@@ -330,6 +331,17 @@
 #define H_SIGNAL_SYS_RESET_ALL_OTHERS		-2
 /* >= 0 values are CPU number */
 
+/* H_GET_CPU_CHARACTERISTICS return values */
+#define H_CPU_CHAR_SPEC_BAR_ORI31	(1ull << 63) // IBM bit 0
+#define H_CPU_CHAR_BCCTRL_SERIALISED	(1ull << 62) // IBM bit 1
+#define H_CPU_CHAR_L1D_FLUSH_ORI30	(1ull << 61) // IBM bit 2
+#define H_CPU_CHAR_L1D_FLUSH_TRIG2	(1ull << 60) // IBM bit 3
+#define H_CPU_CHAR_L1D_THREAD_PRIV	(1ull << 59) // IBM bit 4
+
+#define H_CPU_BEHAV_FAVOUR_SECURITY	(1ull << 63) // IBM bit 0
+#define H_CPU_BEHAV_L1D_FLUSH_PR	(1ull << 62) // IBM bit 1
+#define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR	(1ull << 61) // IBM bit 2
+
 /* Flag values used in H_REGISTER_PROC_TBL hcall */
 #define PROC_TABLE_OP_MASK	0x18
 #define PROC_TABLE_DEREG	0x10
@@ -436,6 +448,11 @@ static inline unsigned int get_longbusy_msecs(int longbusy_rc)
 	}
 }
 
+struct h_cpu_char_result {
+	u64 character;
+	u64 behaviour;
+};
+
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_HVCALL_H */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 3892db9..23ac7fc 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -232,6 +232,16 @@ struct paca_struct {
 	struct sibling_subcore_state *sibling_subcore_state;
 #endif
 #endif
+#ifdef CONFIG_PPC_BOOK3S_64
+	/*
+	 * rfi fallback flush must be in its own cacheline to prevent
+	 * other paca data leaking into the L1d
+	 */
+	u64 exrfi[EX_SIZE] __aligned(0x80);
+	void *rfi_flush_fallback_area;
+	u64 l1d_flush_congruence;
+	u64 l1d_flush_sets;
+#endif
 };
 
 extern void copy_mm_to_paca(struct mm_struct *mm);
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
index 7f01b22..55eddf5 100644
--- a/arch/powerpc/include/asm/plpar_wrappers.h
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -326,4 +326,18 @@ static inline long plapr_signal_sys_reset(long cpu)
 	return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu);
 }
 
+static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
+{
+	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+	long rc;
+
+	rc = plpar_hcall(H_GET_CPU_CHARACTERISTICS, retbuf);
+	if (rc == H_SUCCESS) {
+		p->character = retbuf[0];
+		p->behaviour = retbuf[1];
+	}
+
+	return rc;
+}
+
 #endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index cf00ec2..469b7fd 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -39,6 +39,19 @@ static inline void pseries_big_endian_exceptions(void) {}
 static inline void pseries_little_endian_exceptions(void) {}
 #endif /* CONFIG_PPC_PSERIES */
 
+void rfi_flush_enable(bool enable);
+
+/* These are bit flags */
+enum l1d_flush_type {
+	L1D_FLUSH_NONE		= 0x1,
+	L1D_FLUSH_FALLBACK	= 0x2,
+	L1D_FLUSH_ORI		= 0x4,
+	L1D_FLUSH_MTTRIG	= 0x8,
+};
+
+void __init setup_rfi_flush(enum l1d_flush_type, bool enable);
+void do_rfi_flush_fixups(enum l1d_flush_type types);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif	/* _ASM_POWERPC_SETUP_H */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 6b95841..f390d57 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -237,6 +237,11 @@ int main(void)
 	OFFSET(PACA_NMI_EMERG_SP, paca_struct, nmi_emergency_sp);
 	OFFSET(PACA_IN_MCE, paca_struct, in_mce);
 	OFFSET(PACA_IN_NMI, paca_struct, in_nmi);
+	OFFSET(PACA_RFI_FLUSH_FALLBACK_AREA, paca_struct, rfi_flush_fallback_area);
+	OFFSET(PACA_EXRFI, paca_struct, exrfi);
+	OFFSET(PACA_L1D_FLUSH_CONGRUENCE, paca_struct, l1d_flush_congruence);
+	OFFSET(PACA_L1D_FLUSH_SETS, paca_struct, l1d_flush_sets);
+
 #endif
 	OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id);
 	OFFSET(PACAKEXECSTATE, paca_struct, kexec_state);
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 3320bca..2748584 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -37,6 +37,11 @@
 #include <asm/tm.h>
 #include <asm/ppc-opcode.h>
 #include <asm/export.h>
+#ifdef CONFIG_PPC_BOOK3S
+#include <asm/exception-64s.h>
+#else
+#include <asm/exception-64e.h>
+#endif
 
 /*
  * System calls.
@@ -262,13 +267,23 @@
 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 
 	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
+	ld	r2,GPR2(r1)
+	ld	r1,GPR1(r1)
+	mtlr	r4
+	mtcr	r5
+	mtspr	SPRN_SRR0,r7
+	mtspr	SPRN_SRR1,r8
+	RFI_TO_USER
+	b	.	/* prevent speculative execution */
+
+	/* exit to kernel */
 1:	ld	r2,GPR2(r1)
 	ld	r1,GPR1(r1)
 	mtlr	r4
 	mtcr	r5
 	mtspr	SPRN_SRR0,r7
 	mtspr	SPRN_SRR1,r8
-	RFI
+	RFI_TO_KERNEL
 	b	.	/* prevent speculative execution */
 
 .Lsyscall_error:
@@ -397,8 +412,7 @@
 	mtmsrd	r10, 1
 	mtspr	SPRN_SRR0, r11
 	mtspr	SPRN_SRR1, r12
-
-	rfid
+	RFI_TO_USER
 	b	.	/* prevent speculative execution */
 #endif
 _ASM_NOKPROBE_SYMBOL(system_call_common);
@@ -878,7 +892,7 @@
 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
 	REST_GPR(13, r1)
-1:
+
 	mtspr	SPRN_SRR1,r3
 
 	ld	r2,_CCR(r1)
@@ -891,8 +905,22 @@
 	ld	r3,GPR3(r1)
 	ld	r4,GPR4(r1)
 	ld	r1,GPR1(r1)
+	RFI_TO_USER
+	b	.	/* prevent speculative execution */
 
-	rfid
+1:	mtspr	SPRN_SRR1,r3
+
+	ld	r2,_CCR(r1)
+	mtcrf	0xFF,r2
+	ld	r2,_NIP(r1)
+	mtspr	SPRN_SRR0,r2
+
+	ld	r0,GPR0(r1)
+	ld	r2,GPR2(r1)
+	ld	r3,GPR3(r1)
+	ld	r4,GPR4(r1)
+	ld	r1,GPR1(r1)
+	RFI_TO_KERNEL
 	b	.	/* prevent speculative execution */
 
 #endif /* CONFIG_PPC_BOOK3E */
@@ -1073,7 +1101,7 @@
 	
 	mtspr	SPRN_SRR0,r5
 	mtspr	SPRN_SRR1,r6
-	rfid
+	RFI_TO_KERNEL
 	b	.	/* prevent speculative execution */
 
 rtas_return_loc:
@@ -1098,7 +1126,7 @@
 
 	mtspr	SPRN_SRR0,r3
 	mtspr	SPRN_SRR1,r4
-	rfid
+	RFI_TO_KERNEL
 	b	.	/* prevent speculative execution */
 _ASM_NOKPROBE_SYMBOL(__enter_rtas)
 _ASM_NOKPROBE_SYMBOL(rtas_return_loc)
@@ -1171,7 +1199,7 @@
 	LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
 	andc	r11,r11,r12
 	mtsrr1	r11
-	rfid
+	RFI_TO_KERNEL
 #endif /* CONFIG_PPC_BOOK3E */
 
 1:	/* Return from OF */
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e441b46..2dc10bf 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -256,7 +256,7 @@
 	LOAD_HANDLER(r12, machine_check_handle_early)
 1:	mtspr	SPRN_SRR0,r12
 	mtspr	SPRN_SRR1,r11
-	rfid
+	RFI_TO_KERNEL
 	b	.	/* prevent speculative execution */
 2:
 	/* Stack overflow. Stay on emergency stack and panic.
@@ -445,7 +445,7 @@
 	li	r3,MSR_ME
 	andc	r10,r10,r3		/* Turn off MSR_ME */
 	mtspr	SPRN_SRR1,r10
-	rfid
+	RFI_TO_KERNEL
 	b	.
 2:
 	/*
@@ -463,7 +463,7 @@
 	 */
 	bl	machine_check_queue_event
 	MACHINE_CHECK_HANDLER_WINDUP
-	rfid
+	RFI_TO_USER_OR_KERNEL
 9:
 	/* Deliver the machine check to host kernel in V mode. */
 	MACHINE_CHECK_HANDLER_WINDUP
@@ -598,6 +598,9 @@
 	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
 	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
 
+	andi.	r9,r11,MSR_PR	// Check for exception from userspace
+	cmpdi	cr4,r9,MSR_PR	// And save the result in CR4 for later
+
 	/*
 	 * Test MSR_RI before calling slb_allocate_realmode, because the
 	 * MSR in r11 gets clobbered. However we still want to allocate
@@ -624,9 +627,12 @@
 
 	/* All done -- return from exception. */
 
+	bne	cr4,1f		/* returning to kernel */
+
 .machine	push
 .machine	"power4"
 	mtcrf	0x80,r9
+	mtcrf	0x08,r9		/* MSR[PR] indication is in cr4 */
 	mtcrf	0x04,r9		/* MSR[RI] indication is in cr5 */
 	mtcrf	0x02,r9		/* I/D indication is in cr6 */
 	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
@@ -640,8 +646,29 @@
 	ld	r11,PACA_EXSLB+EX_R11(r13)
 	ld	r12,PACA_EXSLB+EX_R12(r13)
 	ld	r13,PACA_EXSLB+EX_R13(r13)
-	rfid
+	RFI_TO_USER
 	b	.	/* prevent speculative execution */
+1:
+.machine	push
+.machine	"power4"
+	mtcrf	0x80,r9
+	mtcrf	0x08,r9		/* MSR[PR] indication is in cr4 */
+	mtcrf	0x04,r9		/* MSR[RI] indication is in cr5 */
+	mtcrf	0x02,r9		/* I/D indication is in cr6 */
+	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
+.machine	pop
+
+	RESTORE_CTR(r9, PACA_EXSLB)
+	RESTORE_PPR_PACA(PACA_EXSLB, r9)
+	mr	r3,r12
+	ld	r9,PACA_EXSLB+EX_R9(r13)
+	ld	r10,PACA_EXSLB+EX_R10(r13)
+	ld	r11,PACA_EXSLB+EX_R11(r13)
+	ld	r12,PACA_EXSLB+EX_R12(r13)
+	ld	r13,PACA_EXSLB+EX_R13(r13)
+	RFI_TO_KERNEL
+	b	.	/* prevent speculative execution */
+
 
 2:	std     r3,PACA_EXSLB+EX_DAR(r13)
 	mr	r3,r12
@@ -651,7 +678,7 @@
 	mtspr	SPRN_SRR0,r10
 	ld	r10,PACAKMSR(r13)
 	mtspr	SPRN_SRR1,r10
-	rfid
+	RFI_TO_KERNEL
 	b	.
 
 8:	std     r3,PACA_EXSLB+EX_DAR(r13)
@@ -662,7 +689,7 @@
 	mtspr	SPRN_SRR0,r10
 	ld	r10,PACAKMSR(r13)
 	mtspr	SPRN_SRR1,r10
-	rfid
+	RFI_TO_KERNEL
 	b	.
 
 EXC_COMMON_BEGIN(unrecov_slb)
@@ -901,7 +928,7 @@
 	mtspr	SPRN_SRR0,r10 ; 				\
 	ld	r10,PACAKMSR(r13) ;				\
 	mtspr	SPRN_SRR1,r10 ; 				\
-	rfid ; 							\
+	RFI_TO_KERNEL ;						\
 	b	. ;	/* prevent speculative execution */
 
 #ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
@@ -917,7 +944,7 @@
 	xori	r12,r12,MSR_LE ;				\
 	mtspr	SPRN_SRR1,r12 ;					\
 	mr	r13,r9 ;					\
-	rfid ;		/* return to userspace */		\
+	RFI_TO_USER ;	/* return to userspace */		\
 	b	. ;	/* prevent speculative execution */
 #else
 #define SYSCALL_FASTENDIAN_TEST
@@ -1063,7 +1090,7 @@
 	mtcr	r11
 	REST_GPR(11, r1)
 	ld	r1,GPR1(r1)
-	hrfid
+	HRFI_TO_USER_OR_KERNEL
 
 1:	mtcr	r11
 	REST_GPR(11, r1)
@@ -1314,7 +1341,7 @@
 	ld	r11,PACA_EXGEN+EX_R11(r13)
 	ld	r12,PACA_EXGEN+EX_R12(r13)
 	ld	r13,PACA_EXGEN+EX_R13(r13)
-	HRFID
+	HRFI_TO_UNKNOWN
 	b	.
 #endif
 
@@ -1418,10 +1445,94 @@
 	ld	r10,PACA_EXGEN+EX_R10(r13);		\
 	ld	r11,PACA_EXGEN+EX_R11(r13);		\
 	/* returns to kernel where r13 must be set up, so don't restore it */ \
-	##_H##rfid;					\
+	##_H##RFI_TO_KERNEL;				\
 	b	.;					\
 	MASKED_DEC_HANDLER(_H)
 
+TRAMP_REAL_BEGIN(rfi_flush_fallback)
+	SET_SCRATCH0(r13);
+	GET_PACA(r13);
+	std	r9,PACA_EXRFI+EX_R9(r13)
+	std	r10,PACA_EXRFI+EX_R10(r13)
+	std	r11,PACA_EXRFI+EX_R11(r13)
+	std	r12,PACA_EXRFI+EX_R12(r13)
+	std	r8,PACA_EXRFI+EX_R13(r13)
+	mfctr	r9
+	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
+	ld	r11,PACA_L1D_FLUSH_SETS(r13)
+	ld	r12,PACA_L1D_FLUSH_CONGRUENCE(r13)
+	/*
+	 * The load addresses are at staggered offsets within cachelines,
+	 * which suits some pipelines better (on others it should not
+	 * hurt).
+	 */
+	addi	r12,r12,8
+	mtctr	r11
+	DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
+
+	/* order ld/st prior to dcbt stop all streams with flushing */
+	sync
+1:	li	r8,0
+	.rept	8 /* 8-way set associative */
+	ldx	r11,r10,r8
+	add	r8,r8,r12
+	xor	r11,r11,r11	// Ensure r11 is 0 even if fallback area is not
+	add	r8,r8,r11	// Add 0, this creates a dependency on the ldx
+	.endr
+	addi	r10,r10,128 /* 128 byte cache line */
+	bdnz	1b
+
+	mtctr	r9
+	ld	r9,PACA_EXRFI+EX_R9(r13)
+	ld	r10,PACA_EXRFI+EX_R10(r13)
+	ld	r11,PACA_EXRFI+EX_R11(r13)
+	ld	r12,PACA_EXRFI+EX_R12(r13)
+	ld	r8,PACA_EXRFI+EX_R13(r13)
+	GET_SCRATCH0(r13);
+	rfid
+
+TRAMP_REAL_BEGIN(hrfi_flush_fallback)
+	SET_SCRATCH0(r13);
+	GET_PACA(r13);
+	std	r9,PACA_EXRFI+EX_R9(r13)
+	std	r10,PACA_EXRFI+EX_R10(r13)
+	std	r11,PACA_EXRFI+EX_R11(r13)
+	std	r12,PACA_EXRFI+EX_R12(r13)
+	std	r8,PACA_EXRFI+EX_R13(r13)
+	mfctr	r9
+	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
+	ld	r11,PACA_L1D_FLUSH_SETS(r13)
+	ld	r12,PACA_L1D_FLUSH_CONGRUENCE(r13)
+	/*
+	 * The load addresses are at staggered offsets within cachelines,
+	 * which suits some pipelines better (on others it should not
+	 * hurt).
+	 */
+	addi	r12,r12,8
+	mtctr	r11
+	DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
+
+	/* order ld/st prior to dcbt stop all streams with flushing */
+	sync
+1:	li	r8,0
+	.rept	8 /* 8-way set associative */
+	ldx	r11,r10,r8
+	add	r8,r8,r12
+	xor	r11,r11,r11	// Ensure r11 is 0 even if fallback area is not
+	add	r8,r8,r11	// Add 0, this creates a dependency on the ldx
+	.endr
+	addi	r10,r10,128 /* 128 byte cache line */
+	bdnz	1b
+
+	mtctr	r9
+	ld	r9,PACA_EXRFI+EX_R9(r13)
+	ld	r10,PACA_EXRFI+EX_R10(r13)
+	ld	r11,PACA_EXRFI+EX_R11(r13)
+	ld	r12,PACA_EXRFI+EX_R12(r13)
+	ld	r8,PACA_EXRFI+EX_R13(r13)
+	GET_SCRATCH0(r13);
+	hrfid
+
 /*
  * Real mode exceptions actually use this too, but alternate
  * instruction code patches (which end up in the common .text area)
@@ -1441,7 +1552,7 @@
 	addi	r13, r13, 4
 	mtspr	SPRN_SRR0, r13
 	GET_SCRATCH0(r13)
-	rfid
+	RFI_TO_KERNEL
 	b	.
 
 TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
@@ -1453,7 +1564,7 @@
 	addi	r13, r13, 4
 	mtspr	SPRN_HSRR0, r13
 	GET_SCRATCH0(r13)
-	hrfid
+	HRFI_TO_KERNEL
 	b	.
 #endif
 
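
For readers not fluent in ppc64 assembly, here is a rough, hypothetical
C model of the displacement-flush loop above, using the geometry this
series stores in the paca (congruence = L1d size / 8 ways, sets =
congruence / 128-byte lines, loads staggered by 8 bytes across ways):

#include <stdint.h>
#include <stdlib.h>

/* C model of rfi_flush_fallback's load loop; the real thing must run
 * in assembly with all state held in registers. */
static void fallback_flush_model(const char *area, uint64_t l1d_size)
{
	uint64_t congruence = l1d_size / 8;	/* PACA_L1D_FLUSH_CONGRUENCE */
	uint64_t sets = congruence / 128;	/* PACA_L1D_FLUSH_SETS */
	uint64_t stride = congruence + 8;	/* addi r12,r12,8: staggered offsets */
	volatile const char *p = area;

	for (uint64_t set = 0; set < sets; set++) {
		uint64_t off = 0;

		for (int way = 0; way < 8; way++) {	/* 8-way set associative */
			(void)p[off];			/* ldx r11,r10,r8 */
			off += stride;			/* add r8,r8,r12 */
		}
		p += 128;				/* next 128-byte cache line */
	}
}

int main(void)
{
	uint64_t l1d_size = 32 * 1024;		/* example: 32kB L1d */
	char *area = calloc(2, l1d_size);	/* 2x size, as in init_fallback_flush() */

	if (area)
		fallback_flush_model(area, l1d_size);
	free(area);
	return 0;
}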
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 8956a98..491be41 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -801,3 +801,104 @@ static int __init disable_hardlockup_detector(void)
 	return 0;
 }
 early_initcall(disable_hardlockup_detector);
+
+#ifdef CONFIG_PPC_BOOK3S_64
+static enum l1d_flush_type enabled_flush_types;
+static void *l1d_flush_fallback_area;
+static bool no_rfi_flush;
+bool rfi_flush;
+
+static int __init handle_no_rfi_flush(char *p)
+{
+	pr_info("rfi-flush: disabled on command line.\n");
+	no_rfi_flush = true;
+	return 0;
+}
+early_param("no_rfi_flush", handle_no_rfi_flush);
+
+/*
+ * The RFI flush is not KPTI, but because users will see doco that says to use
+ * nopti we hijack that option here to also disable the RFI flush.
+ */
+static int __init handle_no_pti(char *p)
+{
+	pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
+	handle_no_rfi_flush(NULL);
+	return 0;
+}
+early_param("nopti", handle_no_pti);
+
+static void do_nothing(void *unused)
+{
+	/*
+	 * We don't need to do the flush explicitly, just enter+exit kernel is
+	 * sufficient, the RFI exit handlers will do the right thing.
+	 */
+}
+
+void rfi_flush_enable(bool enable)
+{
+	if (rfi_flush == enable)
+		return;
+
+	if (enable) {
+		do_rfi_flush_fixups(enabled_flush_types);
+		on_each_cpu(do_nothing, NULL, 1);
+	} else
+		do_rfi_flush_fixups(L1D_FLUSH_NONE);
+
+	rfi_flush = enable;
+}
+
+static void init_fallback_flush(void)
+{
+	u64 l1d_size, limit;
+	int cpu;
+
+	l1d_size = ppc64_caches.l1d.size;
+	limit = min(safe_stack_limit(), ppc64_rma_size);
+
+	/*
+	 * Align to L1d size, and size it at 2x L1d size, to catch possible
+	 * hardware prefetch runoff. We don't have a recipe for load patterns to
+	 * reliably avoid the prefetcher.
+	 */
+	l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit));
+	memset(l1d_flush_fallback_area, 0, l1d_size * 2);
+
+	for_each_possible_cpu(cpu) {
+		/*
+		 * The fallback flush is currently coded for 8-way
+		 * associativity. Different associativity is possible, but it
+		 * will be treated as 8-way and may not evict the lines as
+		 * effectively.
+		 *
+		 * 128 byte lines are mandatory.
+		 */
+		u64 c = l1d_size / 8;
+
+		paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area;
+		paca[cpu].l1d_flush_congruence = c;
+		paca[cpu].l1d_flush_sets = c / 128;
+	}
+}
+
+void __init setup_rfi_flush(enum l1d_flush_type types, bool enable)
+{
+	if (types & L1D_FLUSH_FALLBACK) {
+		pr_info("rfi-flush: Using fallback displacement flush\n");
+		init_fallback_flush();
+	}
+
+	if (types & L1D_FLUSH_ORI)
+		pr_info("rfi-flush: Using ori type flush\n");
+
+	if (types & L1D_FLUSH_MTTRIG)
+		pr_info("rfi-flush: Using mttrig type flush\n");
+
+	enabled_flush_types = types;
+
+	if (!no_rfi_flush)
+		rfi_flush_enable(enable);
+}
+#endif /* CONFIG_PPC_BOOK3S_64 */
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 0494e15..307843d 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -132,6 +132,15 @@
 	/* Read-only data */
 	RO_DATA(PAGE_SIZE)
 
+#ifdef CONFIG_PPC64
+	. = ALIGN(8);
+	__rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
+		__start___rfi_flush_fixup = .;
+		*(__rfi_flush_fixup)
+		__stop___rfi_flush_fixup = .;
+	}
+#endif
+
 	EXCEPTION_TABLE(0)
 
 	NOTES :kernel :notes
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 29ebe2f..a93d719 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -235,6 +235,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 		gpte->may_read = true;
 		gpte->may_write = true;
 		gpte->page_size = MMU_PAGE_4K;
+		gpte->wimg = HPTE_R_M;
 
 		return 0;
 	}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 9660972..b73dbc9 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -65,11 +65,17 @@ struct kvm_resize_hpt {
 	u32 order;
 
 	/* These fields protected by kvm->lock */
-	int error;
-	bool prepare_done;
 
-	/* Private to the work thread, until prepare_done is true,
-	 * then protected by kvm->resize_hpt_sem */
+	/* Possible values and their usage:
+	 *  <0     an error occurred during allocation,
+	 *  -EBUSY allocation is in progress,
+	 *  0      allocation made successfully.
+	 */
+	int error;
+
+	/* Private to the work thread, until error != -EBUSY,
+	 * then protected by kvm->lock.
+	 */
 	struct kvm_hpt_info hpt;
 };
 
@@ -159,8 +165,6 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
 		 * Reset all the reverse-mapping chains for all memslots
 		 */
 		kvmppc_rmap_reset(kvm);
-		/* Ensure that each vcpu will flush its TLB on next entry. */
-		cpumask_setall(&kvm->arch.need_tlb_flush);
 		err = 0;
 		goto out;
 	}
@@ -176,6 +180,10 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
 	kvmppc_set_hpt(kvm, &info);
 
 out:
+	if (err == 0)
+		/* Ensure that each vcpu will flush its TLB on next entry. */
+		cpumask_setall(&kvm->arch.need_tlb_flush);
+
 	mutex_unlock(&kvm->lock);
 	return err;
 }
@@ -1413,16 +1421,20 @@ static void resize_hpt_pivot(struct kvm_resize_hpt *resize)
 
 static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize)
 {
-	BUG_ON(kvm->arch.resize_hpt != resize);
+	if (WARN_ON(!mutex_is_locked(&kvm->lock)))
+		return;
 
 	if (!resize)
 		return;
 
-	if (resize->hpt.virt)
-		kvmppc_free_hpt(&resize->hpt);
+	if (resize->error != -EBUSY) {
+		if (resize->hpt.virt)
+			kvmppc_free_hpt(&resize->hpt);
+		kfree(resize);
+	}
 
-	kvm->arch.resize_hpt = NULL;
-	kfree(resize);
+	if (kvm->arch.resize_hpt == resize)
+		kvm->arch.resize_hpt = NULL;
 }
 
 static void resize_hpt_prepare_work(struct work_struct *work)
@@ -1431,17 +1443,41 @@ static void resize_hpt_prepare_work(struct work_struct *work)
 						     struct kvm_resize_hpt,
 						     work);
 	struct kvm *kvm = resize->kvm;
-	int err;
+	int err = 0;
 
-	resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
-			 resize->order);
-
-	err = resize_hpt_allocate(resize);
+	if (WARN_ON(resize->error != -EBUSY))
+		return;
 
 	mutex_lock(&kvm->lock);
 
+	/* Request is still current? */
+	if (kvm->arch.resize_hpt == resize) {
+		/* We may request large allocations here:
+		 * do not sleep with kvm->lock held for a long time.
+		 */
+		mutex_unlock(&kvm->lock);
+
+		resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
+				 resize->order);
+
+		err = resize_hpt_allocate(resize);
+
+		/* We have a strict assumption about -EBUSY
+		 * when preparing for HPT resize.
+		 */
+		if (WARN_ON(err == -EBUSY))
+			err = -EINPROGRESS;
+
+		mutex_lock(&kvm->lock);
+		/* It is possible that kvm->arch.resize_hpt != resize
+		 * after we grab kvm->lock again.
+		 */
+	}
+
 	resize->error = err;
-	resize->prepare_done = true;
+
+	if (kvm->arch.resize_hpt != resize)
+		resize_hpt_release(kvm, resize);
 
 	mutex_unlock(&kvm->lock);
 }
@@ -1466,14 +1502,12 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
 
 	if (resize) {
 		if (resize->order == shift) {
-			/* Suitable resize in progress */
-			if (resize->prepare_done) {
-				ret = resize->error;
-				if (ret != 0)
-					resize_hpt_release(kvm, resize);
-			} else {
+			/* Suitable resize in progress? */
+			ret = resize->error;
+			if (ret == -EBUSY)
 				ret = 100; /* estimated time in ms */
-			}
+			else if (ret)
+				resize_hpt_release(kvm, resize);
 
 			goto out;
 		}
@@ -1493,6 +1527,8 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
 		ret = -ENOMEM;
 		goto out;
 	}
+
+	resize->error = -EBUSY;
 	resize->order = shift;
 	resize->kvm = kvm;
 	INIT_WORK(&resize->work, resize_hpt_prepare_work);
@@ -1547,16 +1583,12 @@ long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
 	if (!resize || (resize->order != shift))
 		goto out;
 
-	ret = -EBUSY;
-	if (!resize->prepare_done)
-		goto out;
-
 	ret = resize->error;
-	if (ret != 0)
+	if (ret)
 		goto out;
 
 	ret = resize_hpt_rehash(resize);
-	if (ret != 0)
+	if (ret)
 		goto out;
 
 	resize_hpt_pivot(resize);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 2659844..9c61f73 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -79,7 +79,7 @@
 	mtmsrd	r0,1		/* clear RI in MSR */
 	mtsrr0	r5
 	mtsrr1	r6
-	RFI
+	RFI_TO_KERNEL
 
 kvmppc_call_hv_entry:
 BEGIN_FTR_SECTION
@@ -199,7 +199,7 @@
 	mtmsrd	r6, 1			/* Clear RI in MSR */
 	mtsrr0	r8
 	mtsrr1	r7
-	RFI
+	RFI_TO_KERNEL
 
 	/* Virtual-mode return */
 .Lvirt_return:
@@ -1167,8 +1167,7 @@
 
 	ld	r0, VCPU_GPR(R0)(r4)
 	ld	r4, VCPU_GPR(R4)(r4)
-
-	hrfid
+	HRFI_TO_GUEST
 	b	.
 
 secondary_too_late:
@@ -3320,7 +3319,7 @@
 	ld	r4, PACAKMSR(r13)
 	mtspr	SPRN_SRR0, r3
 	mtspr	SPRN_SRR1, r4
-	rfid
+	RFI_TO_KERNEL
 9:	addi	r3, r1, STACK_FRAME_OVERHEAD
 	bl	kvmppc_bad_interrupt
 	b	9b
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index d0dc862..7deaeeb 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -60,6 +60,7 @@ static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
 #define MSR_USER32 MSR_USER
 #define MSR_USER64 MSR_USER
 #define HW_PAGE_SIZE PAGE_SIZE
+#define HPTE_R_M   _PAGE_COHERENT
 #endif
 
 static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
@@ -557,6 +558,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		pte.eaddr = eaddr;
 		pte.vpage = eaddr >> 12;
 		pte.page_size = MMU_PAGE_64K;
+		pte.wimg = HPTE_R_M;
 	}
 
 	switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 42a4b23..34a5ade 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -46,6 +46,9 @@
 
 #define FUNC(name)		name
 
+#define RFI_TO_KERNEL	RFI
+#define RFI_TO_GUEST	RFI
+
 .macro INTERRUPT_TRAMPOLINE intno
 
 .global kvmppc_trampoline_\intno
@@ -141,7 +144,7 @@
 	GET_SCRATCH0(r13)
 
 	/* And get back into the code */
-	RFI
+	RFI_TO_KERNEL
 #endif
 
 /*
@@ -164,6 +167,6 @@
 	ori	r5, r5, MSR_EE
 	mtsrr0	r7
 	mtsrr1	r6
-	RFI
+	RFI_TO_KERNEL
 
 #include "book3s_segment.S"
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 2a2b96d..93a180c 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -156,7 +156,7 @@
 	PPC_LL	r9, SVCPU_R9(r3)
 	PPC_LL	r3, (SVCPU_R3)(r3)
 
-	RFI
+	RFI_TO_GUEST
 kvmppc_handler_trampoline_enter_end:
 
 
@@ -407,5 +407,5 @@
 	cmpwi	r12, BOOK3S_INTERRUPT_DOORBELL
 	beqa	BOOK3S_INTERRUPT_DOORBELL
 
-	RFI
+	RFI_TO_KERNEL
 kvmppc_handler_trampoline_exit_end:
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 41cf5ae..a95ea00 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -116,6 +116,47 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
 	}
 }
 
+#ifdef CONFIG_PPC_BOOK3S_64
+void do_rfi_flush_fixups(enum l1d_flush_type types)
+{
+	unsigned int instrs[3], *dest;
+	long *start, *end;
+	int i;
+
+	start = PTRRELOC(&__start___rfi_flush_fixup);
+	end = PTRRELOC(&__stop___rfi_flush_fixup);
+
+	instrs[0] = 0x60000000; /* nop */
+	instrs[1] = 0x60000000; /* nop */
+	instrs[2] = 0x60000000; /* nop */
+
+	if (types & L1D_FLUSH_FALLBACK)
+		/* b .+16 to fallback flush */
+		instrs[0] = 0x48000010;
+
+	i = 0;
+	if (types & L1D_FLUSH_ORI) {
+		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
+		instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush */
+	}
+
+	if (types & L1D_FLUSH_MTTRIG)
+		instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
+
+	for (i = 0; start < end; start++, i++) {
+		dest = (void *)start + *start;
+
+		pr_devel("patching dest %lx\n", (unsigned long)dest);
+
+		patch_instruction(dest, instrs[0]);
+		patch_instruction(dest + 1, instrs[1]);
+		patch_instruction(dest + 2, instrs[2]);
+	}
+
+	printk(KERN_DEBUG "rfi-flush: patched %d locations\n", i);
+}
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
 void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
 {
 	long *start, *end;
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 1edfbc1..4fb21e1 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -37,13 +37,62 @@
 #include <asm/kexec.h>
 #include <asm/smp.h>
 #include <asm/tm.h>
+#include <asm/setup.h>
 
 #include "powernv.h"
 
+static void pnv_setup_rfi_flush(void)
+{
+	struct device_node *np, *fw_features;
+	enum l1d_flush_type type;
+	int enable;
+
+	/* Default to fallback in case fw-features are not available */
+	type = L1D_FLUSH_FALLBACK;
+	enable = 1;
+
+	np = of_find_node_by_name(NULL, "ibm,opal");
+	fw_features = of_get_child_by_name(np, "fw-features");
+	of_node_put(np);
+
+	if (fw_features) {
+		np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2");
+		if (np && of_property_read_bool(np, "enabled"))
+			type = L1D_FLUSH_MTTRIG;
+
+		of_node_put(np);
+
+		np = of_get_child_by_name(fw_features, "inst-l1d-flush-ori30,30,0");
+		if (np && of_property_read_bool(np, "enabled"))
+			type = L1D_FLUSH_ORI;
+
+		of_node_put(np);
+
+		/* Enable unless firmware says NOT to */
+		enable = 2;
+		np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-hv-1-to-0");
+		if (np && of_property_read_bool(np, "disabled"))
+			enable--;
+
+		of_node_put(np);
+
+		np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-pr-0-to-1");
+		if (np && of_property_read_bool(np, "disabled"))
+			enable--;
+
+		of_node_put(np);
+		of_node_put(fw_features);
+	}
+
+	setup_rfi_flush(type, enable > 0);
+}
+
 static void __init pnv_setup_arch(void)
 {
 	set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
 
+	pnv_setup_rfi_flush();
+
 	/* Initialize SMP */
 	pnv_smp_init();
 
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 6e35780..a0b20c0 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -574,11 +574,26 @@ static ssize_t dlpar_show(struct class *class, struct class_attribute *attr,
 
 static CLASS_ATTR_RW(dlpar);
 
-static int __init pseries_dlpar_init(void)
+int __init dlpar_workqueue_init(void)
 {
+	if (pseries_hp_wq)
+		return 0;
+
 	pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue",
-					WQ_UNBOUND, 1);
+			WQ_UNBOUND, 1);
+
+	return pseries_hp_wq ? 0 : -ENOMEM;
+}
+
+static int __init dlpar_sysfs_init(void)
+{
+	int rc;
+
+	rc = dlpar_workqueue_init();
+	if (rc)
+		return rc;
+
 	return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
 }
-machine_device_initcall(pseries, pseries_dlpar_init);
+machine_device_initcall(pseries, dlpar_sysfs_init);
 
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 4470a31..1ae1d9f 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -98,4 +98,6 @@ static inline unsigned long cmo_get_page_size(void)
 	return CMO_PageSize;
 }
 
+int dlpar_workqueue_init(void);
+
 #endif /* _PSERIES_PSERIES_H */
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 4923ffe..81d8614 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -69,7 +69,8 @@ static int __init init_ras_IRQ(void)
 	/* Hotplug Events */
 	np = of_find_node_by_path("/event-sources/hot-plug-events");
 	if (np != NULL) {
-		request_event_sources_irqs(np, ras_hotplug_interrupt,
+		if (dlpar_workqueue_init() == 0)
+			request_event_sources_irqs(np, ras_hotplug_interrupt,
 					   "RAS_HOTPLUG");
 		of_node_put(np);
 	}
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index a8531e0..ae4f596 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -459,6 +459,39 @@ static void __init find_and_init_phbs(void)
 	of_pci_check_probe_only();
 }
 
+static void pseries_setup_rfi_flush(void)
+{
+	struct h_cpu_char_result result;
+	enum l1d_flush_type types;
+	bool enable;
+	long rc;
+
+	/* Enable by default */
+	enable = true;
+
+	rc = plpar_get_cpu_characteristics(&result);
+	if (rc == H_SUCCESS) {
+		types = L1D_FLUSH_NONE;
+
+		if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
+			types |= L1D_FLUSH_MTTRIG;
+		if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30)
+			types |= L1D_FLUSH_ORI;
+
+		/* Use fallback if nothing set in hcall */
+		if (types == L1D_FLUSH_NONE)
+			types = L1D_FLUSH_FALLBACK;
+
+		if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
+			enable = false;
+	} else {
+		/* Default to fallback in case the hcall is not available */
+		types = L1D_FLUSH_FALLBACK;
+	}
+
+	setup_rfi_flush(types, enable);
+}
+
 static void __init pSeries_setup_arch(void)
 {
 	set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
@@ -476,6 +509,8 @@ static void __init pSeries_setup_arch(void)
 
 	fwnmi_init();
 
+	pseries_setup_rfi_flush();
+
 	/* By default, only probe PCI (can be overridden by rtas_pci) */
 	pci_add_flags(PCI_PROBE_ONLY);
 
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 45dc623..bc2204f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -55,7 +55,6 @@
 	select ARCH_HAS_GCOV_PROFILE_ALL
 	select ARCH_HAS_KCOV			if X86_64
 	select ARCH_HAS_PMEM_API		if X86_64
-	# Causing hangs/crashes, see the commit that added this change for details.
 	select ARCH_HAS_REFCOUNT
 	select ARCH_HAS_UACCESS_FLUSHCACHE	if X86_64
 	select ARCH_HAS_SET_MEMORY
@@ -89,6 +88,7 @@
 	select GENERIC_CLOCKEVENTS_MIN_ADJUST
 	select GENERIC_CMOS_UPDATE
 	select GENERIC_CPU_AUTOPROBE
+	select GENERIC_CPU_VULNERABILITIES
 	select GENERIC_EARLY_IOREMAP
 	select GENERIC_FIND_FIRST_BIT
 	select GENERIC_IOMAP
@@ -154,7 +154,7 @@
 	select HAVE_KERNEL_XZ
 	select HAVE_KPROBES
 	select HAVE_KPROBES_ON_FTRACE
-	select HAVE_KPROBE_OVERRIDE
+	select HAVE_FUNCTION_ERROR_INJECTION
 	select HAVE_KRETPROBES
 	select HAVE_KVM
 	select HAVE_LIVEPATCH			if X86_64
@@ -430,6 +430,19 @@
        def_bool y
        depends on X86_GOLDFISH
 
+config RETPOLINE
+	bool "Avoid speculative indirect branches in kernel"
+	default y
+	help
+	  Compile kernel with the retpoline compiler options to guard against
+	  kernel-to-user data leaks by avoiding speculative indirect
+	  branches. Requires a compiler with -mindirect-branch=thunk-extern
+	  support for full protection. The kernel may run slower.
+
+	  Without compiler support, at least indirect branches in assembler
+	  code are eliminated. Since this includes the syscall entry path,
+	  it is not entirely pointless.
+
 config INTEL_RDT
 	bool "Intel Resource Director Technology support"
 	default n
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 3e73bc2..fad5516 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -230,6 +230,14 @@
 #
 KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
 
+# Avoid indirect branches in kernel to deal with Spectre
+ifdef CONFIG_RETPOLINE
+    RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
+    ifneq ($(RETPOLINE_CFLAGS),)
+        KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
+    endif
+endif
+
 archscripts: scripts_basic
 	$(Q)$(MAKE) $(build)=arch/x86/tools relocs
 
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 16627fe..3d09e3a 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -32,6 +32,7 @@
 #include <linux/linkage.h>
 #include <asm/inst.h>
 #include <asm/frame.h>
+#include <asm/nospec-branch.h>
 
 /*
  * The following macros are used to move an (un)aligned 16 byte value to/from
@@ -2884,7 +2885,7 @@
 	pxor INC, STATE4
 	movdqu IV, 0x30(OUTP)
 
-	call *%r11
+	CALL_NOSPEC %r11
 
 	movdqu 0x00(OUTP), INC
 	pxor INC, STATE1
@@ -2929,7 +2930,7 @@
 	_aesni_gf128mul_x_ble()
 	movups IV, (IVP)
 
-	call *%r11
+	CALL_NOSPEC %r11
 
 	movdqu 0x40(OUTP), INC
 	pxor INC, STATE1
diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
index f7c495e..a14af6e 100644
--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
@@ -17,6 +17,7 @@
 
 #include <linux/linkage.h>
 #include <asm/frame.h>
+#include <asm/nospec-branch.h>
 
 #define CAMELLIA_TABLE_BYTE_LEN 272
 
@@ -1227,7 +1228,7 @@
 	vpxor 14 * 16(%rax), %xmm15, %xmm14;
 	vpxor 15 * 16(%rax), %xmm15, %xmm15;
 
-	call *%r9;
+	CALL_NOSPEC %r9;
 
 	addq $(16 * 16), %rsp;
 
diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
index eee5b39..b66bbfa 100644
--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
@@ -12,6 +12,7 @@
 
 #include <linux/linkage.h>
 #include <asm/frame.h>
+#include <asm/nospec-branch.h>
 
 #define CAMELLIA_TABLE_BYTE_LEN 272
 
@@ -1343,7 +1344,7 @@
 	vpxor 14 * 32(%rax), %ymm15, %ymm14;
 	vpxor 15 * 32(%rax), %ymm15, %ymm15;
 
-	call *%r9;
+	CALL_NOSPEC %r9;
 
 	addq $(16 * 32), %rsp;
 
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
index 7a7de27..d9b734d 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -45,6 +45,7 @@
 
 #include <asm/inst.h>
 #include <linux/linkage.h>
+#include <asm/nospec-branch.h>
 
 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
 
@@ -172,7 +173,7 @@
 	movzxw  (bufp, %rax, 2), len
 	lea	crc_array(%rip), bufp
 	lea     (bufp, len, 1), bufp
-	jmp     *bufp
+	JMP_NOSPEC bufp
 
 	################################################################
 	## 2a) PROCESS FULL BLOCKS:
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 45a63e0..3f48f69 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -198,8 +198,11 @@ For 32-bit we have the following conventions - kernel is built with
  * PAGE_TABLE_ISOLATION PGDs are 8k.  Flip bit 12 to switch between the two
  * halves:
  */
-#define PTI_SWITCH_PGTABLES_MASK	(1<<PAGE_SHIFT)
-#define PTI_SWITCH_MASK		(PTI_SWITCH_PGTABLES_MASK|(1<<X86_CR3_PTI_SWITCH_BIT))
+#define PTI_USER_PGTABLE_BIT		PAGE_SHIFT
+#define PTI_USER_PGTABLE_MASK		(1 << PTI_USER_PGTABLE_BIT)
+#define PTI_USER_PCID_BIT		X86_CR3_PTI_PCID_USER_BIT
+#define PTI_USER_PCID_MASK		(1 << PTI_USER_PCID_BIT)
+#define PTI_USER_PGTABLE_AND_PCID_MASK  (PTI_USER_PCID_MASK | PTI_USER_PGTABLE_MASK)
 
 .macro SET_NOFLUSH_BIT	reg:req
 	bts	$X86_CR3_PCID_NOFLUSH_BIT, \reg
@@ -208,7 +211,7 @@ For 32-bit we have the following conventions - kernel is built with
 .macro ADJUST_KERNEL_CR3 reg:req
 	ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID
 	/* Clear PCID and "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */
-	andq    $(~PTI_SWITCH_MASK), \reg
+	andq    $(~PTI_USER_PGTABLE_AND_PCID_MASK), \reg
 .endm
 
 .macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
@@ -239,15 +242,19 @@ For 32-bit we have the following conventions - kernel is built with
 	/* Flush needed, clear the bit */
 	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
 	movq	\scratch_reg2, \scratch_reg
-	jmp	.Lwrcr3_\@
+	jmp	.Lwrcr3_pcid_\@
 
 .Lnoflush_\@:
 	movq	\scratch_reg2, \scratch_reg
 	SET_NOFLUSH_BIT \scratch_reg
 
+.Lwrcr3_pcid_\@:
+	/* Flip the ASID to the user version */
+	orq	$(PTI_USER_PCID_MASK), \scratch_reg
+
 .Lwrcr3_\@:
-	/* Flip the PGD and ASID to the user version */
-	orq     $(PTI_SWITCH_MASK), \scratch_reg
+	/* Flip the PGD to the user version */
+	orq     $(PTI_USER_PGTABLE_MASK), \scratch_reg
 	mov	\scratch_reg, %cr3
 .Lend_\@:
 .endm
@@ -263,17 +270,12 @@ For 32-bit we have the following conventions - kernel is built with
 	movq	%cr3, \scratch_reg
 	movq	\scratch_reg, \save_reg
 	/*
-	 * Is the "switch mask" all zero?  That means that both of
-	 * these are zero:
-	 *
-	 *	1. The user/kernel PCID bit, and
-	 *	2. The user/kernel "bit" that points CR3 to the
-	 *	   bottom half of the 8k PGD
-	 *
-	 * That indicates a kernel CR3 value, not a user CR3.
+	 * Test the user pagetable bit. If set, then the user page tables
+	 * are active. If clear, CR3 already has the kernel page table
+	 * active.
 	 */
-	testq	$(PTI_SWITCH_MASK), \scratch_reg
-	jz	.Ldone_\@
+	bt	$PTI_USER_PGTABLE_BIT, \scratch_reg
+	jnc	.Ldone_\@
 
 	ADJUST_KERNEL_CR3 \scratch_reg
 	movq	\scratch_reg, %cr3
@@ -290,7 +292,7 @@ For 32-bit we have the following conventions - kernel is built with
 	 * KERNEL pages can always resume with NOFLUSH as we do
 	 * explicit flushes.
 	 */
-	bt	$X86_CR3_PTI_SWITCH_BIT, \save_reg
+	bt	$PTI_USER_PGTABLE_BIT, \save_reg
 	jnc	.Lnoflush_\@
 
 	/*
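
Annotation: with PAGE_SHIFT being 12 on x86 and the PCID user bit defined as 11 (see processor-flags.h below), the split masks introduced above work out to concrete values; a quick standalone check, assuming only those two constants:

	#include <stdio.h>

	#define PAGE_SHIFT			12	/* x86 */
	#define X86_CR3_PTI_PCID_USER_BIT	11

	int main(void)
	{
		unsigned long pgtable = 1UL << PAGE_SHIFT;			/* 0x1000 */
		unsigned long pcid    = 1UL << X86_CR3_PTI_PCID_USER_BIT;	/* 0x0800 */

		/* SWITCH_TO_KERNEL_CR3 clears both bits with a single andq */
		printf("pgtable=%#lx pcid=%#lx both=%#lx\n",
		       pgtable, pcid, pgtable | pcid);			/* 0x1800 */
		return 0;
	}
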
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index ace8f32..a1f28a5 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -44,6 +44,7 @@
 #include <asm/asm.h>
 #include <asm/smap.h>
 #include <asm/frame.h>
+#include <asm/nospec-branch.h>
 
 	.section .entry.text, "ax"
 
@@ -290,7 +291,7 @@
 
 	/* kernel thread */
 1:	movl	%edi, %eax
-	call	*%ebx
+	CALL_NOSPEC %ebx
 	/*
 	 * A kernel thread is allowed to return here after successfully
 	 * calling do_execve().  Exit to userspace to complete the execve()
@@ -919,7 +920,7 @@
 	movl	%ecx, %es
 	TRACE_IRQS_OFF
 	movl	%esp, %eax			# pt_regs pointer
-	call	*%edi
+	CALL_NOSPEC %edi
 	jmp	ret_from_exception
 END(common_exception)
 
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index f048e38..4f8e1d3 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -37,6 +37,7 @@
 #include <asm/pgtable_types.h>
 #include <asm/export.h>
 #include <asm/frame.h>
+#include <asm/nospec-branch.h>
 #include <linux/err.h>
 
 #include "calling.h"
@@ -191,7 +192,7 @@
 	 */
 	pushq	%rdi
 	movq	$entry_SYSCALL_64_stage2, %rdi
-	jmp	*%rdi
+	JMP_NOSPEC %rdi
 END(entry_SYSCALL_64_trampoline)
 
 	.popsection
@@ -270,7 +271,12 @@
 	 * It might end up jumping to the slow path.  If it jumps, RAX
 	 * and all argument registers are clobbered.
 	 */
+#ifdef CONFIG_RETPOLINE
+	movq	sys_call_table(, %rax, 8), %rax
+	call	__x86_indirect_thunk_rax
+#else
 	call	*sys_call_table(, %rax, 8)
+#endif
 .Lentry_SYSCALL_64_after_fastpath_call:
 
 	movq	%rax, RAX(%rsp)
@@ -442,7 +448,7 @@
 	jmp	entry_SYSCALL64_slow_path
 
 1:
-	jmp	*%rax				/* Called from C */
+	JMP_NOSPEC %rax				/* Called from C */
 END(stub_ptregs_64)
 
 .macro ptregs_stub func
@@ -521,7 +527,7 @@
 1:
 	/* kernel thread */
 	movq	%r12, %rdi
-	call	*%rbx
+	CALL_NOSPEC %rbx
 	/*
 	 * A kernel thread is allowed to return here after successfully
 	 * calling do_execve().  Exit to userspace to complete the execve()
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index 141e07b..24ffa1e 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -582,6 +582,24 @@ static __init int bts_init(void)
 	if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts)
 		return -ENODEV;
 
+	if (boot_cpu_has(X86_FEATURE_PTI)) {
+		/*
+		 * BTS hardware writes through a virtual memory map, so we must
+		 * either use the kernel physical map, or the user mapping of
+		 * the AUX buffer.
+		 *
+		 * However, since this driver supports per-CPU and per-task inherit,
+		 * we cannot use the user mapping, since it will not be available
+		 * if we're not running the owning process.
+		 *
+		 * With PTI we can't use the kernel map either, because it's not
+		 * there when we run userspace.
+		 *
+		 * For now, disable this driver when using PTI.
+		 */
+		return -ENODEV;
+	}
+
 	bts_pmu.capabilities	= PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE |
 				  PERF_PMU_CAP_EXCLUSIVE;
 	bts_pmu.task_ctx_nr	= perf_sw_context;
diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
index ff700d8..0927cdc 100644
--- a/arch/x86/include/asm/asm-prototypes.h
+++ b/arch/x86/include/asm/asm-prototypes.h
@@ -11,7 +11,32 @@
 #include <asm/pgtable.h>
 #include <asm/special_insns.h>
 #include <asm/preempt.h>
+#include <asm/asm.h>
 
 #ifndef CONFIG_X86_CMPXCHG64
 extern void cmpxchg8b_emu(void);
 #endif
+
+#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_X86_32
+#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_e ## reg(void);
+#else
+#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_r ## reg(void);
+INDIRECT_THUNK(8)
+INDIRECT_THUNK(9)
+INDIRECT_THUNK(10)
+INDIRECT_THUNK(11)
+INDIRECT_THUNK(12)
+INDIRECT_THUNK(13)
+INDIRECT_THUNK(14)
+INDIRECT_THUNK(15)
+#endif
+INDIRECT_THUNK(ax)
+INDIRECT_THUNK(bx)
+INDIRECT_THUNK(cx)
+INDIRECT_THUNK(dx)
+INDIRECT_THUNK(si)
+INDIRECT_THUNK(di)
+INDIRECT_THUNK(bp)
+INDIRECT_THUNK(sp)
+#endif /* CONFIG_RETPOLINE */
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 21ac898..f275447 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -203,6 +203,8 @@
 #define X86_FEATURE_PROC_FEEDBACK	( 7*32+ 9) /* AMD ProcFeedbackInterface */
 #define X86_FEATURE_SME			( 7*32+10) /* AMD Secure Memory Encryption */
 #define X86_FEATURE_PTI			( 7*32+11) /* Kernel Page Table Isolation enabled */
+#define X86_FEATURE_RETPOLINE		( 7*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_AMD	( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */
 #define X86_FEATURE_INTEL_PPIN		( 7*32+14) /* Intel Processor Inventory Number */
 #define X86_FEATURE_INTEL_PT		( 7*32+15) /* Intel Processor Trace */
 #define X86_FEATURE_AVX512_4VNNIW	( 7*32+16) /* AVX-512 Neural Network Instructions */
@@ -342,5 +344,7 @@
 #define X86_BUG_MONITOR			X86_BUG(12) /* IPI required to wake up remote CPU */
 #define X86_BUG_AMD_E400		X86_BUG(13) /* CPU is among the affected by Erratum 400 */
 #define X86_BUG_CPU_MELTDOWN		X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
+#define X86_BUG_SPECTRE_V1		X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
+#define X86_BUG_SPECTRE_V2		X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/error-injection.h b/arch/x86/include/asm/error-injection.h
new file mode 100644
index 0000000..47b7a12
--- /dev/null
+++ b/arch/x86/include/asm/error-injection.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ERROR_INJECTION_H
+#define _ASM_ERROR_INJECTION_H
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <asm/ptrace.h>
+#include <asm-generic/error-injection.h>
+
+asmlinkage void just_return_func(void);
+void override_function_with_return(struct pt_regs *regs);
+
+#endif /* _ASM_ERROR_INJECTION_H */
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 36abb23..367d99c 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -67,9 +67,7 @@ extern const int kretprobe_blacklist_size;
 void arch_remove_kprobe(struct kprobe *p);
 asmlinkage void kretprobe_trampoline(void);
 
-#ifdef CONFIG_KPROBES_ON_FTRACE
-extern void arch_ftrace_kprobe_override_function(struct pt_regs *regs);
-#endif
+extern void arch_kprobe_override_function(struct pt_regs *regs);
 
 /* Architecture specific copy of original instruction*/
 struct arch_specific_insn {
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 5400add..8bf450b 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -7,6 +7,7 @@
 #include <linux/nmi.h>
 #include <asm/io.h>
 #include <asm/hyperv.h>
+#include <asm/nospec-branch.h>
 
 /*
  * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
@@ -186,10 +187,11 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
 		return U64_MAX;
 
 	__asm__ __volatile__("mov %4, %%r8\n"
-			     "call *%5"
+			     CALL_NOSPEC
 			     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
 			       "+c" (control), "+d" (input_address)
-			     :  "r" (output_address), "m" (hv_hypercall_pg)
+			     :  "r" (output_address),
+				THUNK_TARGET(hv_hypercall_pg)
 			     : "cc", "memory", "r8", "r9", "r10", "r11");
 #else
 	u32 input_address_hi = upper_32_bits(input_address);
@@ -200,13 +202,13 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
 	if (!hv_hypercall_pg)
 		return U64_MAX;
 
-	__asm__ __volatile__("call *%7"
+	__asm__ __volatile__(CALL_NOSPEC
 			     : "=A" (hv_status),
 			       "+c" (input_address_lo), ASM_CALL_CONSTRAINT
 			     : "A" (control),
 			       "b" (input_address_hi),
 			       "D"(output_address_hi), "S"(output_address_lo),
-			       "m" (hv_hypercall_pg)
+			       THUNK_TARGET(hv_hypercall_pg)
 			     : "cc", "memory");
 #endif /* !x86_64 */
 	return hv_status;
@@ -227,10 +229,10 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
 
 #ifdef CONFIG_X86_64
 	{
-		__asm__ __volatile__("call *%4"
+		__asm__ __volatile__(CALL_NOSPEC
 				     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
 				       "+c" (control), "+d" (input1)
-				     : "m" (hv_hypercall_pg)
+				     : THUNK_TARGET(hv_hypercall_pg)
 				     : "cc", "r8", "r9", "r10", "r11");
 	}
 #else
@@ -238,13 +240,13 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
 		u32 input1_hi = upper_32_bits(input1);
 		u32 input1_lo = lower_32_bits(input1);
 
-		__asm__ __volatile__ ("call *%5"
+		__asm__ __volatile__ (CALL_NOSPEC
 				      : "=A"(hv_status),
 					"+c"(input1_lo),
 					ASM_CALL_CONSTRAINT
 				      :	"A" (control),
 					"b" (input1_hi),
-					"m" (hv_hypercall_pg)
+					THUNK_TARGET(hv_hypercall_pg)
 				      : "cc", "edi", "esi");
 	}
 #endif
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 34c4922..e7b983a 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -355,6 +355,9 @@
 #define FAM10H_MMIO_CONF_BASE_MASK	0xfffffffULL
 #define FAM10H_MMIO_CONF_BASE_SHIFT	20
 #define MSR_FAM10H_NODE_ID		0xc001100c
+#define MSR_F10H_DECFG			0xc0011029
+#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT	1
+#define MSR_F10H_DECFG_LFENCE_SERIALIZE		BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
 
 /* K8 MSRs */
 #define MSR_K8_TOP_MEM1			0xc001001a
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
new file mode 100644
index 0000000..402a11c
--- /dev/null
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __NOSPEC_BRANCH_H__
+#define __NOSPEC_BRANCH_H__
+
+#include <asm/alternative.h>
+#include <asm/alternative-asm.h>
+#include <asm/cpufeatures.h>
+
+/*
+ * Fill the CPU return stack buffer.
+ *
+ * Each entry in the RSB, if used for a speculative 'ret', contains an
+ * infinite 'pause; jmp' loop to capture speculative execution.
+ *
+ * This is required in various cases for retpoline and IBRS-based
+ * mitigations for the Spectre variant 2 vulnerability. Sometimes to
+ * eliminate potentially bogus entries from the RSB, and sometimes
+ * purely to ensure that it doesn't get empty, which on some CPUs would
+ * allow predictions from other (unwanted!) sources to be used.
+ *
+ * We define a CPP macro such that it can be used from both .S files and
+ * inline assembly. It's possible to do a .macro and then include that
+ * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
+ */
+
+#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */
+#define RSB_FILL_LOOPS		16	/* To avoid underflow */
+
+/*
+ * Google experimented with loop-unrolling and this turned out to be
+ * the optimal version — two calls, each with their own speculation
+ * trap should their return address end up getting used, in a loop.
+ */
+#define __FILL_RETURN_BUFFER(reg, nr, sp)	\
+	mov	$(nr/2), reg;			\
+771:						\
+	call	772f;				\
+773:	/* speculation trap */			\
+	pause;					\
+	jmp	773b;				\
+772:						\
+	call	774f;				\
+775:	/* speculation trap */			\
+	pause;					\
+	jmp	775b;				\
+774:						\
+	dec	reg;				\
+	jnz	771b;				\
+	add	$(BITS_PER_LONG/8) * nr, sp;
+
+#ifdef __ASSEMBLY__
+
+/*
+ * This should be used immediately before a retpoline alternative.  It tells
+ * objtool where the retpolines are so that it can make sense of the control
+ * flow by just reading the original instruction(s) and ignoring the
+ * alternatives.
+ */
+.macro ANNOTATE_NOSPEC_ALTERNATIVE
+	.Lannotate_\@:
+	.pushsection .discard.nospec
+	.long .Lannotate_\@ - .
+	.popsection
+.endm
+
+/*
+ * These are the bare retpoline primitives for indirect jmp and call.
+ * Do not use these directly; they only exist to make the ALTERNATIVE
+ * invocation below less ugly.
+ */
+.macro RETPOLINE_JMP reg:req
+	call	.Ldo_rop_\@
+.Lspec_trap_\@:
+	pause
+	jmp	.Lspec_trap_\@
+.Ldo_rop_\@:
+	mov	\reg, (%_ASM_SP)
+	ret
+.endm
+
+/*
+ * This is a wrapper around RETPOLINE_JMP so the called function in reg
+ * returns to the instruction after the macro.
+ */
+.macro RETPOLINE_CALL reg:req
+	jmp	.Ldo_call_\@
+.Ldo_retpoline_jmp_\@:
+	RETPOLINE_JMP \reg
+.Ldo_call_\@:
+	call	.Ldo_retpoline_jmp_\@
+.endm
+
+/*
+ * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
+ * indirect jmp/call which may be susceptible to the Spectre variant 2
+ * attack.
+ */
+.macro JMP_NOSPEC reg:req
+#ifdef CONFIG_RETPOLINE
+	ANNOTATE_NOSPEC_ALTERNATIVE
+	ALTERNATIVE_2 __stringify(jmp *\reg),				\
+		__stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE,	\
+		__stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
+#else
+	jmp	*\reg
+#endif
+.endm
+
+.macro CALL_NOSPEC reg:req
+#ifdef CONFIG_RETPOLINE
+	ANNOTATE_NOSPEC_ALTERNATIVE
+	ALTERNATIVE_2 __stringify(call *\reg),				\
+		__stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
+		__stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD
+#else
+	call	*\reg
+#endif
+.endm
+
+ /*
+  * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
+  * monstrosity above, manually.
+  */
+.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
+#ifdef CONFIG_RETPOLINE
+	ANNOTATE_NOSPEC_ALTERNATIVE
+	ALTERNATIVE "jmp .Lskip_rsb_\@",				\
+		__stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP))	\
+		\ftr
+.Lskip_rsb_\@:
+#endif
+.endm
+
+#else /* __ASSEMBLY__ */
+
+#define ANNOTATE_NOSPEC_ALTERNATIVE				\
+	"999:\n\t"						\
+	".pushsection .discard.nospec\n\t"			\
+	".long 999b - .\n\t"					\
+	".popsection\n\t"
+
+#if defined(CONFIG_X86_64) && defined(RETPOLINE)
+
+/*
+ * Since the inline asm uses the %V modifier, which is only in newer GCC,
+ * the 64-bit variant depends on RETPOLINE rather than CONFIG_RETPOLINE.
+ */
+# define CALL_NOSPEC						\
+	ANNOTATE_NOSPEC_ALTERNATIVE				\
+	ALTERNATIVE(						\
+	"call *%[thunk_target]\n",				\
+	"call __x86_indirect_thunk_%V[thunk_target]\n",		\
+	X86_FEATURE_RETPOLINE)
+# define THUNK_TARGET(addr) [thunk_target] "r" (addr)
+
+#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE)
+/*
+ * For i386 we use the original ret-equivalent retpoline, because
+ * otherwise we'll run out of registers. We don't care about CET
+ * here, anyway.
+ */
+# define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n",	\
+	"       jmp    904f;\n"					\
+	"       .align 16\n"					\
+	"901:	call   903f;\n"					\
+	"902:	pause;\n"					\
+	"       jmp    902b;\n"					\
+	"       .align 16\n"					\
+	"903:	addl   $4, %%esp;\n"				\
+	"       pushl  %[thunk_target];\n"			\
+	"       ret;\n"						\
+	"       .align 16\n"					\
+	"904:	call   901b;\n",				\
+	X86_FEATURE_RETPOLINE)
+
+# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
+#else /* No retpoline for C / inline asm */
+# define CALL_NOSPEC "call *%[thunk_target]\n"
+# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
+#endif
+
+/* The Spectre V2 mitigation variants */
+enum spectre_v2_mitigation {
+	SPECTRE_V2_NONE,
+	SPECTRE_V2_RETPOLINE_MINIMAL,
+	SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
+	SPECTRE_V2_RETPOLINE_GENERIC,
+	SPECTRE_V2_RETPOLINE_AMD,
+	SPECTRE_V2_IBRS,
+};
+
+/*
+ * On VMEXIT we must ensure that no RSB predictions learned in the guest
+ * can be followed in the host, by overwriting the RSB completely. Both
+ * retpoline and IBRS mitigations for Spectre v2 need this; only on future
+ * CPUs with IBRS_ATT *might* it be avoided.
+ */
+static inline void vmexit_fill_RSB(void)
+{
+#ifdef CONFIG_RETPOLINE
+	unsigned long loops = RSB_CLEAR_LOOPS / 2;
+
+	asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
+		      ALTERNATIVE("jmp 910f",
+				  __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
+				  X86_FEATURE_RETPOLINE)
+		      "910:"
+		      : "=&r" (loops), ASM_CALL_CONSTRAINT
+		      : "r" (loops) : "memory" );
+#endif
+}
+#endif /* __ASSEMBLY__ */
+#endif /* __NOSPEC_BRANCH_H__ */
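
Annotation: plugging the constants from this header into __FILL_RETURN_BUFFER makes its shape easier to see. Each loop iteration issues two calls, so nr/2 iterations plant nr return addresses in the RSB, and the final stack adjustment reclaims all of the pushed words at once. Worked numbers for x86-64:

	#include <stdio.h>

	#define BITS_PER_LONG	64	/* x86-64 */
	#define RSB_CLEAR_LOOPS	32

	int main(void)
	{
		printf("iterations : %d\n", RSB_CLEAR_LOOPS / 2);	/* 16 */
		printf("entries    : %d\n", RSB_CLEAR_LOOPS);		/* 32 */
		printf("sp advance : %d bytes\n",
		       (BITS_PER_LONG / 8) * RSB_CLEAR_LOOPS);		/* 256 */
		return 0;
	}
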
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index 7a5d669..eb66fa9 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -38,6 +38,7 @@ do {						\
 #define PCI_NOASSIGN_ROMS	0x80000
 #define PCI_ROOT_NO_CRS		0x100000
 #define PCI_NOASSIGN_BARS	0x200000
+#define PCI_BIG_ROOT_WINDOW	0x400000
 
 extern unsigned int pci_probe;
 extern unsigned long pirq_table_addr;
diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
index 6a60fea..625a52a 100644
--- a/arch/x86/include/asm/processor-flags.h
+++ b/arch/x86/include/asm/processor-flags.h
@@ -40,7 +40,7 @@
 #define CR3_NOFLUSH	BIT_ULL(63)
 
 #ifdef CONFIG_PAGE_TABLE_ISOLATION
-# define X86_CR3_PTI_SWITCH_BIT	11
+# define X86_CR3_PTI_PCID_USER_BIT	11
 #endif
 
 #else
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 4a08dd2..d33e4a2 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -81,13 +81,13 @@ static inline u16 kern_pcid(u16 asid)
 	 * Make sure that the dynamic ASID space does not conflict with the
 	 * bit we are using to switch between user and kernel ASIDs.
 	 */
-	BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_SWITCH_BIT));
+	BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT));
 
 	/*
 	 * The ASID being passed in here should have respected the
 	 * MAX_ASID_AVAILABLE and thus never have the switch bit set.
 	 */
-	VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_SWITCH_BIT));
+	VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT));
 #endif
 	/*
 	 * The dynamically-assigned ASIDs that get passed in are small
@@ -112,7 +112,7 @@ static inline u16 user_pcid(u16 asid)
 {
 	u16 ret = kern_pcid(asid);
 #ifdef CONFIG_PAGE_TABLE_ISOLATION
-	ret |= 1 << X86_CR3_PTI_SWITCH_BIT;
+	ret |= 1 << X86_CR3_PTI_PCID_USER_BIT;
 #endif
 	return ret;
 }
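
Annotation: the effect of user_pcid() is simply to set bit 11 on top of the kernel PCID, pairing every kernel ASID with a user ASID in the upper half of the PCID space. Illustrated outside the kernel with an example PCID value:

	#include <stdio.h>

	#define X86_CR3_PTI_PCID_USER_BIT	11

	int main(void)
	{
		unsigned short kern = 1;	/* example kernel PCID */
		unsigned short user = kern | (1 << X86_CR3_PTI_PCID_USER_BIT);

		printf("kern=%#x user=%#x\n", kern, user);	/* 0x1 / 0x801 */
		return 0;
	}
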
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 7cb282e..bfd8826 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -44,6 +44,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/smap.h>
+#include <asm/nospec-branch.h>
 
 #include <xen/interface/xen.h>
 #include <xen/interface/sched.h>
@@ -217,9 +218,9 @@ privcmd_call(unsigned call,
 	__HYPERCALL_5ARG(a1, a2, a3, a4, a5);
 
 	stac();
-	asm volatile("call *%[call]"
+	asm volatile(CALL_NOSPEC
 		     : __HYPERCALL_5PARAM
-		     : [call] "a" (&hypercall_page[call])
+		     : [thunk_target] "a" (&hypercall_page[call])
 		     : __HYPERCALL_CLOBBER5);
 	clac();
 
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index dbaf14d..4817d74 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -344,9 +344,12 @@ recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
 static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
 {
 	unsigned long flags;
+	int i;
 
-	if (instr[0] != 0x90)
-		return;
+	for (i = 0; i < a->padlen; i++) {
+		if (instr[i] != 0x90)
+			return;
+	}
 
 	local_irq_save(flags);
 	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
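
Annotation: the hunk above makes optimize_nops() verify that the whole padding area consists of NOPs (0x90) before rewriting it, instead of trusting the first byte alone. A standalone illustration of why the single-byte test was insufficient (the byte values are an assumed example):

	#include <stdio.h>

	int main(void)
	{
		/* padding that starts with 0x90 but is not all single-byte NOPs */
		unsigned char instr[] = { 0x90, 0x0f, 0x1f, 0x00 };
		int i, padlen = sizeof(instr), all_nops = 1;

		for (i = 0; i < padlen; i++)
			if (instr[i] != 0x90)
				all_nops = 0;

		/* old check (first byte only) would say 1; full check says 0 */
		printf("first=%d all=%d\n", instr[0] == 0x90, all_nops);
		return 0;
	}
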
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index bcb75dc..ea831c8 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -829,8 +829,32 @@ static void init_amd(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_K8);
 
 	if (cpu_has(c, X86_FEATURE_XMM2)) {
-		/* MFENCE stops RDTSC speculation */
-		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
+		unsigned long long val;
+		int ret;
+
+		/*
+		 * A serializing LFENCE has less overhead than MFENCE, so
+		 * use it for execution serialization.  On families which
+		 * don't have that MSR, LFENCE is already serializing.
+		 * msr_set_bit() uses the safe accessors, too, even if the MSR
+		 * is not present.
+		 */
+		msr_set_bit(MSR_F10H_DECFG,
+			    MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
+
+		/*
+		 * Verify that the MSR write was successful (could be running
+		 * under a hypervisor) and only then assume that LFENCE is
+		 * serializing.
+		 */
+		ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
+		if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
+			/* A serializing LFENCE stops RDTSC speculation */
+			set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
+		} else {
+			/* MFENCE stops RDTSC speculation */
+			set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
+		}
 	}
 
 	/*
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index ba0b242..e4dc261 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -10,6 +10,10 @@
  */
 #include <linux/init.h>
 #include <linux/utsname.h>
+#include <linux/cpu.h>
+
+#include <asm/nospec-branch.h>
+#include <asm/cmdline.h>
 #include <asm/bugs.h>
 #include <asm/processor.h>
 #include <asm/processor-flags.h>
@@ -20,6 +24,8 @@
 #include <asm/pgtable.h>
 #include <asm/set_memory.h>
 
+static void __init spectre_v2_select_mitigation(void);
+
 void __init check_bugs(void)
 {
 	identify_boot_cpu();
@@ -29,6 +35,9 @@ void __init check_bugs(void)
 		print_cpu_info(&boot_cpu_data);
 	}
 
+	/* Select the proper spectre mitigation before patching alternatives */
+	spectre_v2_select_mitigation();
+
 #ifdef CONFIG_X86_32
 	/*
 	 * Check whether we are able to run this kernel safely on SMP.
@@ -60,3 +69,179 @@ void __init check_bugs(void)
 		set_memory_4k((unsigned long)__va(0), 1);
 #endif
 }
+
+/* The kernel command line selection */
+enum spectre_v2_mitigation_cmd {
+	SPECTRE_V2_CMD_NONE,
+	SPECTRE_V2_CMD_AUTO,
+	SPECTRE_V2_CMD_FORCE,
+	SPECTRE_V2_CMD_RETPOLINE,
+	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
+	SPECTRE_V2_CMD_RETPOLINE_AMD,
+};
+
+static const char *spectre_v2_strings[] = {
+	[SPECTRE_V2_NONE]			= "Vulnerable",
+	[SPECTRE_V2_RETPOLINE_MINIMAL]		= "Vulnerable: Minimal generic ASM retpoline",
+	[SPECTRE_V2_RETPOLINE_MINIMAL_AMD]	= "Vulnerable: Minimal AMD ASM retpoline",
+	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
+	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
+};
+
+#undef pr_fmt
+#define pr_fmt(fmt)     "Spectre V2 mitigation: " fmt
+
+static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
+
+static void __init spec2_print_if_insecure(const char *reason)
+{
+	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+		pr_info("%s\n", reason);
+}
+
+static void __init spec2_print_if_secure(const char *reason)
+{
+	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+		pr_info("%s\n", reason);
+}
+
+static inline bool retp_compiler(void)
+{
+	return __is_defined(RETPOLINE);
+}
+
+static inline bool match_option(const char *arg, int arglen, const char *opt)
+{
+	int len = strlen(opt);
+
+	return len == arglen && !strncmp(arg, opt, len);
+}
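
Annotation: match_option() compares both the length and the bytes, so an argument such as "retpoline,amd" cannot be mistaken for its prefix "retpoline". A self-contained check of that behavior (plain C mirror of the helper above):

	#include <stdio.h>
	#include <string.h>

	static int match_option(const char *arg, int arglen, const char *opt)
	{
		int len = strlen(opt);

		return len == arglen && !strncmp(arg, opt, len);
	}

	int main(void)
	{
		const char *arg = "retpoline,amd";	/* as parsed from spectre_v2= */
		int arglen = strlen(arg);

		/* strncmp alone would accept the prefix; the length check makes it exact */
		printf("%d\n", match_option(arg, arglen, "retpoline"));	/* 0 */
		printf("%d\n", match_option(arg, arglen, "retpoline,amd"));	/* 1 */
		return 0;
	}
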
+
+static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
+{
+	char arg[20];
+	int ret;
+
+	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
+				  sizeof(arg));
+	if (ret > 0)  {
+		if (match_option(arg, ret, "off")) {
+			goto disable;
+		} else if (match_option(arg, ret, "on")) {
+			spec2_print_if_secure("force enabled on command line.");
+			return SPECTRE_V2_CMD_FORCE;
+		} else if (match_option(arg, ret, "retpoline")) {
+			spec2_print_if_insecure("retpoline selected on command line.");
+			return SPECTRE_V2_CMD_RETPOLINE;
+		} else if (match_option(arg, ret, "retpoline,amd")) {
+			if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
+				pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
+				return SPECTRE_V2_CMD_AUTO;
+			}
+			spec2_print_if_insecure("AMD retpoline selected on command line.");
+			return SPECTRE_V2_CMD_RETPOLINE_AMD;
+		} else if (match_option(arg, ret, "retpoline,generic")) {
+			spec2_print_if_insecure("generic retpoline selected on command line.");
+			return SPECTRE_V2_CMD_RETPOLINE_GENERIC;
+		} else if (match_option(arg, ret, "auto")) {
+			return SPECTRE_V2_CMD_AUTO;
+		}
+	}
+
+	if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
+		return SPECTRE_V2_CMD_AUTO;
+disable:
+	spec2_print_if_insecure("disabled on command line.");
+	return SPECTRE_V2_CMD_NONE;
+}
+
+static void __init spectre_v2_select_mitigation(void)
+{
+	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
+	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
+
+	/*
+	 * If the CPU is not affected and the command line mode is NONE or
+	 * AUTO, there is nothing to do.
+	 */
+	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
+	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
+		return;
+
+	switch (cmd) {
+	case SPECTRE_V2_CMD_NONE:
+		return;
+
+	case SPECTRE_V2_CMD_FORCE:
+		/* FALLTHRU */
+	case SPECTRE_V2_CMD_AUTO:
+		goto retpoline_auto;
+
+	case SPECTRE_V2_CMD_RETPOLINE_AMD:
+		if (IS_ENABLED(CONFIG_RETPOLINE))
+			goto retpoline_amd;
+		break;
+	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
+		if (IS_ENABLED(CONFIG_RETPOLINE))
+			goto retpoline_generic;
+		break;
+	case SPECTRE_V2_CMD_RETPOLINE:
+		if (IS_ENABLED(CONFIG_RETPOLINE))
+			goto retpoline_auto;
+		break;
+	}
+	pr_err("kernel not compiled with retpoline; no mitigation available!");
+	return;
+
+retpoline_auto:
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+	retpoline_amd:
+		if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
+			pr_err("LFENCE not serializing. Switching to generic retpoline\n");
+			goto retpoline_generic;
+		}
+		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
+					 SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
+		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
+		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+	} else {
+	retpoline_generic:
+		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
+					 SPECTRE_V2_RETPOLINE_MINIMAL;
+		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+	}
+
+	spectre_v2_enabled = mode;
+	pr_info("%s\n", spectre_v2_strings[mode]);
+}
+
+#undef pr_fmt
+
+#ifdef CONFIG_SYSFS
+ssize_t cpu_show_meltdown(struct device *dev,
+			  struct device_attribute *attr, char *buf)
+{
+	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
+		return sprintf(buf, "Not affected\n");
+	if (boot_cpu_has(X86_FEATURE_PTI))
+		return sprintf(buf, "Mitigation: PTI\n");
+	return sprintf(buf, "Vulnerable\n");
+}
+
+ssize_t cpu_show_spectre_v1(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
+		return sprintf(buf, "Not affected\n");
+	return sprintf(buf, "Vulnerable\n");
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+		return sprintf(buf, "Not affected\n");
+
+	return sprintf(buf, "%s\n", spectre_v2_strings[spectre_v2_enabled]);
+}
+#endif
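
Annotation: from userspace, the reports produced by these handlers are plain one-line sysfs files under /sys/devices/system/cpu/vulnerabilities/. A minimal reader, with error handling kept short for brevity:

	#include <stdio.h>

	int main(void)
	{
		static const char *names[] = { "meltdown", "spectre_v1", "spectre_v2" };
		char path[128], line[128];
		int i;

		for (i = 0; i < 3; i++) {
			snprintf(path, sizeof(path),
				 "/sys/devices/system/cpu/vulnerabilities/%s", names[i]);
			FILE *f = fopen(path, "r");
			if (f && fgets(line, sizeof(line), f))
				printf("%-10s: %s", names[i], line);	/* line keeps its \n */
			if (f)
				fclose(f);
		}
		return 0;
	}
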
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 39d7ea8..ef29ad0 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -926,6 +926,9 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	if (c->x86_vendor != X86_VENDOR_AMD)
 		setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
 
+	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+	setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+
 	fpu__init_system(c);
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 8ccdca6..d9e460f 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -910,8 +910,17 @@ static bool is_blacklisted(unsigned int cpu)
 {
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-	if (c->x86 == 6 && c->x86_model == INTEL_FAM6_BROADWELL_X) {
-		pr_err_once("late loading on model 79 is disabled.\n");
+	/*
+	 * Late loading on model 79 with microcode revision less than 0x0b000021
+	 * may result in a system hang. This behavior is documented in item
+	 * BDF90, #334165 (Intel Xeon Processor E7-8800/4800 v4 Product Family).
+	 */
+	if (c->x86 == 6 &&
+	    c->x86_model == INTEL_FAM6_BROADWELL_X &&
+	    c->x86_mask == 0x01 &&
+	    c->microcode < 0x0b000021) {
+		pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
+		pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
 		return true;
 	}
 
diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
index b6c6468..4c8440d 100644
--- a/arch/x86/kernel/ftrace_32.S
+++ b/arch/x86/kernel/ftrace_32.S
@@ -8,6 +8,7 @@
 #include <asm/segment.h>
 #include <asm/export.h>
 #include <asm/ftrace.h>
+#include <asm/nospec-branch.h>
 
 #ifdef CC_USING_FENTRY
 # define function_hook	__fentry__
@@ -197,7 +198,8 @@
 	movl	0x4(%ebp), %edx
 	subl	$MCOUNT_INSN_SIZE, %eax
 
-	call	*ftrace_trace_function
+	movl	ftrace_trace_function, %ecx
+	CALL_NOSPEC %ecx
 
 	popl	%edx
 	popl	%ecx
@@ -241,5 +243,5 @@
 	movl	%eax, %ecx
 	popl	%edx
 	popl	%eax
-	jmp	*%ecx
+	JMP_NOSPEC %ecx
 #endif
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index c832291..7cb8ba0 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -7,7 +7,7 @@
 #include <asm/ptrace.h>
 #include <asm/ftrace.h>
 #include <asm/export.h>
-
+#include <asm/nospec-branch.h>
 
 	.code64
 	.section .entry.text, "ax"
@@ -286,8 +286,8 @@
 	 * ip and parent ip are used and the list function is called when
 	 * function tracing is enabled.
 	 */
-	call   *ftrace_trace_function
-
+	movq ftrace_trace_function, %r8
+	CALL_NOSPEC %r8
 	restore_mcount_regs
 
 	jmp fgraph_trace
@@ -329,5 +329,5 @@
 	movq 8(%rsp), %rdx
 	movq (%rsp), %rax
 	addq $24, %rsp
-	jmp *%rdi
+	JMP_NOSPEC %rdi
 #endif
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index a83b334..c1bdbd3 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -20,6 +20,7 @@
 #include <linux/mm.h>
 
 #include <asm/apic.h>
+#include <asm/nospec-branch.h>
 
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 
@@ -55,11 +56,11 @@ DEFINE_PER_CPU(struct irq_stack *, softirq_stack);
 static void call_on_stack(void *func, void *stack)
 {
 	asm volatile("xchgl	%%ebx,%%esp	\n"
-		     "call	*%%edi		\n"
+		     CALL_NOSPEC
 		     "movl	%%ebx,%%esp	\n"
 		     : "=b" (stack)
 		     : "0" (stack),
-		       "D"(func)
+		       [thunk_target] "D"(func)
 		     : "memory", "cc", "edx", "ecx", "eax");
 }
 
@@ -95,11 +96,11 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
 		call_on_stack(print_stack_overflow, isp);
 
 	asm volatile("xchgl	%%ebx,%%esp	\n"
-		     "call	*%%edi		\n"
+		     CALL_NOSPEC
 		     "movl	%%ebx,%%esp	\n"
 		     : "=a" (arg1), "=b" (isp)
 		     :  "0" (desc),   "1" (isp),
-			"D" (desc->handle_irq)
+			[thunk_target] "D" (desc->handle_irq)
 		     : "memory", "cc", "ecx");
 	return 1;
 }
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index 1ea748d..8dc0161 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -97,17 +97,3 @@ int arch_prepare_kprobe_ftrace(struct kprobe *p)
 	p->ainsn.boostable = false;
 	return 0;
 }
-
-asmlinkage void override_func(void);
-asm(
-	".type override_func, @function\n"
-	"override_func:\n"
-	"	ret\n"
-	".size override_func, .-override_func\n"
-);
-
-void arch_ftrace_kprobe_override_function(struct pt_regs *regs)
-{
-	regs->ip = (unsigned long)&override_func;
-}
-NOKPROBE_SYMBOL(arch_ftrace_kprobe_override_function);
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index a4eb279..a2486f4 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -138,6 +138,17 @@ static int map_tboot_page(unsigned long vaddr, unsigned long pfn,
 		return -1;
 	set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot));
 	pte_unmap(pte);
+
+	/*
+	 * PTI poisons low addresses in the kernel page tables in the
+	 * name of making them unusable for userspace.  To execute
+	 * code at such a low address, the poison must be cleared.
+	 *
+	 * Note: 'pgd' actually gets set in p4d_alloc() _or_
+	 * pud_alloc() depending on 4/5-level paging.
+	 */
+	pgd->pgd &= ~_PAGE_NX;
+
 	return 0;
 }
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c4deb1f..2b8eb4d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3781,7 +3781,8 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
 {
 	if (unlikely(!lapic_in_kernel(vcpu) ||
-		     kvm_event_needs_reinjection(vcpu)))
+		     kvm_event_needs_reinjection(vcpu) ||
+		     vcpu->arch.exception.pending))
 		return false;
 
 	if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu))
@@ -5465,30 +5466,34 @@ static void mmu_destroy_caches(void)
 
 int kvm_mmu_module_init(void)
 {
+	int ret = -ENOMEM;
+
 	kvm_mmu_clear_all_pte_masks();
 
 	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
 					    sizeof(struct pte_list_desc),
 					    0, SLAB_ACCOUNT, NULL);
 	if (!pte_list_desc_cache)
-		goto nomem;
+		goto out;
 
 	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
 						  sizeof(struct kvm_mmu_page),
 						  0, SLAB_ACCOUNT, NULL);
 	if (!mmu_page_header_cache)
-		goto nomem;
+		goto out;
 
 	if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
-		goto nomem;
+		goto out;
 
-	register_shrinker(&mmu_shrinker);
+	ret = register_shrinker(&mmu_shrinker);
+	if (ret)
+		goto out;
 
 	return 0;
 
-nomem:
+out:
 	mmu_destroy_caches();
-	return -ENOMEM;
+	return ret;
 }
 
 /*
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index bb31c80..f40d0da 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -45,6 +45,7 @@
 #include <asm/debugreg.h>
 #include <asm/kvm_para.h>
 #include <asm/irq_remapping.h>
+#include <asm/nospec-branch.h>
 
 #include <asm/virtext.h>
 #include "trace.h"
@@ -361,7 +362,6 @@ static void recalc_intercepts(struct vcpu_svm *svm)
 {
 	struct vmcb_control_area *c, *h;
 	struct nested_state *g;
-	u32 h_intercept_exceptions;
 
 	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 
@@ -372,14 +372,9 @@ static void recalc_intercepts(struct vcpu_svm *svm)
 	h = &svm->nested.hsave->control;
 	g = &svm->nested;
 
-	/* No need to intercept #UD if L1 doesn't intercept it */
-	h_intercept_exceptions =
-		h->intercept_exceptions & ~(1U << UD_VECTOR);
-
 	c->intercept_cr = h->intercept_cr | g->intercept_cr;
 	c->intercept_dr = h->intercept_dr | g->intercept_dr;
-	c->intercept_exceptions =
-		h_intercept_exceptions | g->intercept_exceptions;
+	c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
 	c->intercept = h->intercept | g->intercept;
 }
 
@@ -2202,7 +2197,6 @@ static int ud_interception(struct vcpu_svm *svm)
 {
 	int er;
 
-	WARN_ON_ONCE(is_guest_mode(&svm->vcpu));
 	er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
 	if (er == EMULATE_USER_EXIT)
 		return 0;
@@ -5034,6 +5028,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
 		);
 
+	/* Eliminate branch target predictions from guest mode */
+	vmexit_fill_RSB();
+
 #ifdef CONFIG_X86_64
 	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
 #else
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 5c14d65..c829d89 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -50,6 +50,7 @@
 #include <asm/apic.h>
 #include <asm/irq_remapping.h>
 #include <asm/mmu_context.h>
+#include <asm/nospec-branch.h>
 
 #include "trace.h"
 #include "pmu.h"
@@ -899,8 +900,16 @@ static inline short vmcs_field_to_offset(unsigned long field)
 {
 	BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX);
 
-	if (field >= ARRAY_SIZE(vmcs_field_to_offset_table) ||
-	    vmcs_field_to_offset_table[field] == 0)
+	if (field >= ARRAY_SIZE(vmcs_field_to_offset_table))
+		return -ENOENT;
+
+	/*
+	 * FIXME: Mitigation for CVE-2017-5753.  To be replaced with a
+	 * generic mechanism.
+	 */
+	asm("lfence");
+
+	if (vmcs_field_to_offset_table[field] == 0)
 		return -ENOENT;
 
 	return vmcs_field_to_offset_table[field];
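
Annotation: the interim fix above is the generic Spectre-variant-1 pattern: bounds check first, then a serializing instruction, so the dependent load cannot execute speculatively with an out-of-range index. A hypothetical table lookup hardened the same way (a sketch only, not the generic mechanism the FIXME refers to):

	/* idx comes from an untrusted source, e.g. a guest-controlled field */
	static long lookup(const short *table, unsigned long nr, unsigned long idx)
	{
		if (idx >= nr)
			return -1;

		asm volatile("lfence" ::: "memory");	/* speculation barrier */

		return table[idx];
	}
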
@@ -1887,7 +1896,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 {
 	u32 eb;
 
-	eb = (1u << PF_VECTOR) | (1u << MC_VECTOR) |
+	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
 	     (1u << DB_VECTOR) | (1u << AC_VECTOR);
 	if ((vcpu->guest_debug &
 	     (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
@@ -1905,8 +1914,6 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 	 */
 	if (is_guest_mode(vcpu))
 		eb |= get_vmcs12(vcpu)->exception_bitmap;
-	else
-		eb |= 1u << UD_VECTOR;
 
 	vmcs_write32(EXCEPTION_BITMAP, eb);
 }
@@ -5917,7 +5924,6 @@ static int handle_exception(struct kvm_vcpu *vcpu)
 		return 1;  /* already handled by vmx_vcpu_run() */
 
 	if (is_invalid_opcode(intr_info)) {
-		WARN_ON_ONCE(is_guest_mode(vcpu));
 		er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
 		if (er == EMULATE_USER_EXIT)
 			return 0;
@@ -9485,6 +9491,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
 	      );
 
+	/* Eliminate branch target predictions from guest mode */
+	vmexit_fill_RSB();
+
 	/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
 	if (debugctlmsr)
 		update_debugctlmsr(debugctlmsr);
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 7b181b6..25a972c 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -26,6 +26,8 @@
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
 lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
+lib-$(CONFIG_FUNCTION_ERROR_INJECTION)	+= error-inject.o
+lib-$(CONFIG_RETPOLINE) += retpoline.o
 
 obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
 
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index 4d34bb5..46e71a7 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -29,7 +29,8 @@
 #include <asm/errno.h>
 #include <asm/asm.h>
 #include <asm/export.h>
-				
+#include <asm/nospec-branch.h>
+
 /*
  * computes a partial checksum, e.g. for TCP/UDP fragments
  */
@@ -156,7 +157,7 @@
 	negl %ebx
 	lea 45f(%ebx,%ebx,2), %ebx
 	testl %esi, %esi
-	jmp *%ebx
+	JMP_NOSPEC %ebx
 
 	# Handle 2-byte-aligned regions
 20:	addw (%esi), %ax
@@ -439,7 +440,7 @@
 	andl $-32,%edx
 	lea 3f(%ebx,%ebx), %ebx
 	testl %esi, %esi 
-	jmp *%ebx
+	JMP_NOSPEC %ebx
 1:	addl $64,%esi
 	addl $64,%edi 
 	SRC(movb -32(%edx),%bl)	; SRC(movb (%edx),%bl)
diff --git a/arch/x86/lib/error-inject.c b/arch/x86/lib/error-inject.c
new file mode 100644
index 0000000..7b881d0
--- /dev/null
+++ b/arch/x86/lib/error-inject.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/error-injection.h>
+#include <linux/kprobes.h>
+
+asmlinkage void just_return_func(void);
+
+asm(
+	".type just_return_func, @function\n"
+	"just_return_func:\n"
+	"	ret\n"
+	".size just_return_func, .-just_return_func\n"
+);
+
+void override_function_with_return(struct pt_regs *regs)
+{
+	regs->ip = (unsigned long)&just_return_func;
+}
+NOKPROBE_SYMBOL(override_function_with_return);
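
Annotation: a hypothetical user of this helper, loosely following the pattern of the function-error-injection framework: a kprobe pre-handler calls override_function_with_return() to point regs->ip at just_return_func, making the probed function return immediately. The symbol name below is assumed for illustration, and module init/exit boilerplate is omitted:

	#include <linux/kprobes.h>
	#include <linux/error-injection.h>

	static int force_return(struct kprobe *kp, struct pt_regs *regs)
	{
		override_function_with_return(regs);
		return 1;	/* regs->ip was changed; skip single-stepping */
	}

	static struct kprobe kp = {
		.symbol_name	= "some_target_func",	/* assumed target symbol */
		.pre_handler	= force_return,
	};

	/* register_kprobe(&kp) from module init would arm the override */
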
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
new file mode 100644
index 0000000..cb45c6c
--- /dev/null
+++ b/arch/x86/lib/retpoline.S
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/stringify.h>
+#include <linux/linkage.h>
+#include <asm/dwarf2.h>
+#include <asm/cpufeatures.h>
+#include <asm/alternative-asm.h>
+#include <asm/export.h>
+#include <asm/nospec-branch.h>
+
+.macro THUNK reg
+	.section .text.__x86.indirect_thunk.\reg
+
+ENTRY(__x86_indirect_thunk_\reg)
+	CFI_STARTPROC
+	JMP_NOSPEC %\reg
+	CFI_ENDPROC
+ENDPROC(__x86_indirect_thunk_\reg)
+.endm
+
+/*
+ * Despite being an assembler file we can't just use .irp here
+ * because __KSYM_DEPS__ only uses the C preprocessor and would
+ * only see one instance of "__x86_indirect_thunk_\reg" rather
+ * than one per register with the correct names. So we do it
+ * the simple and nasty way...
+ */
+#define EXPORT_THUNK(reg) EXPORT_SYMBOL(__x86_indirect_thunk_ ## reg)
+#define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg)
+
+GENERATE_THUNK(_ASM_AX)
+GENERATE_THUNK(_ASM_BX)
+GENERATE_THUNK(_ASM_CX)
+GENERATE_THUNK(_ASM_DX)
+GENERATE_THUNK(_ASM_SI)
+GENERATE_THUNK(_ASM_DI)
+GENERATE_THUNK(_ASM_BP)
+GENERATE_THUNK(_ASM_SP)
+#ifdef CONFIG_64BIT
+GENERATE_THUNK(r8)
+GENERATE_THUNK(r9)
+GENERATE_THUNK(r10)
+GENERATE_THUNK(r11)
+GENERATE_THUNK(r12)
+GENERATE_THUNK(r13)
+GENERATE_THUNK(r14)
+GENERATE_THUNK(r15)
+#endif
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 43d4a4a..ce38f16 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -149,7 +149,7 @@ pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
  *
  * Returns a pointer to a P4D on success, or NULL on failure.
  */
-static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
+static __init p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
 {
 	pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
 	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
@@ -164,12 +164,7 @@ static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
 		if (!new_p4d_page)
 			return NULL;
 
-		if (pgd_none(*pgd)) {
-			set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
-			new_p4d_page = 0;
-		}
-		if (new_p4d_page)
-			free_page(new_p4d_page);
+		set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
 	}
 	BUILD_BUG_ON(pgd_large(*pgd) != 0);
 
@@ -182,7 +177,7 @@ static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
  *
  * Returns a pointer to a PMD on success, or NULL on failure.
  */
-static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
+static __init pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
 {
 	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
 	p4d_t *p4d = pti_user_pagetable_walk_p4d(address);
@@ -194,12 +189,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
 		if (!new_pud_page)
 			return NULL;
 
-		if (p4d_none(*p4d)) {
-			set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
-			new_pud_page = 0;
-		}
-		if (new_pud_page)
-			free_page(new_pud_page);
+		set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
 	}
 
 	pud = pud_offset(p4d, address);
@@ -213,12 +203,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
 		if (!new_pmd_page)
 			return NULL;
 
-		if (pud_none(*pud)) {
-			set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
-			new_pmd_page = 0;
-		}
-		if (new_pmd_page)
-			free_page(new_pmd_page);
+		set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
 	}
 
 	return pmd_offset(pud, address);
@@ -251,12 +236,7 @@ static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
 		if (!new_pte_page)
 			return NULL;
 
-		if (pmd_none(*pmd)) {
-			set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
-			new_pte_page = 0;
-		}
-		if (new_pte_page)
-			free_page(new_pte_page);
+		set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
 	}
 
 	pte = pte_offset_kernel(pmd, address);
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 7a5350d..563049c 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -594,6 +594,11 @@ char *__init pcibios_setup(char *str)
 	} else if (!strcmp(str, "nocrs")) {
 		pci_probe |= PCI_ROOT_NO_CRS;
 		return NULL;
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+	} else if (!strcmp(str, "big_root_window")) {
+		pci_probe |= PCI_BIG_ROOT_WINDOW;
+		return NULL;
+#endif
 	} else if (!strcmp(str, "earlydump")) {
 		pci_early_dump_regs = 1;
 		return NULL;
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index e663d6b..f6a26e3 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -662,10 +662,14 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid);
  */
 static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
 {
-	unsigned i;
 	u32 base, limit, high;
-	struct resource *res, *conflict;
 	struct pci_dev *other;
+	struct resource *res;
+	unsigned i;
+	int r;
+
+	if (!(pci_probe & PCI_BIG_ROOT_WINDOW))
+		return;
 
 	/* Check that we are the only device of that type */
 	other = pci_get_device(dev->vendor, dev->device, NULL);
@@ -699,22 +703,25 @@ static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
 	if (!res)
 		return;
 
+	/*
+	 * Allocate a 256GB window directly below the 0xfd00000000 hardware
+	 * limit (see AMD Family 15h Models 30h-3Fh BKDG, sec 2.4.6).
+	 */
 	res->name = "PCI Bus 0000:00";
 	res->flags = IORESOURCE_PREFETCH | IORESOURCE_MEM |
 		IORESOURCE_MEM_64 | IORESOURCE_WINDOW;
-	res->start = 0x100000000ull;
+	res->start = 0xbd00000000ull;
 	res->end = 0xfd00000000ull - 1;
 
-	/* Just grab the free area behind system memory for this */
-	while ((conflict = request_resource_conflict(&iomem_resource, res))) {
-		if (conflict->end >= res->end) {
-			kfree(res);
-			return;
-		}
-		res->start = conflict->end + 1;
+	r = request_resource(&iomem_resource, res);
+	if (r) {
+		kfree(res);
+		return;
 	}
 
-	dev_info(&dev->dev, "adding root bus resource %pR\n", res);
+	dev_info(&dev->dev, "adding root bus resource %pR (tainting kernel)\n",
+		 res);
+	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
 
 	base = ((res->start >> 8) & AMD_141b_MMIO_BASE_MMIOBASE_MASK) |
 		AMD_141b_MMIO_BASE_RE_MASK | AMD_141b_MMIO_BASE_WE_MASK;
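
Annotation: the new fixed placement is easy to sanity-check: the window spans 0xbd00000000 up to (but not including) the 0xfd00000000 hardware limit, which is 0x4000000000 bytes:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long start = 0xbd00000000ull;
		unsigned long long end   = 0xfd00000000ull;	/* hardware limit */

		printf("%llu GiB\n", (end - start) >> 30);	/* 256 GiB */
		return 0;
	}
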
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index d87ac96..2dd15e9 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -135,7 +135,9 @@ pgd_t * __init efi_call_phys_prolog(void)
 				pud[j] = *pud_offset(p4d_k, vaddr);
 			}
 		}
+		pgd_offset_k(pgd * PGDIR_SIZE)->pgd &= ~_PAGE_NX;
 	}
+
 out:
 	__flush_tlb_all();
 
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bt.c b/arch/x86/platform/intel-mid/device_libs/platform_bt.c
index dc036e5..5a0483e 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_bt.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_bt.c
@@ -60,7 +60,7 @@ static int __init tng_bt_sfi_setup(struct bt_sfi_data *ddata)
 	return 0;
 }
 
-static const struct bt_sfi_data tng_bt_sfi_data __initdata = {
+static struct bt_sfi_data tng_bt_sfi_data __initdata = {
 	.setup	= tng_bt_sfi_setup,
 };
 
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 4d62c07..d850762 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1325,20 +1325,18 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
 {
 	struct {
 		struct mmuext_op op;
-#ifdef CONFIG_SMP
-		DECLARE_BITMAP(mask, num_processors);
-#else
 		DECLARE_BITMAP(mask, NR_CPUS);
-#endif
 	} *args;
 	struct multicall_space mcs;
+	const size_t mc_entry_size = sizeof(args->op) +
+		sizeof(args->mask[0]) * BITS_TO_LONGS(num_possible_cpus());
 
 	trace_xen_mmu_flush_tlb_others(cpus, info->mm, info->start, info->end);
 
 	if (cpumask_empty(cpus))
 		return;		/* nothing to do */
 
-	mcs = xen_mc_entry(sizeof(*args));
+	mcs = xen_mc_entry(mc_entry_size);
 	args = mcs.args;
 	args->op.arg2.vcpumask = to_cpumask(args->mask);
 
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 75011b8..3b34745 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -72,7 +72,7 @@ u64 xen_clocksource_read(void);
 void xen_setup_cpu_clockevents(void);
 void xen_save_time_memory_area(void);
 void xen_restore_time_memory_area(void);
-void __init xen_init_time_ops(void);
+void __ref xen_init_time_ops(void);
 void __init xen_hvm_init_time_ops(void);
 
 irqreturn_t xen_debug_interrupt(int irq, void *dev_id);
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 60d7366..9a636f9 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -167,6 +167,18 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
 
 			spawn->alg = NULL;
 			spawns = &inst->alg.cra_users;
+
+			/*
+			 * We may encounter an unregistered instance here, since
+			 * an instance's spawns are set up prior to the instance
+			 * being registered.  An unregistered instance will have
+			 * NULL ->cra_users.next, since ->cra_users isn't
+			 * properly initialized until registration.  But an
+			 * unregistered instance cannot have any users, so treat
+			 * it the same as ->cra_users being empty.
+			 */
+			if (spawns->next == NULL)
+				break;
 		}
 	} while ((spawns = crypto_more_spawns(alg, &stack, &top,
 					      &secondary_spawns)));
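
The sentinel works because a list_head only ever has a NULL ->next when
its storage was zero-initialized and INIT_LIST_HEAD() has not yet run;
an initialized (even empty) list points back at itself. Roughly:

	static inline void INIT_LIST_HEAD(struct list_head *list)
	{
		list->next = list;	/* empty list: self-referencing, */
		list->prev = list;	/* never NULL                    */
	}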
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index bdc8790..2415ad9 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -236,6 +236,9 @@
 config GENERIC_CPU_AUTOPROBE
 	bool
 
+config GENERIC_CPU_VULNERABILITIES
+	bool
+
 config SOC_BUS
 	bool
 	select GLOB
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 58a9b60..d990384 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -511,10 +511,58 @@ static void __init cpu_dev_register_generic(void)
 #endif
 }
 
+#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
+
+ssize_t __weak cpu_show_meltdown(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_spectre_v1(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_spectre_v2(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "Not affected\n");
+}
+
+static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
+static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
+static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
+
+static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+	&dev_attr_meltdown.attr,
+	&dev_attr_spectre_v1.attr,
+	&dev_attr_spectre_v2.attr,
+	NULL
+};
+
+static const struct attribute_group cpu_root_vulnerabilities_group = {
+	.name  = "vulnerabilities",
+	.attrs = cpu_root_vulnerabilities_attrs,
+};
+
+static void __init cpu_register_vulnerabilities(void)
+{
+	if (sysfs_create_group(&cpu_subsys.dev_root->kobj,
+			       &cpu_root_vulnerabilities_group))
+		pr_err("Unable to register CPU vulnerabilities\n");
+}
+
+#else
+static inline void cpu_register_vulnerabilities(void) { }
+#endif
+
 void __init cpu_dev_init(void)
 {
 	if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
 		panic("Failed to register CPU subsystem");
 
 	cpu_dev_register_generic();
+	cpu_register_vulnerabilities();
 }
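
Architectures opt in by selecting GENERIC_CPU_VULNERABILITIES and
shipping strong definitions that override the __weak defaults above. A
hedged sketch of what an x86-side override could look like (the exact
bug/feature checks here are illustrative, not part of this patch):

	ssize_t cpu_show_meltdown(struct device *dev,
				  struct device_attribute *attr, char *buf)
	{
		if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
			return sprintf(buf, "Not affected\n");
		if (boot_cpu_has(X86_FEATURE_PTI))
			return sprintf(buf, "Mitigation: PTI\n");
		return sprintf(buf, "Vulnerable\n");
	}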
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 85d4c57..49af946 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2777,12 +2777,12 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 }
 
 static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
-		unsigned int opcode, int rings)
+		unsigned int opcode, unsigned long rings)
 {
 	struct cmd_info *info = NULL;
 	unsigned int ring;
 
-	for_each_set_bit(ring, (unsigned long *)&rings, I915_NUM_ENGINES) {
+	for_each_set_bit(ring, &rings, I915_NUM_ENGINES) {
 		info = find_cmd_entry(gvt, opcode, ring);
 		if (info)
 			break;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 8e33114..64d67ff 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1359,12 +1359,15 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
 			return ret;
 	} else {
 		if (!test_bit(index, spt->post_shadow_bitmap)) {
+			int type = spt->shadow_page.type;
+
 			ppgtt_get_shadow_entry(spt, &se, index);
 			ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
 			if (ret)
 				return ret;
+			ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
+			ppgtt_set_shadow_entry(spt, &se, index);
 		}
-
 		ppgtt_set_post_shadow(spt, index);
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 18de656..5cfba89 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -467,7 +467,7 @@ static void __fence_set_priority(struct dma_fence *fence, int prio)
 	struct drm_i915_gem_request *rq;
 	struct intel_engine_cs *engine;
 
-	if (!dma_fence_is_i915(fence))
+	if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
 		return;
 
 	rq = to_request(fence);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 333f40b..7923dfd 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7027,6 +7027,8 @@ enum {
 #define GEN9_SLICE_COMMON_ECO_CHICKEN0		_MMIO(0x7308)
 #define  DISABLE_PIXEL_MASK_CAMMING		(1<<14)
 
+#define GEN9_SLICE_COMMON_ECO_CHICKEN1		_MMIO(0x731c)
+
 #define GEN7_L3SQCREG1				_MMIO(0xB010)
 #define  VLV_B0_WA_L3SQCREG1_VALUE		0x00D30000
 
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index ab5bf4e..6074e04d 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1390,6 +1390,11 @@ static int glk_init_workarounds(struct intel_engine_cs *engine)
 	if (ret)
 		return ret;
 
+	/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
+	ret = wa_ring_whitelist_reg(engine, GEN9_SLICE_COMMON_ECO_CHICKEN1);
+	if (ret)
+		return ret;
+
 	/* WaToEnableHwFixForPushConstHWBug:glk */
 	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
 			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index d36e256..e71a8cd 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -974,6 +974,9 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
 
 	GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
 
+	if (i915_gem_request_completed(request))
+		return;
+
 	if (prio <= READ_ONCE(request->priotree.priority))
 		return;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
index a2978a3..700fc75 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -174,6 +174,7 @@ gf119_sor = {
 		.links = gf119_sor_dp_links,
 		.power = g94_sor_dp_power,
 		.pattern = gf119_sor_dp_pattern,
+		.drive = gf119_sor_dp_drive,
 		.vcpi = gf119_sor_dp_vcpi,
 		.audio = gf119_sor_dp_audio,
 		.audio_sym = gf119_sor_dp_audio_sym,
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index b0a1ded..476079f 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -2656,6 +2656,9 @@ static int tegra_sor_probe(struct platform_device *pdev)
 				name, err);
 			goto remove;
 		}
+	} else {
+		/* fall back to the module clock on SOR0 (eDP/LVDS only) */
+		sor->clk_out = sor->clk;
 	}
 
 	sor->clk_parent = devm_clk_get(&pdev->dev, "parent");
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index 26eddbb..3dd62d7 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -209,9 +209,6 @@ vc4_irq_postinstall(struct drm_device *dev)
 {
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 
-	/* Undo the effects of a previous vc4_irq_uninstall. */
-	enable_irq(dev->irq);
-
 	/* Enable both the render done and out of memory interrupts. */
 	V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
 
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 622cd43..493f392b 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -327,6 +327,9 @@ static int vc4_v3d_runtime_resume(struct device *dev)
 		return ret;
 
 	vc4_v3d_init_hw(vc4->dev);
+
+	/* We disabled the IRQ as part of vc4_irq_uninstall in suspend. */
+	enable_irq(vc4->dev->irq);
 	vc4_irq_postinstall(vc4->dev);
 
 	return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 21c62a3..87e8af5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2731,6 +2731,8 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
 	}
 
 	view_type = vmw_view_cmd_to_type(header->id);
+	if (view_type == vmw_view_max)
+		return -EINVAL;
 	cmd = container_of(header, typeof(*cmd), header);
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 				user_surface_converter,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 0545740..641294a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -697,7 +697,6 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
 	vps->pinned = 0;
 
 	/* Mapping is managed by prepare_fb/cleanup_fb */
-	memset(&vps->guest_map, 0, sizeof(vps->guest_map));
 	memset(&vps->host_map, 0, sizeof(vps->host_map));
 	vps->cpp = 0;
 
@@ -760,11 +759,6 @@ vmw_du_plane_destroy_state(struct drm_plane *plane,
 
 
 	/* Should have been freed by cleanup_fb */
-	if (vps->guest_map.virtual) {
-		DRM_ERROR("Guest mapping not freed\n");
-		ttm_bo_kunmap(&vps->guest_map);
-	}
-
 	if (vps->host_map.virtual) {
 		DRM_ERROR("Host mapping not freed\n");
 		ttm_bo_kunmap(&vps->host_map);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index ff9c838..cd9da2d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -175,7 +175,7 @@ struct vmw_plane_state {
 	int pinned;
 
 	/* For CPU Blit */
-	struct ttm_bo_kmap_obj host_map, guest_map;
+	struct ttm_bo_kmap_obj host_map;
 	unsigned int cpp;
 };
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 90b5437..b68d748 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -114,7 +114,7 @@ struct vmw_screen_target_display_unit {
 	bool defined;
 
 	/* For CPU Blit */
-	struct ttm_bo_kmap_obj host_map, guest_map;
+	struct ttm_bo_kmap_obj host_map;
 	unsigned int cpp;
 };
 
@@ -695,7 +695,8 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
 	s32 src_pitch, dst_pitch;
 	u8 *src, *dst;
 	bool not_used;
-
+	struct ttm_bo_kmap_obj guest_map;
+	int ret;
 
 	if (!dirty->num_hits)
 		return;
@@ -706,6 +707,13 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
 	if (width == 0 || height == 0)
 		return;
 
+	ret = ttm_bo_kmap(&ddirty->buf->base, 0, ddirty->buf->base.num_pages,
+			  &guest_map);
+	if (ret) {
+		DRM_ERROR("Failed mapping framebuffer for blit: %d\n",
+			  ret);
+		goto out_cleanup;
+	}
 
 	/* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */
 	src_pitch = stdu->display_srf->base_size.width * stdu->cpp;
@@ -713,7 +721,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
 	src += ddirty->top * src_pitch + ddirty->left * stdu->cpp;
 
 	dst_pitch = ddirty->pitch;
-	dst = ttm_kmap_obj_virtual(&stdu->guest_map, &not_used);
+	dst = ttm_kmap_obj_virtual(&guest_map, &not_used);
 	dst += ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
 
 
@@ -772,6 +780,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
 		vmw_fifo_commit(dev_priv, sizeof(*cmd));
 	}
 
+	ttm_bo_kunmap(&guest_map);
 out_cleanup:
 	ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX;
 	ddirty->right = ddirty->bottom = S32_MIN;
@@ -1109,9 +1118,6 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane,
 {
 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
 
-	if (vps->guest_map.virtual)
-		ttm_bo_kunmap(&vps->guest_map);
-
 	if (vps->host_map.virtual)
 		ttm_bo_kunmap(&vps->host_map);
 
@@ -1277,33 +1283,11 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
 	 */
 	if (vps->content_fb_type == SEPARATE_DMA &&
 	    !(dev_priv->capabilities & SVGA_CAP_3D)) {
-
-		struct vmw_framebuffer_dmabuf *new_vfbd;
-
-		new_vfbd = vmw_framebuffer_to_vfbd(new_fb);
-
-		ret = ttm_bo_reserve(&new_vfbd->buffer->base, false, false,
-				     NULL);
-		if (ret)
-			goto out_srf_unpin;
-
-		ret = ttm_bo_kmap(&new_vfbd->buffer->base, 0,
-				  new_vfbd->buffer->base.num_pages,
-				  &vps->guest_map);
-
-		ttm_bo_unreserve(&new_vfbd->buffer->base);
-
-		if (ret) {
-			DRM_ERROR("Failed to map content buffer to CPU\n");
-			goto out_srf_unpin;
-		}
-
 		ret = ttm_bo_kmap(&vps->surf->res.backup->base, 0,
 				  vps->surf->res.backup->base.num_pages,
 				  &vps->host_map);
 		if (ret) {
 			DRM_ERROR("Failed to map display buffer to CPU\n");
-			ttm_bo_kunmap(&vps->guest_map);
 			goto out_srf_unpin;
 		}
 
@@ -1350,7 +1334,6 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
 	stdu->display_srf = vps->surf;
 	stdu->content_fb_type = vps->content_fb_type;
 	stdu->cpp = vps->cpp;
-	memcpy(&stdu->guest_map, &vps->guest_map, sizeof(vps->guest_map));
 	memcpy(&stdu->host_map, &vps->host_map, sizeof(vps->host_map));
 
 	if (!stdu->defined)
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 7750a9c..1df7da4 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -763,11 +763,11 @@ static int complete_subctxt(struct hfi1_filedata *fd)
 	}
 
 	if (ret) {
-		hfi1_rcd_put(fd->uctxt);
-		fd->uctxt = NULL;
 		spin_lock_irqsave(&fd->dd->uctxt_lock, flags);
 		__clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts);
 		spin_unlock_irqrestore(&fd->dd->uctxt_lock, flags);
+		hfi1_rcd_put(fd->uctxt);
+		fd->uctxt = NULL;
 	}
 
 	return ret;
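
The reordering fixes a NULL (and potentially use-after-free)
dereference: hfi1_rcd_put() may drop the last reference, and the old
sequence also cleared fd->uctxt before still dereferencing it:

	hfi1_rcd_put(fd->uctxt);	/* may free uctxt           */
	fd->uctxt = NULL;
	...
	__clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts);
					/* fd->uctxt is NULL here   */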
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 8ac50de..262c1aa 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1324,7 +1324,8 @@ static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
 		return err;
 
 	if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
-	    !MLX5_CAP_GEN(dev->mdev, disable_local_lb))
+	    (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
+	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
 		return err;
 
 	mutex_lock(&dev->lb_mutex);
@@ -1342,7 +1343,8 @@ static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
 	mlx5_core_dealloc_transport_domain(dev->mdev, tdn);
 
 	if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
-	    !MLX5_CAP_GEN(dev->mdev, disable_local_lb))
+	    (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
+	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
 		return;
 
 	mutex_lock(&dev->lb_mutex);
@@ -4158,7 +4160,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 		goto err_cnt;
 
 	dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
-	if (!dev->mdev->priv.uar)
+	if (IS_ERR(dev->mdev->priv.uar))
 		goto err_cong;
 
 	err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
@@ -4187,7 +4189,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	}
 
 	if ((MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
-	    MLX5_CAP_GEN(mdev, disable_local_lb))
+	    (MLX5_CAP_GEN(mdev, disable_local_lb_uc) ||
+	     MLX5_CAP_GEN(mdev, disable_local_lb_mc)))
 		mutex_init(&dev->lb_mutex);
 
 	dev->ib_active = true;
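
mlx5_get_uars_page() reports failure with ERR_PTR(), never NULL, so the
old !ptr test treated every error as success. The usual pattern for
ERR_PTR-returning APIs (the PTR_ERR() line is the generic form; this
hunk only needs the IS_ERR() test):

	uar = mlx5_get_uars_page(mdev);
	if (IS_ERR(uar)) {
		err = PTR_ERR(uar);	/* recover the negative errno */
		goto err_cong;
	}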
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 31ad2885..cffe596 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -4362,12 +4362,11 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
 
 	memset(ah_attr, 0, sizeof(*ah_attr));
 
-	ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port);
-	rdma_ah_set_port_num(ah_attr, path->port);
-	if (rdma_ah_get_port_num(ah_attr) == 0 ||
-	    rdma_ah_get_port_num(ah_attr) > MLX5_CAP_GEN(dev, num_ports))
+	if (!path->port || path->port > MLX5_CAP_GEN(dev, num_ports))
 		return;
 
+	ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port);
+
 	rdma_ah_set_port_num(ah_attr, path->port);
 	rdma_ah_set_sl(ah_attr, path->dci_cfi_prio_sl & 0xf);
 
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 720dfb3..1b02283 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -741,6 +741,7 @@ isert_connect_error(struct rdma_cm_id *cma_id)
 {
 	struct isert_conn *isert_conn = cma_id->qp->qp_context;
 
+	ib_drain_qp(isert_conn->qp);
 	list_del_init(&isert_conn->node);
 	isert_conn->cm_id = NULL;
 	isert_put_conn(isert_conn);
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index fcf7235..157e1d9 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -24,6 +24,7 @@
 #include <linux/kernel.h>
 #include <linux/clk.h>
 #include <linux/slab.h>
+#include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/mmc/host.h>
@@ -667,3 +668,5 @@ int renesas_sdhi_remove(struct platform_device *pdev)
 	return 0;
 }
 EXPORT_SYMBOL_GPL(renesas_sdhi_remove);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index f7f157a..555c7f1 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1424,7 +1424,9 @@ static const struct file_operations s3cmci_fops_state = {
 struct s3cmci_reg {
 	unsigned short	addr;
 	unsigned char	*name;
-} debug_regs[] = {
+};
+
+static const struct s3cmci_reg debug_regs[] = {
 	DBG_REG(CON),
 	DBG_REG(PRE),
 	DBG_REG(CMDARG),
@@ -1446,7 +1448,7 @@ struct s3cmci_reg {
 static int s3cmci_regs_show(struct seq_file *seq, void *v)
 {
 	struct s3cmci_host *host = seq->private;
-	struct s3cmci_reg *rptr = debug_regs;
+	const struct s3cmci_reg *rptr = debug_regs;
 
 	for (; rptr->name; rptr++)
 		seq_printf(seq, "SDI%s\t=0x%08x\n", rptr->name,
diff --git a/drivers/mux/core.c b/drivers/mux/core.c
index 2260063..6e5cf9d 100644
--- a/drivers/mux/core.c
+++ b/drivers/mux/core.c
@@ -413,6 +413,7 @@ static int of_dev_node_match(struct device *dev, const void *data)
 	return dev->of_node == data;
 }
 
+/* Note: this function returns a reference to the mux_chip dev. */
 static struct mux_chip *of_find_mux_chip_by_node(struct device_node *np)
 {
 	struct device *dev;
@@ -466,6 +467,7 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
 	    (!args.args_count && (mux_chip->controllers > 1))) {
 		dev_err(dev, "%pOF: wrong #mux-control-cells for %pOF\n",
 			np, args.np);
+		put_device(&mux_chip->dev);
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -476,10 +478,10 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
 	if (controller >= mux_chip->controllers) {
 		dev_err(dev, "%pOF: bad mux controller %u specified in %pOF\n",
 			np, controller, args.np);
+		put_device(&mux_chip->dev);
 		return ERR_PTR(-EINVAL);
 	}
 
-	get_device(&mux_chip->dev);
 	return &mux_chip->mux[controller];
 }
 EXPORT_SYMBOL_GPL(mux_control_get);
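
The reference taken by the class_find_device()-based lookup in
of_find_mux_chip_by_node() is now the one handed to the caller, so each
early exit after a successful lookup must drop it. The ownership rule,
sketched (the condition names are placeholders):

	mux_chip = of_find_mux_chip_by_node(np);  /* +1 on mux_chip->dev */
	if (bad_cells || bad_controller) {
		put_device(&mux_chip->dev);	  /* error: drop it      */
		return ERR_PTR(-EINVAL);
	}
	return &mux_chip->mux[controller];	  /* success: caller owns it */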
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
index 9eb5e22..f79da4b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
@@ -16,7 +16,6 @@
 #include <linux/pci.h>
 
 #include "ver.h"
-#include "aq_nic.h"
 #include "aq_cfg.h"
 #include "aq_utils.h"
 
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index b3825de..5d67f13 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -7,7 +7,7 @@
  * version 2, as published by the Free Software Foundation.
  */
 
-/* File aq_hw.h: Declaraion of abstract interface for NIC hardware specific
+/* File aq_hw.h: Declaration of abstract interface for NIC hardware specific
  * functions.
  */
 
@@ -15,6 +15,8 @@
 #define AQ_HW_H
 
 #include "aq_common.h"
+#include "aq_rss.h"
+#include "hw_atl/hw_atl_utils.h"
 
 /* NIC H/W capabilities */
 struct aq_hw_caps_s {
@@ -86,13 +88,33 @@ struct aq_stats_s {
 
 #define AQ_HW_FLAG_ERRORS      (AQ_HW_FLAG_ERR_HW | AQ_HW_FLAG_ERR_UNPLUG)
 
+#define AQ_NIC_FLAGS_IS_NOT_READY (AQ_NIC_FLAG_STOPPING | \
+			AQ_NIC_FLAG_RESETTING | AQ_NIC_FLAG_CLOSING | \
+			AQ_NIC_FLAG_ERR_UNPLUG | AQ_NIC_FLAG_ERR_HW)
+
+#define AQ_NIC_FLAGS_IS_NOT_TX_READY (AQ_NIC_FLAGS_IS_NOT_READY | \
+					AQ_NIC_LINK_DOWN)
+
 struct aq_hw_s {
-	struct aq_obj_s header;
+	atomic_t flags;
 	struct aq_nic_cfg_s *aq_nic_cfg;
 	struct aq_pci_func_s *aq_pci_func;
 	void __iomem *mmio;
 	unsigned int not_ff_addr;
 	struct aq_hw_link_status_s aq_link_status;
+	struct hw_aq_atl_utils_mbox mbox;
+	struct hw_atl_stats_s last_stats;
+	struct aq_stats_s curr_stats;
+	u64 speed;
+	u32 itr_tx;
+	u32 itr_rx;
+	unsigned int chip_features;
+	u32 fw_ver_actual;
+	atomic_t dpc;
+	u32 mbox_addr;
+	u32 rpc_addr;
+	u32 rpc_tid;
+	struct hw_aq_atl_utils_fw_rpc rpc;
 };
 
 struct aq_ring_s;
@@ -102,7 +124,7 @@ struct sk_buff;
 
 struct aq_hw_ops {
 	struct aq_hw_s *(*create)(struct aq_pci_func_s *aq_pci_func,
-				  unsigned int port, struct aq_hw_ops *ops);
+				  unsigned int port);
 
 	void (*destroy)(struct aq_hw_s *self);
 
@@ -124,7 +146,6 @@ struct aq_hw_ops {
 				      struct aq_ring_s *aq_ring);
 
 	int (*hw_get_mac_permanent)(struct aq_hw_s *self,
-				    struct aq_hw_caps_s *aq_hw_caps,
 				    u8 *mac);
 
 	int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr);
@@ -135,8 +156,7 @@ struct aq_hw_ops {
 
 	int (*hw_reset)(struct aq_hw_s *self);
 
-	int (*hw_init)(struct aq_hw_s *self, struct aq_nic_cfg_s *aq_nic_cfg,
-		       u8 *mac_addr);
+	int (*hw_init)(struct aq_hw_s *self, u8 *mac_addr);
 
 	int (*hw_start)(struct aq_hw_s *self);
 
@@ -184,7 +204,8 @@ struct aq_hw_ops {
 			       struct aq_rss_parameters *rss_params);
 
 	int (*hw_get_regs)(struct aq_hw_s *self,
-			   struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff);
+			   const struct aq_hw_caps_s *aq_hw_caps,
+			   u32 *regs_buff);
 
 	int (*hw_update_stats)(struct aq_hw_s *self);
 
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
index 5f13465..27e250d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
@@ -40,7 +40,7 @@ u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg)
 	u32 value = readl(hw->mmio + reg);
 
 	if ((~0U) == value && (~0U) == readl(hw->mmio + hw->not_ff_addr))
-		aq_utils_obj_set(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG);
+		aq_utils_obj_set(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG);
 
 	return value;
 }
@@ -54,11 +54,11 @@ int aq_hw_err_from_flags(struct aq_hw_s *hw)
 {
 	int err = 0;
 
-	if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
+	if (aq_utils_obj_test(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
 		err = -ENXIO;
 		goto err_exit;
 	}
-	if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_HW)) {
+	if (aq_utils_obj_test(&hw->flags, AQ_HW_FLAG_ERR_HW)) {
 		err = -EIO;
 		goto err_exit;
 	}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 5d6c40d..887bc84 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -13,37 +13,32 @@
 #include "aq_nic.h"
 #include "aq_pci_func.h"
 #include "aq_ethtool.h"
-#include "hw_atl/hw_atl_a0.h"
-#include "hw_atl/hw_atl_b0.h"
 
 #include <linux/netdevice.h>
 #include <linux/module.h>
 
-static const struct pci_device_id aq_pci_tbl[] = {
-	{ PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_0001), },
-	{ PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D100), },
-	{ PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D107), },
-	{ PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D108), },
-	{ PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D109), },
-	{}
-};
-
-MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
-
 MODULE_LICENSE("GPL v2");
 MODULE_VERSION(AQ_CFG_DRV_VERSION);
 MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
 MODULE_DESCRIPTION(AQ_CFG_DRV_DESC);
 
-static struct aq_hw_ops *aq_pci_probe_get_hw_ops_by_id(struct pci_dev *pdev)
+static const struct net_device_ops aq_ndev_ops;
+
+struct net_device *aq_ndev_alloc(void)
 {
-	struct aq_hw_ops *ops = NULL;
+	struct net_device *ndev = NULL;
+	struct aq_nic_s *aq_nic = NULL;
 
-	ops = hw_atl_a0_get_ops_by_id(pdev);
-	if (!ops)
-		ops = hw_atl_b0_get_ops_by_id(pdev);
+	ndev = alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX);
+	if (!ndev)
+		return NULL;
 
-	return ops;
+	aq_nic = netdev_priv(ndev);
+	aq_nic->ndev = ndev;
+	ndev->netdev_ops = &aq_ndev_ops;
+	ndev->ethtool_ops = &aq_ethtool_ops;
+
+	return ndev;
 }
 
 static int aq_ndev_open(struct net_device *ndev)
@@ -170,66 +165,3 @@ static const struct net_device_ops aq_ndev_ops = {
 	.ndo_set_mac_address = aq_ndev_set_mac_address,
 	.ndo_set_features = aq_ndev_set_features
 };
-
-static int aq_pci_probe(struct pci_dev *pdev,
-			const struct pci_device_id *pci_id)
-{
-	struct aq_hw_ops *aq_hw_ops = NULL;
-	struct aq_pci_func_s *aq_pci_func = NULL;
-	int err = 0;
-
-	err = pci_enable_device(pdev);
-	if (err < 0)
-		goto err_exit;
-	aq_hw_ops = aq_pci_probe_get_hw_ops_by_id(pdev);
-	aq_pci_func = aq_pci_func_alloc(aq_hw_ops, pdev,
-					&aq_ndev_ops, &aq_ethtool_ops);
-	if (!aq_pci_func) {
-		err = -ENOMEM;
-		goto err_exit;
-	}
-	err = aq_pci_func_init(aq_pci_func);
-	if (err < 0)
-		goto err_exit;
-
-err_exit:
-	if (err < 0) {
-		if (aq_pci_func)
-			aq_pci_func_free(aq_pci_func);
-	}
-	return err;
-}
-
-static void aq_pci_remove(struct pci_dev *pdev)
-{
-	struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
-
-	aq_pci_func_deinit(aq_pci_func);
-	aq_pci_func_free(aq_pci_func);
-}
-
-static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
-{
-	struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
-
-	return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg);
-}
-
-static int aq_pci_resume(struct pci_dev *pdev)
-{
-	struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
-	pm_message_t pm_msg = PMSG_RESTORE;
-
-	return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg);
-}
-
-static struct pci_driver aq_pci_ops = {
-	.name = AQ_CFG_DRV_NAME,
-	.id_table = aq_pci_tbl,
-	.probe = aq_pci_probe,
-	.remove = aq_pci_remove,
-	.suspend = aq_pci_suspend,
-	.resume = aq_pci_resume,
-};
-
-module_pci_driver(aq_pci_ops);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
index 9748e7e..ce92152 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
@@ -14,4 +14,6 @@
 
 #include "aq_common.h"
 
+struct net_device *aq_ndev_alloc(void);
+
 #endif /* AQ_MAIN_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 75a894a..d982513 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -14,7 +14,7 @@
 #include "aq_vec.h"
 #include "aq_hw.h"
 #include "aq_pci_func.h"
-#include "aq_nic_internal.h"
+#include "aq_main.h"
 
 #include <linux/moduleparam.h>
 #include <linux/netdevice.h>
@@ -150,9 +150,9 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
 
 	self->link_status = self->aq_hw->aq_link_status;
 	if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
-		aq_utils_obj_set(&self->header.flags,
+		aq_utils_obj_set(&self->flags,
 				 AQ_NIC_FLAG_STARTED);
-		aq_utils_obj_clear(&self->header.flags,
+		aq_utils_obj_clear(&self->flags,
 				   AQ_NIC_LINK_DOWN);
 		netif_carrier_on(self->ndev);
 		netif_tx_wake_all_queues(self->ndev);
@@ -160,7 +160,7 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
 	if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
 		netif_carrier_off(self->ndev);
 		netif_tx_disable(self->ndev);
-		aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
+		aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN);
 	}
 	return 0;
 }
@@ -171,7 +171,7 @@ static void aq_nic_service_timer_cb(struct timer_list *t)
 	int ctimer = AQ_CFG_SERVICE_TIMER_INTERVAL;
 	int err = 0;
 
-	if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
+	if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
 		goto err_exit;
 
 	err = aq_nic_update_link_status(self);
@@ -205,14 +205,7 @@ static void aq_nic_polling_timer_cb(struct timer_list *t)
 		AQ_CFG_POLLING_TIMER_INTERVAL);
 }
 
-static struct net_device *aq_nic_ndev_alloc(void)
-{
-	return alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX);
-}
-
-struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
-				   const struct ethtool_ops *et_ops,
-				   struct pci_dev *pdev,
+struct aq_nic_s *aq_nic_alloc_cold(struct pci_dev *pdev,
 				   struct aq_pci_func_s *aq_pci_func,
 				   unsigned int port,
 				   const struct aq_hw_ops *aq_hw_ops)
@@ -221,7 +214,7 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
 	struct aq_nic_s *self = NULL;
 	int err = 0;
 
-	ndev = aq_nic_ndev_alloc();
+	ndev = aq_ndev_alloc();
 	if (!ndev) {
 		err = -ENOMEM;
 		goto err_exit;
@@ -229,9 +222,6 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
 
 	self = netdev_priv(ndev);
 
-	ndev->netdev_ops = ndev_ops;
-	ndev->ethtool_ops = et_ops;
-
 	SET_NETDEV_DEV(ndev, &pdev->dev);
 
 	ndev->if_port = port;
@@ -242,8 +232,9 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
 	self->aq_hw_ops = *aq_hw_ops;
 	self->port = (u8)port;
 
-	self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port,
-						&self->aq_hw_ops);
+	self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port);
+	self->aq_hw->aq_nic_cfg = &self->aq_nic_cfg;
+
 	err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps,
 					  pdev->device, pdev->subsystem_device);
 	if (err < 0)
@@ -268,7 +259,6 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
 		goto err_exit;
 	}
 	err = self->aq_hw_ops.hw_get_mac_permanent(self->aq_hw,
-			    self->aq_nic_cfg.aq_hw_caps,
 			    self->ndev->dev_addr);
 	if (err < 0)
 		goto err_exit;
@@ -295,7 +285,7 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
 
 int aq_nic_ndev_init(struct aq_nic_s *self)
 {
-	struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
+	const struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
 	struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;
 
 	self->ndev->hw_features |= aq_hw_caps->hw_features;
@@ -366,11 +356,6 @@ void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
 	self->aq_ring_tx[idx] = ring;
 }
 
-struct device *aq_nic_get_dev(struct aq_nic_s *self)
-{
-	return self->ndev->dev.parent;
-}
-
 struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
 {
 	return self->ndev;
@@ -387,7 +372,7 @@ int aq_nic_init(struct aq_nic_s *self)
 	if (err < 0)
 		goto err_exit;
 
-	err = self->aq_hw_ops.hw_init(self->aq_hw, &self->aq_nic_cfg,
+	err = self->aq_hw_ops.hw_init(self->aq_hw,
 			    aq_nic_get_ndev(self)->dev_addr);
 	if (err < 0)
 		goto err_exit;
@@ -992,7 +977,7 @@ void aq_nic_free_hot_resources(struct aq_nic_s *self)
 	if (!self)
 		goto err_exit;
 
-	for (i = AQ_DIMOF(self->aq_vec); i--;) {
+	for (i = ARRAY_SIZE(self->aq_vec); i--;) {
 		if (self->aq_vec[i]) {
 			aq_vec_free(self->aq_vec[i]);
 			self->aq_vec[i] = NULL;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 3c9f8db..1cd7d72 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -14,10 +14,13 @@
 
 #include "aq_common.h"
 #include "aq_rss.h"
+#include "aq_hw.h"
 
 struct aq_ring_s;
 struct aq_pci_func_s;
 struct aq_hw_ops;
+struct aq_fw_s;
+struct aq_vec_s;
 
 #define AQ_NIC_FC_OFF    0U
 #define AQ_NIC_FC_TX     1U
@@ -33,7 +36,7 @@ struct aq_hw_ops;
 #define AQ_NIC_RATE_100M       BIT(5)
 
 struct aq_nic_cfg_s {
-	struct aq_hw_caps_s *aq_hw_caps;
+	const struct aq_hw_caps_s *aq_hw_caps;
 	u64 hw_features;
 	u32 rxds;		/* rx ring size, descriptors # */
 	u32 txds;		/* tx ring size, descriptors # */
@@ -44,7 +47,6 @@ struct aq_nic_cfg_s {
 	u16 tx_itr;
 	u32 num_rss_queues;
 	u32 mtu;
-	u32 ucp_0x364;
 	u32 flow_control;
 	u32 link_speed_msk;
 	u32 vlan_id;
@@ -69,9 +71,38 @@ struct aq_nic_cfg_s {
 #define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \
 	((_TC_) * AQ_CFG_TCS_MAX + (_VEC_))
 
-struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
-				   const struct ethtool_ops *et_ops,
-				   struct pci_dev *pdev,
+struct aq_nic_s {
+	atomic_t flags;
+	struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
+	struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX];
+	struct aq_hw_s *aq_hw;
+	struct net_device *ndev;
+	struct aq_pci_func_s *aq_pci_func;
+	unsigned int aq_vecs;
+	unsigned int packet_filter;
+	unsigned int power_state;
+	u8 port;
+	struct aq_hw_ops aq_hw_ops;
+	struct aq_hw_caps_s aq_hw_caps;
+	struct aq_nic_cfg_s aq_nic_cfg;
+	struct timer_list service_timer;
+	struct timer_list polling_timer;
+	struct aq_hw_link_status_s link_status;
+	struct {
+		u32 count;
+		u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN];
+	} mc_list;
+
+	struct pci_dev *pdev;
+	unsigned int msix_entry_mask;
+};
+
+static inline struct device *aq_nic_get_dev(struct aq_nic_s *self)
+{
+	return self->ndev->dev.parent;
+}
+
+struct aq_nic_s *aq_nic_alloc_cold(struct pci_dev *pdev,
 				   struct aq_pci_func_s *aq_pci_func,
 				   unsigned int port,
 				   const struct aq_hw_ops *aq_hw_ops);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h
deleted file mode 100644
index e7d2711..0000000
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- */
-
-/* File aq_nic_internal.h: Definition of private object structure. */
-
-#ifndef AQ_NIC_INTERNAL_H
-#define AQ_NIC_INTERNAL_H
-
-struct aq_nic_s {
-	struct aq_obj_s header;
-	struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
-	struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX];
-	struct aq_hw_s *aq_hw;
-	struct net_device *ndev;
-	struct aq_pci_func_s *aq_pci_func;
-	unsigned int aq_vecs;
-	unsigned int packet_filter;
-	unsigned int power_state;
-	u8 port;
-	struct aq_hw_ops aq_hw_ops;
-	struct aq_hw_caps_s aq_hw_caps;
-	struct aq_nic_cfg_s aq_nic_cfg;
-	struct timer_list service_timer;
-	struct timer_list polling_timer;
-	struct aq_hw_link_status_s link_status;
-	struct {
-		u32 count;
-		u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN];
-	} mc_list;
-};
-
-#define AQ_NIC_FLAGS_IS_NOT_READY (AQ_NIC_FLAG_STOPPING | \
-			AQ_NIC_FLAG_RESETTING | AQ_NIC_FLAG_CLOSING | \
-			AQ_NIC_FLAG_ERR_UNPLUG | AQ_NIC_FLAG_ERR_HW)
-
-#define AQ_NIC_FLAGS_IS_NOT_TX_READY (AQ_NIC_FLAGS_IS_NOT_READY | \
-					AQ_NIC_LINK_DOWN)
-
-#endif /* AQ_NIC_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 58c29d0..78ef7d2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -9,11 +9,15 @@
 
 /* File aq_pci_func.c: Definition of PCI functions. */
 
+#include <linux/interrupt.h>
+#include <linux/module.h>
+
 #include "aq_pci_func.h"
 #include "aq_nic.h"
 #include "aq_vec.h"
 #include "aq_hw.h"
-#include <linux/interrupt.h>
+#include "hw_atl/hw_atl_a0.h"
+#include "hw_atl/hw_atl_b0.h"
 
 struct aq_pci_func_s {
 	struct pci_dev *pdev;
@@ -29,10 +33,30 @@ struct aq_pci_func_s {
 	struct aq_hw_caps_s aq_hw_caps;
 };
 
-struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
-					struct pci_dev *pdev,
-					const struct net_device_ops *ndev_ops,
-					const struct ethtool_ops *eth_ops)
+static const struct pci_device_id aq_pci_tbl[] = {
+	{ PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_0001), },
+	{ PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D100), },
+	{ PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D107), },
+	{ PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D108), },
+	{ PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D109), },
+	{}
+};
+
+MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
+
+static const struct aq_hw_ops *aq_pci_probe_get_hw_ops_by_id(struct pci_dev *pdev)
+{
+	const struct aq_hw_ops *ops = NULL;
+
+	ops = hw_atl_a0_get_ops_by_id(pdev);
+	if (!ops)
+		ops = hw_atl_b0_get_ops_by_id(pdev);
+
+	return ops;
+}
+
+struct aq_pci_func_s *aq_pci_func_alloc(const struct aq_hw_ops *aq_hw_ops,
+					struct pci_dev *pdev)
 {
 	struct aq_pci_func_s *self = NULL;
 	int err = 0;
@@ -59,8 +83,7 @@ struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
 	self->ports = self->aq_hw_caps.ports;
 
 	for (port = 0; port < self->ports; ++port) {
-		struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops,
-							    pdev, self,
+		struct aq_nic_s *aq_nic = aq_nic_alloc_cold(pdev, self,
 							    port, aq_hw_ops);
 
 		if (!aq_nic) {
@@ -297,3 +320,65 @@ int aq_pci_func_change_pm_state(struct aq_pci_func_s *self,
 err_exit:
 	return err;
 }
+
+static int aq_pci_probe(struct pci_dev *pdev,
+			const struct pci_device_id *pci_id)
+{
+	const struct aq_hw_ops *aq_hw_ops = NULL;
+	struct aq_pci_func_s *aq_pci_func = NULL;
+	int err = 0;
+
+	err = pci_enable_device(pdev);
+	if (err < 0)
+		goto err_exit;
+	aq_hw_ops = aq_pci_probe_get_hw_ops_by_id(pdev);
+	aq_pci_func = aq_pci_func_alloc(aq_hw_ops, pdev);
+	if (!aq_pci_func) {
+		err = -ENOMEM;
+		goto err_exit;
+	}
+	err = aq_pci_func_init(aq_pci_func);
+	if (err < 0)
+		goto err_exit;
+
+err_exit:
+	if (err < 0) {
+		if (aq_pci_func)
+			aq_pci_func_free(aq_pci_func);
+	}
+	return err;
+}
+
+static void aq_pci_remove(struct pci_dev *pdev)
+{
+	struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
+
+	aq_pci_func_deinit(aq_pci_func);
+	aq_pci_func_free(aq_pci_func);
+}
+
+static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
+{
+	struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
+
+	return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg);
+}
+
+static int aq_pci_resume(struct pci_dev *pdev)
+{
+	struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
+	pm_message_t pm_msg = PMSG_RESTORE;
+
+	return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg);
+}
+
+static struct pci_driver aq_pci_ops = {
+	.name = AQ_CFG_DRV_NAME,
+	.id_table = aq_pci_tbl,
+	.probe = aq_pci_probe,
+	.remove = aq_pci_remove,
+	.suspend = aq_pci_suspend,
+	.resume = aq_pci_resume,
+};
+
+module_pci_driver(aq_pci_ops);
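
With the pci_driver and the device table now living next to the probe
code, module_pci_driver() keeps working unchanged; it expands to roughly
this init/exit boilerplate:

	static int __init aq_pci_ops_init(void)
	{
		return pci_register_driver(&aq_pci_ops);
	}
	module_init(aq_pci_ops_init);

	static void __exit aq_pci_ops_exit(void)
	{
		pci_unregister_driver(&aq_pci_ops);
	}
	module_exit(aq_pci_ops_exit);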
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
index ecb0337..5f100ea 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
@@ -13,11 +13,10 @@
 #define AQ_PCI_FUNC_H
 
 #include "aq_common.h"
+#include "aq_nic.h"
 
-struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *hw_ops,
-					struct pci_dev *pdev,
-					const struct net_device_ops *ndev_ops,
-					const struct ethtool_ops *eth_ops);
+struct aq_pci_func_s *aq_pci_func_alloc(const struct aq_hw_ops *hw_ops,
+					struct pci_dev *pdev);
 int aq_pci_func_init(struct aq_pci_func_s *self);
 int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i,
 			  char *name, void *aq_vec,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 519ca65..0be6a11 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -279,10 +279,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
 
 		skb_record_rx_queue(skb, self->idx);
 
-		napi_gro_receive(napi, skb);
-
 		++self->stats.rx.packets;
 		self->stats.rx.bytes += skb->len;
+
+		napi_gro_receive(napi, skb);
 	}
 
 err_exit:
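
The swap is an ordering fix: napi_gro_receive() may consume and free the
skb, so skb->len must be sampled for the byte counter while the driver
still owns the buffer:

	++self->stats.rx.packets;
	self->stats.rx.bytes += skb->len;	/* read len before handoff */

	napi_gro_receive(napi, skb);		/* may free the skb        */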
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 58440787..965fae0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -15,6 +15,7 @@
 #include "aq_common.h"
 
 struct page;
+struct aq_nic_cfg_s;
 
 /*           TxC       SOP        DX         EOP
  *         +----------+----------+----------+-----------
@@ -105,7 +106,6 @@ union aq_ring_stats_s {
 };
 
 struct aq_ring_s {
-	struct aq_obj_s header;
 	struct aq_ring_buff_s *buff_ring;
 	u8 *dx_ring;		/* descriptors ring, dma shared mem */
 	struct aq_nic_s *aq_nic;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
index e12bcdf..786ea81 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
@@ -14,12 +14,6 @@
 
 #include "aq_common.h"
 
-#define AQ_DIMOF(_ARY_)  ARRAY_SIZE(_ARY_)
-
-struct aq_obj_s {
-	atomic_t flags;
-};
-
 static inline void aq_utils_obj_set(atomic_t *flags, u32 mask)
 {
 	unsigned long flags_old, flags_new;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index 5fecc9a..f890b8a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -19,8 +19,7 @@
 #include <linux/netdevice.h>
 
 struct aq_vec_s {
-	struct aq_obj_s header;
-	struct aq_hw_ops *aq_hw_ops;
+	const struct aq_hw_ops *aq_hw_ops;
 	struct aq_hw_s *aq_hw;
 	struct aq_nic_s *aq_nic;
 	unsigned int tx_rings;
@@ -166,7 +165,7 @@ struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
 	return self;
 }
 
-int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
+int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
 		struct aq_hw_s *aq_hw)
 {
 	struct aq_ring_s *ring = NULL;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
index 6c68b18..8bdf60b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
@@ -19,6 +19,8 @@
 
 struct aq_hw_s;
 struct aq_hw_ops;
+struct aq_nic_s;
+struct aq_nic_cfg_s;
 struct aq_ring_stats_rx_s;
 struct aq_ring_stats_tx_s;
 
@@ -26,7 +28,7 @@ irqreturn_t aq_vec_isr(int irq, void *private);
 irqreturn_t aq_vec_isr_legacy(int irq, void *private);
 struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
 			      struct aq_nic_cfg_s *aq_nic_cfg);
-int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
+int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
 		struct aq_hw_s *aq_hw);
 void aq_vec_deinit(struct aq_vec_s *self);
 void aq_vec_free(struct aq_vec_s *self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index f18dce14..4a1c1b9 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -12,6 +12,7 @@
 #include "../aq_hw.h"
 #include "../aq_hw_utils.h"
 #include "../aq_ring.h"
+#include "../aq_nic.h"
 #include "hw_atl_a0.h"
 #include "hw_atl_utils.h"
 #include "hw_atl_llh.h"
@@ -36,21 +37,20 @@ static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self,
 }
 
 static struct aq_hw_s *hw_atl_a0_create(struct aq_pci_func_s *aq_pci_func,
-					unsigned int port,
-					struct aq_hw_ops *ops)
+					unsigned int port)
 {
-	struct hw_atl_s *self = NULL;
+	struct aq_hw_s *self = NULL;
 
 	self = kzalloc(sizeof(*self), GFP_KERNEL);
 	if (!self)
 		goto err_exit;
 
-	self->base.aq_pci_func = aq_pci_func;
+	self->aq_pci_func = aq_pci_func;
 
-	self->base.not_ff_addr = 0x10U;
+	self->not_ff_addr = 0x10U;
 
 err_exit:
-	return (struct aq_hw_s *)self;
+	return self;
 }
 
 static void hw_atl_a0_destroy(struct aq_hw_s *self)
@@ -62,24 +62,24 @@ static int hw_atl_a0_hw_reset(struct aq_hw_s *self)
 {
 	int err = 0;
 
-	glb_glb_reg_res_dis_set(self, 1U);
-	pci_pci_reg_res_dis_set(self, 0U);
-	rx_rx_reg_res_dis_set(self, 0U);
-	tx_tx_reg_res_dis_set(self, 0U);
+	hw_atl_glb_glb_reg_res_dis_set(self, 1U);
+	hw_atl_pci_pci_reg_res_dis_set(self, 0U);
+	hw_atl_rx_rx_reg_res_dis_set(self, 0U);
+	hw_atl_tx_tx_reg_res_dis_set(self, 0U);
 
 	HW_ATL_FLUSH();
-	glb_soft_res_set(self, 1);
+	hw_atl_glb_soft_res_set(self, 1);
 
 	/* check 10 times by 1ms */
-	AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U);
+	AQ_HW_WAIT_FOR(hw_atl_glb_soft_res_get(self) == 0, 1000U, 10U);
 	if (err < 0)
 		goto err_exit;
 
-	itr_irq_reg_res_dis_set(self, 0U);
-	itr_res_irq_set(self, 1U);
+	hw_atl_itr_irq_reg_res_dis_set(self, 0U);
+	hw_atl_itr_res_irq_set(self, 1U);
 
 	/* check 10 times by 1ms */
-	AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U);
+	AQ_HW_WAIT_FOR(hw_atl_itr_res_irq_get(self) == 0, 1000U, 10U);
 	if (err < 0)
 		goto err_exit;
 
@@ -99,51 +99,53 @@ static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
 	bool is_rx_flow_control = false;
 
 	/* TPS Descriptor rate init */
-	tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
-	tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
+	hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
+	hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
 
 	/* TPS VM init */
-	tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
+	hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
 
 	/* TPS TC credits init */
-	tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
-	tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
+	hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
+	hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
 
-	tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
-	tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
-	tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
-	tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
+	hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
+	hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
+	hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
+	hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
 
 	/* Tx buf size */
 	buff_size = HW_ATL_A0_TXBUF_MAX;
 
-	tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
-	tpb_tx_buff_hi_threshold_per_tc_set(self,
-					    (buff_size * (1024 / 32U) * 66U) /
-					    100U, tc);
-	tpb_tx_buff_lo_threshold_per_tc_set(self,
-					    (buff_size * (1024 / 32U) * 50U) /
-					    100U, tc);
+	hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+	hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
+						   (buff_size *
+						   (1024 / 32U) * 66U) /
+						   100U, tc);
+	hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
+						   (buff_size *
+						   (1024 / 32U) * 50U) /
+						   100U, tc);
 
 	/* QoS Rx buf size per TC */
 	tc = 0;
 	is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
 	buff_size = HW_ATL_A0_RXBUF_MAX;
 
-	rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
-	rpb_rx_buff_hi_threshold_per_tc_set(self,
-					    (buff_size *
-					    (1024U / 32U) * 66U) /
-					    100U, tc);
-	rpb_rx_buff_lo_threshold_per_tc_set(self,
-					    (buff_size *
-					    (1024U / 32U) * 50U) /
-					    100U, tc);
-	rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
+	hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+	hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
+						   (buff_size *
+						   (1024U / 32U) * 66U) /
+						   100U, tc);
+	hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
+						   (buff_size *
+						   (1024U / 32U) * 50U) /
+						   100U, tc);
+	hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
 
 	/* QoS 802.1p priority -> TC mapping */
 	for (i_priority = 8U; i_priority--;)
-		rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
+		hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
 
 	return aq_hw_err_from_flags(self);
 }
@@ -151,20 +153,19 @@ static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
 static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self,
 				     struct aq_rss_parameters *rss_params)
 {
-	struct aq_nic_cfg_s *cfg = NULL;
+	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
 	int err = 0;
 	unsigned int i = 0U;
 	unsigned int addr = 0U;
 
-	cfg = self->aq_nic_cfg;
-
 	for (i = 10, addr = 0U; i--; ++addr) {
 		u32 key_data = cfg->is_rss ?
 			__swab32(rss_params->hash_secret_key[i]) : 0U;
-		rpf_rss_key_wr_data_set(self, key_data);
-		rpf_rss_key_addr_set(self, addr);
-		rpf_rss_key_wr_en_set(self, 1U);
-		AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U);
+		hw_atl_rpf_rss_key_wr_data_set(self, key_data);
+		hw_atl_rpf_rss_key_addr_set(self, addr);
+		hw_atl_rpf_rss_key_wr_en_set(self, 1U);
+		AQ_HW_WAIT_FOR(hw_atl_rpf_rss_key_wr_en_get(self) == 0,
+			       1000U, 10U);
 		if (err < 0)
 			goto err_exit;
 	}
@@ -193,11 +194,12 @@ static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self,
 			((i * 3U) & 0xFU));
 	}
 
-	for (i = AQ_DIMOF(bitary); i--;) {
-		rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
-		rpf_rss_redir_tbl_addr_set(self, i);
-		rpf_rss_redir_wr_en_set(self, 1U);
-		AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U);
+	for (i = ARRAY_SIZE(bitary); i--;) {
+		hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
+		hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
+		hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
+		AQ_HW_WAIT_FOR(hw_atl_rpf_rss_redir_wr_en_get(self) == 0,
+			       1000U, 10U);
 		if (err < 0)
 			goto err_exit;
 	}
@@ -212,35 +214,35 @@ static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self,
 				    struct aq_nic_cfg_s *aq_nic_cfg)
 {
 	/* TX checksums offloads*/
-	tpo_ipv4header_crc_offload_en_set(self, 1);
-	tpo_tcp_udp_crc_offload_en_set(self, 1);
+	hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
+	hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
 
 	/* RX checksums offloads*/
-	rpo_ipv4header_crc_offload_en_set(self, 1);
-	rpo_tcp_udp_crc_offload_en_set(self, 1);
+	hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
+	hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);
 
 	/* LSO offloads*/
-	tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
+	hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
 
 	return aq_hw_err_from_flags(self);
 }
 
 static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self)
 {
-	thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
-	thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
-	thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
+	hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
+	hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
+	hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
 
 	/* Tx interrupts */
-	tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+	hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
 
 	/* misc */
 	aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
 			0x00010000U : 0x00000000U);
-	tdm_tx_dca_en_set(self, 0U);
-	tdm_tx_dca_mode_set(self, 0U);
+	hw_atl_tdm_tx_dca_en_set(self, 0U);
+	hw_atl_tdm_tx_dca_mode_set(self, 0U);
 
-	tpb_tx_path_scp_ins_en_set(self, 1U);
+	hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);
 
 	return aq_hw_err_from_flags(self);
 }
@@ -251,38 +253,38 @@ static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self)
 	int i;
 
 	/* Rx TC/RSS number config */
-	rpb_rpf_rx_traf_class_mode_set(self, 1U);
+	hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);
 
 	/* Rx flow control */
-	rpb_rx_flow_ctl_mode_set(self, 1U);
+	hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);
 
 	/* RSS Ring selection */
-	reg_rx_flr_rss_control1set(self, cfg->is_rss ?
+	hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
 					0xB3333333U : 0x00000000U);
 
 	/* Multicast filters */
 	for (i = HW_ATL_A0_MAC_MAX; i--;) {
-		rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
-		rpfl2unicast_flr_act_set(self, 1U, i);
+		hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
+		hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
 	}
 
-	reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
-	reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
+	hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
+	hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
 
 	/* Vlan filters */
-	rpf_vlan_outer_etht_set(self, 0x88A8U);
-	rpf_vlan_inner_etht_set(self, 0x8100U);
-	rpf_vlan_prom_mode_en_set(self, 1);
+	hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
+	hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
+	hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
 
 	/* Rx Interrupts */
-	rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+	hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
 
 	/* misc */
-	rpfl2broadcast_flr_act_set(self, 1U);
-	rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
+	hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
+	hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
 
-	rdm_rx_dca_en_set(self, 0U);
-	rdm_rx_dca_mode_set(self, 0U);
+	hw_atl_rdm_rx_dca_en_set(self, 0U);
+	hw_atl_rdm_rx_dca_mode_set(self, 0U);
 
 	return aq_hw_err_from_flags(self);
 }
@@ -301,10 +303,10 @@ static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
 	l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
 		(mac_addr[4] << 8) | mac_addr[5];
 
-	rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC);
-	rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC);
-	rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC);
-	rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC);
+	hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC);
+	hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC);
+	hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC);
+	hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC);
 
 	err = aq_hw_err_from_flags(self);
 
@@ -312,9 +314,7 @@ static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
 	return err;
 }
 
-static int hw_atl_a0_hw_init(struct aq_hw_s *self,
-			     struct aq_nic_cfg_s *aq_nic_cfg,
-			     u8 *mac_addr)
+static int hw_atl_a0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
 {
 	static u32 aq_hw_atl_igcr_table_[4][2] = {
 		{ 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
@@ -325,10 +325,7 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self,
 
 	int err = 0;
 
-	self->aq_nic_cfg = aq_nic_cfg;
-
-	hw_atl_utils_hw_chip_features_init(self,
-					   &PHAL_ATLANTIC_A0->chip_features);
+	struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
 
 	hw_atl_a0_hw_init_tx_path(self);
 	hw_atl_a0_hw_init_rx_path(self);
@@ -337,8 +334,8 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self,
 
 	hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk);
 
-	reg_tx_dma_debug_ctl_set(self, 0x800000b8U);
-	reg_tx_dma_debug_ctl_set(self, 0x000000b8U);
+	hw_atl_reg_tx_dma_debug_ctl_set(self, 0x800000b8U);
+	hw_atl_reg_tx_dma_debug_ctl_set(self, 0x000000b8U);
 
 	hw_atl_a0_hw_qos_set(self);
 	hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
@@ -353,19 +350,18 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self,
 		goto err_exit;
 
 	/* Interrupts */
-	reg_irq_glb_ctl_set(self,
-			    aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
-						 [(aq_nic_cfg->vecs > 1U) ?
-						 1 : 0]);
+	hw_atl_reg_irq_glb_ctl_set(self,
+				   aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
+					[(aq_nic_cfg->vecs > 1U) ? 1 : 0]);
 
-	itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
+	hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
 
 	/* Interrupts */
-	reg_gen_irq_map_set(self,
-			    ((HW_ATL_A0_ERR_INT << 0x18) | (1U << 0x1F)) |
-			    ((HW_ATL_A0_ERR_INT << 0x10) | (1U << 0x17)) |
-			    ((HW_ATL_A0_ERR_INT << 8) | (1U << 0xF)) |
-			    ((HW_ATL_A0_ERR_INT) | (1U << 0x7)), 0U);
+	hw_atl_reg_gen_irq_map_set(self,
+				   ((HW_ATL_A0_ERR_INT << 0x18) | (1U << 0x1F)) |
+				   ((HW_ATL_A0_ERR_INT << 0x10) | (1U << 0x17)) |
+				   ((HW_ATL_A0_ERR_INT << 8) | (1U << 0xF)) |
+				   ((HW_ATL_A0_ERR_INT) | (1U << 0x7)), 0U);
 
 	hw_atl_a0_hw_offload_set(self, aq_nic_cfg);
 
@@ -376,28 +372,28 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self,
 static int hw_atl_a0_hw_ring_tx_start(struct aq_hw_s *self,
 				      struct aq_ring_s *ring)
 {
-	tdm_tx_desc_en_set(self, 1, ring->idx);
+	hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
 	return aq_hw_err_from_flags(self);
 }
 
 static int hw_atl_a0_hw_ring_rx_start(struct aq_hw_s *self,
 				      struct aq_ring_s *ring)
 {
-	rdm_rx_desc_en_set(self, 1, ring->idx);
+	hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
 	return aq_hw_err_from_flags(self);
 }
 
 static int hw_atl_a0_hw_start(struct aq_hw_s *self)
 {
-	tpb_tx_buff_en_set(self, 1);
-	rpb_rx_buff_en_set(self, 1);
+	hw_atl_tpb_tx_buff_en_set(self, 1);
+	hw_atl_rpb_rx_buff_en_set(self, 1);
 	return aq_hw_err_from_flags(self);
 }
 
 static int hw_atl_a0_hw_tx_ring_tail_update(struct aq_hw_s *self,
 					    struct aq_ring_s *ring)
 {
-	reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
+	hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
 	return 0;
 }
 
@@ -483,36 +479,37 @@ static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
 	u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
 	u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
 
-	rdm_rx_desc_en_set(self, false, aq_ring->idx);
+	hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);
 
-	rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+	hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
 
-	reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
-					   aq_ring->idx);
+	hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
+						  aq_ring->idx);
 
-	reg_rx_dma_desc_base_addressmswset(self,
-					   dma_desc_addr_msw, aq_ring->idx);
+	hw_atl_reg_rx_dma_desc_base_addressmswset(self,
+						  dma_desc_addr_msw,
+						  aq_ring->idx);
 
-	rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+	hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
 
-	rdm_rx_desc_data_buff_size_set(self,
-				       AQ_CFG_RX_FRAME_MAX / 1024U,
+	hw_atl_rdm_rx_desc_data_buff_size_set(self,
+					      AQ_CFG_RX_FRAME_MAX / 1024U,
 				       aq_ring->idx);
 
-	rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
-	rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
-	rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
+	hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
+	hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+	hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
 
 	/* Rx ring set mode */
 
 	/* Mapping interrupt vector */
-	itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
-	itr_irq_map_en_rx_set(self, true, aq_ring->idx);
+	hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+	hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);
 
-	rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
-	rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
-	rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
-	rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
+	hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+	hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
+	hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
+	hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
 
 	return aq_hw_err_from_flags(self);
 }
@@ -524,25 +521,25 @@ static int hw_atl_a0_hw_ring_tx_init(struct aq_hw_s *self,
 	u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
 	u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
 
-	reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
-					   aq_ring->idx);
+	hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
+						  aq_ring->idx);
 
-	reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
-					   aq_ring->idx);
+	hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
+						  aq_ring->idx);
 
-	tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+	hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
 
 	hw_atl_a0_hw_tx_ring_tail_update(self, aq_ring);
 
 	/* Set Tx threshold */
-	tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
+	hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
 
 	/* Mapping interrupt vector */
-	itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
-	itr_irq_map_en_tx_set(self, true, aq_ring->idx);
+	hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+	hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);
 
-	tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
-	tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
+	hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+	hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
 
 	return aq_hw_err_from_flags(self);
 }
@@ -563,7 +560,7 @@ static int hw_atl_a0_hw_ring_rx_fill(struct aq_hw_s *self,
 		rxd->hdr_addr = 0U;
 	}
 
-	reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
+	hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
 
 	return aq_hw_err_from_flags(self);
 }
@@ -572,13 +569,13 @@ static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self,
 					    struct aq_ring_s *ring)
 {
 	int err = 0;
-	unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx);
+	unsigned int hw_head = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
 
-	if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
+	if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
 		err = -ENXIO;
 		goto err_exit;
 	}
-	ring->hw_head = hw_head_;
+	ring->hw_head = hw_head;
 	err = aq_hw_err_from_flags(self);
 
 err_exit:
@@ -602,15 +599,16 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
 
 		if (!(rxd_wb->status & 0x5U)) { /* RxD is not done */
 			if ((1U << 4) &
-				reg_rx_dma_desc_status_get(self, ring->idx)) {
-			rdm_rx_desc_en_set(self, false, ring->idx);
-			rdm_rx_desc_res_set(self, true, ring->idx);
-			rdm_rx_desc_res_set(self, false, ring->idx);
-			rdm_rx_desc_en_set(self, true, ring->idx);
+			hw_atl_reg_rx_dma_desc_status_get(self, ring->idx)) {
+				hw_atl_rdm_rx_desc_en_set(self, false, ring->idx);
+				hw_atl_rdm_rx_desc_res_set(self, true, ring->idx);
+				hw_atl_rdm_rx_desc_res_set(self, false, ring->idx);
+				hw_atl_rdm_rx_desc_en_set(self, true, ring->idx);
 			}
 
 			if (ring->hw_head ||
-			    (rdm_rx_desc_head_ptr_get(self, ring->idx) < 2U)) {
+			    (hw_atl_rdm_rx_desc_head_ptr_get(self,
+							     ring->idx) < 2U)) {
 				break;
 			} else if (!(rxd_wb->status & 0x1U)) {
 				struct hw_atl_rxd_wb_s *rxd_wb1 =
@@ -693,26 +691,25 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
 
 static int hw_atl_a0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
 {
-	itr_irq_msk_setlsw_set(self, LODWORD(mask) |
+	hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask) |
 			       (1U << HW_ATL_A0_ERR_INT));
 	return aq_hw_err_from_flags(self);
 }
 
 static int hw_atl_a0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
 {
-	itr_irq_msk_clearlsw_set(self, LODWORD(mask));
-	itr_irq_status_clearlsw_set(self, LODWORD(mask));
+	hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
+	hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
 
-	if ((1U << 16) & reg_gen_irq_status_get(self))
-
-		atomic_inc(&PHAL_ATLANTIC_A0->dpc);
+	if ((1U << 16) & hw_atl_reg_gen_irq_status_get(self))
+		atomic_inc(&self->dpc);
 
 	return aq_hw_err_from_flags(self);
 }
 
 static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
 {
-	*mask = itr_irq_statuslsw_get(self);
+	*mask = hw_atl_itr_irq_statuslsw_get(self);
 	return aq_hw_err_from_flags(self);
 }
 
@@ -723,18 +720,20 @@ static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
 {
 	unsigned int i = 0U;
 
-	rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
-	rpfl2multicast_flr_en_set(self, IS_FILTER_ENABLED(IFF_MULTICAST), 0);
-	rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
+	hw_atl_rpfl2promiscuous_mode_en_set(self,
+					    IS_FILTER_ENABLED(IFF_PROMISC));
+	hw_atl_rpfl2multicast_flr_en_set(self,
+					 IS_FILTER_ENABLED(IFF_MULTICAST), 0);
+	hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
 
 	self->aq_nic_cfg->is_mc_list_enabled =
 			IS_FILTER_ENABLED(IFF_MULTICAST);
 
 	for (i = HW_ATL_A0_MAC_MIN; i < HW_ATL_A0_MAC_MAX; ++i)
-		rpfl2_uc_flr_en_set(self,
-				    (self->aq_nic_cfg->is_mc_list_enabled &&
-				    (i <= self->aq_nic_cfg->mc_list_count)) ?
-				    1U : 0U, i);
+		hw_atl_rpfl2_uc_flr_en_set(self,
+					   (self->aq_nic_cfg->is_mc_list_enabled &&
+					   (i <= self->aq_nic_cfg->mc_list_count)) ?
+					    1U : 0U, i);
 
 	return aq_hw_err_from_flags(self);
 }
@@ -761,17 +760,19 @@ static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
 		u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
 					(ar_mac[i][4] << 8) | ar_mac[i][5];
 
-		rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i);
+		hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i);
 
-		rpfl2unicast_dest_addresslsw_set(self,
-						 l, HW_ATL_A0_MAC_MIN + i);
+		hw_atl_rpfl2unicast_dest_addresslsw_set(self,
+							l,
+							HW_ATL_A0_MAC_MIN + i);
 
-		rpfl2unicast_dest_addressmsw_set(self,
-						 h, HW_ATL_A0_MAC_MIN + i);
+		hw_atl_rpfl2unicast_dest_addressmsw_set(self,
+							h,
+							HW_ATL_A0_MAC_MIN + i);
 
-		rpfl2_uc_flr_en_set(self,
-				    (self->aq_nic_cfg->is_mc_list_enabled),
-				    HW_ATL_A0_MAC_MIN + i);
+		hw_atl_rpfl2_uc_flr_en_set(self,
+					   (self->aq_nic_cfg->is_mc_list_enabled),
+					   HW_ATL_A0_MAC_MIN + i);
 	}
 
 	err = aq_hw_err_from_flags(self);
@@ -823,7 +824,7 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self)
 	}
 
 	for (i = HW_ATL_A0_RINGS_MAX; i--;)
-		reg_irq_thr_set(self, itr_rx, i);
+		hw_atl_reg_irq_thr_set(self, itr_rx, i);
 
 	return aq_hw_err_from_flags(self);
 }
@@ -837,14 +838,14 @@ static int hw_atl_a0_hw_stop(struct aq_hw_s *self)
 static int hw_atl_a0_hw_ring_tx_stop(struct aq_hw_s *self,
 				     struct aq_ring_s *ring)
 {
-	tdm_tx_desc_en_set(self, 0U, ring->idx);
+	hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
 	return aq_hw_err_from_flags(self);
 }
 
 static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
 				     struct aq_ring_s *ring)
 {
-	rdm_rx_desc_en_set(self, 0U, ring->idx);
+	hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
 	return aq_hw_err_from_flags(self);
 }
 
@@ -860,7 +861,7 @@ static int hw_atl_a0_hw_set_speed(struct aq_hw_s *self, u32 speed)
 	return err;
 }
 
-static struct aq_hw_ops hw_atl_ops_ = {
+static const struct aq_hw_ops hw_atl_ops_ = {
 	.create               = hw_atl_a0_create,
 	.destroy              = hw_atl_a0_destroy,
 	.get_hw_caps          = hw_atl_a0_get_hw_caps,
@@ -903,7 +904,7 @@ static struct aq_hw_ops hw_atl_ops_ = {
 	.hw_get_fw_version           = hw_atl_utils_get_fw_version,
 };
 
-struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev)
+const struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev)
 {
 	bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA);
 	bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) ||
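
Beyond the hw_atl_ prefixing, the hunks above constify the A0 ops
table and its getter. A const method table can be placed in read-only
data, so an accidental runtime write becomes a compile error instead
of a silent mutation shared by every adapter instance. A minimal
sketch of the idiom (the example_* names are hypothetical, not the
driver's real types):

	struct example_ops {
		int (*hw_init)(void);
	};

	static int example_hw_init(void)
	{
		return 0;
	}

	static const struct example_ops example_ops_table = {
		.hw_init = example_hw_init,
	};

	const struct example_ops *example_get_ops(void)
	{
		/* example_ops_table.hw_init = NULL; no longer compiles */
		return &example_ops_table;
	}
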
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h
index 6e1d527..4fdd51b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h
@@ -29,6 +29,6 @@
 
 #endif
 
-struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev);
+const struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev);
 
 #endif /* HW_ATL_A0_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
index 0592a03..7a71330 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
@@ -88,37 +88,6 @@
 
 #define HW_ATL_A0_FW_VER_EXPECTED 0x01050006U
 
-/* Hardware tx descriptor */
-struct __packed hw_atl_txd_s {
-	u64 buf_addr;
-	u32 ctl;
-	u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */
-};
-
-/* Hardware tx context descriptor */
-struct __packed hw_atl_txc_s {
-	u32 rsvd;
-	u32 len;
-	u32 ctl;
-	u32 len2;
-};
-
-/* Hardware rx descriptor */
-struct __packed hw_atl_rxd_s {
-	u64 buf_addr;
-	u64 hdr_addr;
-};
-
-/* Hardware rx descriptor writeback */
-struct __packed hw_atl_rxd_wb_s {
-	u32 type;
-	u32 rss_hash;
-	u16 status;
-	u16 pkt_len;
-	u16 next_desc_ptr;
-	u16 vlan;
-};
-
 /* HW layer capabilities */
 static struct aq_hw_caps_s hw_atl_a0_hw_caps_ = {
 	.ports = 1U,
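
The hunk above deletes the A0 copy of the hardware descriptor layouts;
the identical B0 copy is deleted further down in this patch, which
points at a single shared definition elsewhere in the driver. One
__packed layout guarded by a size check is safer than two private
copies that can silently diverge; a sketch of that pattern, again with
hypothetical names:

	#include <linux/types.h>

	/* illustrative stand-in for the consolidated tx descriptor */
	struct __packed example_txd {
		u64 buf_addr;
		u32 ctl;
		u32 ctl2; /* 63..46 payload len, 45 ctx en, 44 ctx index */
	};

	/* packed layout must stay exactly 16 bytes, as the hardware sees it */
	_Static_assert(sizeof(struct example_txd) == 16,
		       "tx descriptor must match the 16-byte hardware layout");
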
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index e4a22ce..0b09016 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -12,6 +12,7 @@
 #include "../aq_hw.h"
 #include "../aq_hw_utils.h"
 #include "../aq_ring.h"
+#include "../aq_nic.h"
 #include "hw_atl_b0.h"
 #include "hw_atl_utils.h"
 #include "hw_atl_llh.h"
@@ -37,21 +38,20 @@ static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self,
 }
 
 static struct aq_hw_s *hw_atl_b0_create(struct aq_pci_func_s *aq_pci_func,
-					unsigned int port,
-					struct aq_hw_ops *ops)
+					unsigned int port)
 {
-	struct hw_atl_s *self = NULL;
+	struct aq_hw_s *self = NULL;
 
 	self = kzalloc(sizeof(*self), GFP_KERNEL);
 	if (!self)
 		goto err_exit;
 
-	self->base.aq_pci_func = aq_pci_func;
+	self->aq_pci_func = aq_pci_func;
 
-	self->base.not_ff_addr = 0x10U;
+	self->not_ff_addr = 0x10U;
 
 err_exit:
-	return (struct aq_hw_s *)self;
+	return self;
 }
 
 static void hw_atl_b0_destroy(struct aq_hw_s *self)
@@ -63,24 +63,24 @@ static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
 {
 	int err = 0;
 
-	glb_glb_reg_res_dis_set(self, 1U);
-	pci_pci_reg_res_dis_set(self, 0U);
-	rx_rx_reg_res_dis_set(self, 0U);
-	tx_tx_reg_res_dis_set(self, 0U);
+	hw_atl_glb_glb_reg_res_dis_set(self, 1U);
+	hw_atl_pci_pci_reg_res_dis_set(self, 0U);
+	hw_atl_rx_rx_reg_res_dis_set(self, 0U);
+	hw_atl_tx_tx_reg_res_dis_set(self, 0U);
 
 	HW_ATL_FLUSH();
-	glb_soft_res_set(self, 1);
+	hw_atl_glb_soft_res_set(self, 1);
 
 	/* check 10 times by 1ms */
-	AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U);
+	AQ_HW_WAIT_FOR(hw_atl_glb_soft_res_get(self) == 0, 1000U, 10U);
 	if (err < 0)
 		goto err_exit;
 
-	itr_irq_reg_res_dis_set(self, 0U);
-	itr_res_irq_set(self, 1U);
+	hw_atl_itr_irq_reg_res_dis_set(self, 0U);
+	hw_atl_itr_res_irq_set(self, 1U);
 
 	/* check 10 times by 1ms */
-	AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U);
+	AQ_HW_WAIT_FOR(hw_atl_itr_res_irq_get(self) == 0, 1000U, 10U);
 	if (err < 0)
 		goto err_exit;
 
@@ -100,51 +100,53 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
 	bool is_rx_flow_control = false;
 
 	/* TPS Descriptor rate init */
-	tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
-	tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
+	hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
+	hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
 
 	/* TPS VM init */
-	tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
+	hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
 
 	/* TPS TC credits init */
-	tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
-	tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
+	hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
+	hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
 
-	tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
-	tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
-	tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
-	tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
+	hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
+	hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
+	hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
+	hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
 
 	/* Tx buf size */
 	buff_size = HW_ATL_B0_TXBUF_MAX;
 
-	tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
-	tpb_tx_buff_hi_threshold_per_tc_set(self,
-					    (buff_size * (1024 / 32U) * 66U) /
-					    100U, tc);
-	tpb_tx_buff_lo_threshold_per_tc_set(self,
-					    (buff_size * (1024 / 32U) * 50U) /
-					    100U, tc);
+	hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+	hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
+						   (buff_size *
+						   (1024 / 32U) * 66U) /
+						   100U, tc);
+	hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
+						   (buff_size *
+						   (1024 / 32U) * 50U) /
+						   100U, tc);
 
 	/* QoS Rx buf size per TC */
 	tc = 0;
 	is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
 	buff_size = HW_ATL_B0_RXBUF_MAX;
 
-	rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
-	rpb_rx_buff_hi_threshold_per_tc_set(self,
-					    (buff_size *
-					    (1024U / 32U) * 66U) /
-					    100U, tc);
-	rpb_rx_buff_lo_threshold_per_tc_set(self,
-					    (buff_size *
-					    (1024U / 32U) * 50U) /
-					    100U, tc);
-	rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
+	hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+	hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
+						   (buff_size *
+						   (1024U / 32U) * 66U) /
+						   100U, tc);
+	hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
+						   (buff_size *
+						   (1024U / 32U) * 50U) /
+						   100U, tc);
+	hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
 
 	/* QoS 802.1p priority -> TC mapping */
 	for (i_priority = 8U; i_priority--;)
-		rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
+		hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
 
 	return aq_hw_err_from_flags(self);
 }
@@ -152,20 +154,19 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
 static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
 				     struct aq_rss_parameters *rss_params)
 {
-	struct aq_nic_cfg_s *cfg = NULL;
+	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
 	int err = 0;
 	unsigned int i = 0U;
 	unsigned int addr = 0U;
 
-	cfg = self->aq_nic_cfg;
-
 	for (i = 10, addr = 0U; i--; ++addr) {
 		u32 key_data = cfg->is_rss ?
 			__swab32(rss_params->hash_secret_key[i]) : 0U;
-		rpf_rss_key_wr_data_set(self, key_data);
-		rpf_rss_key_addr_set(self, addr);
-		rpf_rss_key_wr_en_set(self, 1U);
-		AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U);
+		hw_atl_rpf_rss_key_wr_data_set(self, key_data);
+		hw_atl_rpf_rss_key_addr_set(self, addr);
+		hw_atl_rpf_rss_key_wr_en_set(self, 1U);
+		AQ_HW_WAIT_FOR(hw_atl_rpf_rss_key_wr_en_get(self) == 0,
+			       1000U, 10U);
 		if (err < 0)
 			goto err_exit;
 	}
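
Note how err is tested right after each AQ_HW_WAIT_FOR() above even
though nothing visibly assigns it: the macro writes a caller-local err
on timeout. Assuming it follows the usual poll-with-timeout idiom (the
real definition lives in the driver's aq_hw.h), a sketch looks like
this:

	#include <linux/delay.h>
	#include <linux/errno.h>

	/* Re-test cond up to n times, sleeping us microseconds between
	 * tries; on timeout, set the caller's local err variable. This
	 * is why call sites can check err with no visible assignment.
	 */
	#define WAIT_FOR_EXAMPLE(cond, us, n)		\
	do {						\
		unsigned int tries_ = (n);		\
		while (!(cond) && tries_--)		\
			udelay(us);			\
		if (!(cond))				\
			err = -ETIME;			\
	} while (0)
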
@@ -194,11 +195,12 @@ static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
 			((i * 3U) & 0xFU));
 	}
 
-	for (i = AQ_DIMOF(bitary); i--;) {
-		rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
-		rpf_rss_redir_tbl_addr_set(self, i);
-		rpf_rss_redir_wr_en_set(self, 1U);
-		AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U);
+	for (i = ARRAY_SIZE(bitary); i--;) {
+		hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
+		hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
+		hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
+		AQ_HW_WAIT_FOR(hw_atl_rpf_rss_redir_wr_en_get(self) == 0,
+			       1000U, 10U);
 		if (err < 0)
 			goto err_exit;
 	}
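
Besides the renames, this hunk replaces the driver-local AQ_DIMOF()
with the kernel's ARRAY_SIZE(). Both compute an element count from
sizeof, but ARRAY_SIZE (from <linux/kernel.h>) additionally refuses to
compile when handed a pointer rather than a true array. A minimal
sketch:

	#include <linux/kernel.h>
	#include <linux/types.h>

	static u32 bitary_example[8];

	static void clear_bitary_example(void)
	{
		unsigned int i;

		/* iterates exactly sizeof(arr) / sizeof(arr[0]) times */
		for (i = ARRAY_SIZE(bitary_example); i--;)
			bitary_example[i] = 0U;
	}
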
@@ -215,15 +217,15 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
 	unsigned int i;
 
 	/* TX checksums offloads*/
-	tpo_ipv4header_crc_offload_en_set(self, 1);
-	tpo_tcp_udp_crc_offload_en_set(self, 1);
+	hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
+	hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
 
 	/* RX checksums offloads*/
-	rpo_ipv4header_crc_offload_en_set(self, 1);
-	rpo_tcp_udp_crc_offload_en_set(self, 1);
+	hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
+	hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);
 
 	/* LSO offloads*/
-	tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
+	hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
 
 /* LRO offloads */
 	{
@@ -232,43 +234,44 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
 			((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));
 
 		for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
-			rpo_lro_max_num_of_descriptors_set(self, val, i);
+			hw_atl_rpo_lro_max_num_of_descriptors_set(self, val, i);
 
-		rpo_lro_time_base_divider_set(self, 0x61AU);
-		rpo_lro_inactive_interval_set(self, 0);
-		rpo_lro_max_coalescing_interval_set(self, 2);
+		hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU);
+		hw_atl_rpo_lro_inactive_interval_set(self, 0);
+		hw_atl_rpo_lro_max_coalescing_interval_set(self, 2);
 
-		rpo_lro_qsessions_lim_set(self, 1U);
+		hw_atl_rpo_lro_qsessions_lim_set(self, 1U);
 
-		rpo_lro_total_desc_lim_set(self, 2U);
+		hw_atl_rpo_lro_total_desc_lim_set(self, 2U);
 
-		rpo_lro_patch_optimization_en_set(self, 0U);
+		hw_atl_rpo_lro_patch_optimization_en_set(self, 0U);
 
-		rpo_lro_min_pay_of_first_pkt_set(self, 10U);
+		hw_atl_rpo_lro_min_pay_of_first_pkt_set(self, 10U);
 
-		rpo_lro_pkt_lim_set(self, 1U);
+		hw_atl_rpo_lro_pkt_lim_set(self, 1U);
 
-		rpo_lro_en_set(self, aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
+		hw_atl_rpo_lro_en_set(self,
+				      aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
 	}
 	return aq_hw_err_from_flags(self);
 }
 
 static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
 {
-	thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
-	thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
-	thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
+	hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
+	hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
+	hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
 
 	/* Tx interrupts */
-	tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+	hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
 
 	/* misc */
 	aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
 			0x00010000U : 0x00000000U);
-	tdm_tx_dca_en_set(self, 0U);
-	tdm_tx_dca_mode_set(self, 0U);
+	hw_atl_tdm_tx_dca_en_set(self, 0U);
+	hw_atl_tdm_tx_dca_mode_set(self, 0U);
 
-	tpb_tx_path_scp_ins_en_set(self, 1U);
+	hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);
 
 	return aq_hw_err_from_flags(self);
 }
@@ -279,55 +282,55 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
 	int i;
 
 	/* Rx TC/RSS number config */
-	rpb_rpf_rx_traf_class_mode_set(self, 1U);
+	hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);
 
 	/* Rx flow control */
-	rpb_rx_flow_ctl_mode_set(self, 1U);
+	hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);
 
 	/* RSS Ring selection */
-	reg_rx_flr_rss_control1set(self, cfg->is_rss ?
+	hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
 					0xB3333333U : 0x00000000U);
 
 	/* Multicast filters */
 	for (i = HW_ATL_B0_MAC_MAX; i--;) {
-		rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
-		rpfl2unicast_flr_act_set(self, 1U, i);
+		hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
+		hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
 	}
 
-	reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
-	reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
+	hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
+	hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
 
 	/* Vlan filters */
-	rpf_vlan_outer_etht_set(self, 0x88A8U);
-	rpf_vlan_inner_etht_set(self, 0x8100U);
+	hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
+	hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
 
 	if (cfg->vlan_id) {
-		rpf_vlan_flr_act_set(self, 1U, 0U);
-		rpf_vlan_id_flr_set(self, 0U, 0U);
-		rpf_vlan_flr_en_set(self, 0U, 0U);
+		hw_atl_rpf_vlan_flr_act_set(self, 1U, 0U);
+		hw_atl_rpf_vlan_id_flr_set(self, 0U, 0U);
+		hw_atl_rpf_vlan_flr_en_set(self, 0U, 0U);
 
-		rpf_vlan_accept_untagged_packets_set(self, 1U);
-		rpf_vlan_untagged_act_set(self, 1U);
+		hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
+		hw_atl_rpf_vlan_untagged_act_set(self, 1U);
 
-		rpf_vlan_flr_act_set(self, 1U, 1U);
-		rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U);
-		rpf_vlan_flr_en_set(self, 1U, 1U);
+		hw_atl_rpf_vlan_flr_act_set(self, 1U, 1U);
+		hw_atl_rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U);
+		hw_atl_rpf_vlan_flr_en_set(self, 1U, 1U);
 	} else {
-		rpf_vlan_prom_mode_en_set(self, 1);
+		hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
 	}
 
 	/* Rx Interrupts */
-	rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+	hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
 
 	/* misc */
 	aq_hw_write_reg(self, 0x00005040U,
 			IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U);
 
-	rpfl2broadcast_flr_act_set(self, 1U);
-	rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
+	hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
+	hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
 
-	rdm_rx_dca_en_set(self, 0U);
-	rdm_rx_dca_mode_set(self, 0U);
+	hw_atl_rdm_rx_dca_en_set(self, 0U);
+	hw_atl_rdm_rx_dca_mode_set(self, 0U);
 
 	return aq_hw_err_from_flags(self);
 }
@@ -346,10 +349,10 @@ static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
 	l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
 		(mac_addr[4] << 8) | mac_addr[5];
 
-	rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
-	rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
-	rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
-	rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);
+	hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
+	hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
+	hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
+	hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);
 
 	err = aq_hw_err_from_flags(self);
 
@@ -357,9 +360,7 @@ static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
 	return err;
 }
 
-static int hw_atl_b0_hw_init(struct aq_hw_s *self,
-			     struct aq_nic_cfg_s *aq_nic_cfg,
-			     u8 *mac_addr)
+static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
 {
 	static u32 aq_hw_atl_igcr_table_[4][2] = {
 		{ 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
@@ -371,10 +372,7 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
 	int err = 0;
 	u32 val;
 
-	self->aq_nic_cfg = aq_nic_cfg;
-
-	hw_atl_utils_hw_chip_features_init(self,
-					   &PHAL_ATLANTIC_B0->chip_features);
+	struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
 
 	hw_atl_b0_hw_init_tx_path(self);
 	hw_atl_b0_hw_init_rx_path(self);
@@ -388,14 +386,15 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
 	hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
 
 	/* Force limit MRRS on RDM/TDM to 2K */
-	val = aq_hw_read_reg(self, pci_reg_control6_adr);
-	aq_hw_write_reg(self, pci_reg_control6_adr, (val & ~0x707) | 0x404);
+	val = aq_hw_read_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR);
+	aq_hw_write_reg(self, HW_ATL_PCI_REG_CONTROL6_ADR,
+			(val & ~0x707) | 0x404);
 
 	/* TX DMA total request limit. B0 hardware is not capable to
 	 * handle more than (8K-MRRS) incoming DMA data.
 	 * Value 24 in 256byte units
 	 */
-	aq_hw_write_reg(self, tx_dma_total_req_limit_adr, 24);
+	aq_hw_write_reg(self, HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24);
 
 	/* Reset link status and read out initial hardware counters */
 	self->aq_link_status.mbps = 0;
@@ -406,16 +405,16 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
 		goto err_exit;
 
 	/* Interrupts */
-	reg_irq_glb_ctl_set(self,
-			    aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
+	hw_atl_reg_irq_glb_ctl_set(self,
+				   aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
 						 [(aq_nic_cfg->vecs > 1U) ?
 						 1 : 0]);
 
-	itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
+	hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
 
 	/* Interrupts */
-	reg_gen_irq_map_set(self,
-			    ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
+	hw_atl_reg_gen_irq_map_set(self,
+				   ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
 			    ((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U);
 
 	hw_atl_b0_hw_offload_set(self, aq_nic_cfg);
@@ -427,28 +426,28 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
 static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self,
 				      struct aq_ring_s *ring)
 {
-	tdm_tx_desc_en_set(self, 1, ring->idx);
+	hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
 	return aq_hw_err_from_flags(self);
 }
 
 static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self,
 				      struct aq_ring_s *ring)
 {
-	rdm_rx_desc_en_set(self, 1, ring->idx);
+	hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
 	return aq_hw_err_from_flags(self);
 }
 
 static int hw_atl_b0_hw_start(struct aq_hw_s *self)
 {
-	tpb_tx_buff_en_set(self, 1);
-	rpb_rx_buff_en_set(self, 1);
+	hw_atl_tpb_tx_buff_en_set(self, 1);
+	hw_atl_rpb_rx_buff_en_set(self, 1);
 	return aq_hw_err_from_flags(self);
 }
 
 static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
 					    struct aq_ring_s *ring)
 {
-	reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
+	hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
 	return 0;
 }
 
@@ -534,36 +533,36 @@ static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
 	u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
 	u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
 
-	rdm_rx_desc_en_set(self, false, aq_ring->idx);
+	hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);
 
-	rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+	hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
 
-	reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
-					   aq_ring->idx);
+	hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
+						  aq_ring->idx);
 
-	reg_rx_dma_desc_base_addressmswset(self,
-					   dma_desc_addr_msw, aq_ring->idx);
+	hw_atl_reg_rx_dma_desc_base_addressmswset(self,
+						  dma_desc_addr_msw, aq_ring->idx);
 
-	rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+	hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
 
-	rdm_rx_desc_data_buff_size_set(self,
-				       AQ_CFG_RX_FRAME_MAX / 1024U,
+	hw_atl_rdm_rx_desc_data_buff_size_set(self,
+					      AQ_CFG_RX_FRAME_MAX / 1024U,
 				       aq_ring->idx);
 
-	rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
-	rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
-	rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
+	hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
+	hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+	hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
 
 	/* Rx ring set mode */
 
 	/* Mapping interrupt vector */
-	itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
-	itr_irq_map_en_rx_set(self, true, aq_ring->idx);
+	hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+	hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);
 
-	rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
-	rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
-	rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
-	rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
+	hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+	hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
+	hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
+	hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
 
 	return aq_hw_err_from_flags(self);
 }
@@ -575,25 +574,25 @@ static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
 	u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
 	u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
 
-	reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
-					   aq_ring->idx);
+	hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
+						  aq_ring->idx);
 
-	reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
-					   aq_ring->idx);
+	hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
+						  aq_ring->idx);
 
-	tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+	hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
 
 	hw_atl_b0_hw_tx_ring_tail_update(self, aq_ring);
 
 	/* Set Tx threshold */
-	tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
+	hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
 
 	/* Mapping interrupt vector */
-	itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
-	itr_irq_map_en_tx_set(self, true, aq_ring->idx);
+	hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+	hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);
 
-	tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
-	tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
+	hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+	hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
 
 	return aq_hw_err_from_flags(self);
 }
@@ -614,7 +613,7 @@ static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self,
 		rxd->hdr_addr = 0U;
 	}
 
-	reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
+	hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
 
 	return aq_hw_err_from_flags(self);
 }
@@ -623,9 +622,9 @@ static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
 					    struct aq_ring_s *ring)
 {
 	int err = 0;
-	unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx);
+	unsigned int hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
 
-	if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
+	if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
 		err = -ENXIO;
 		goto err_exit;
 	}
@@ -728,22 +727,22 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
 
 static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
 {
-	itr_irq_msk_setlsw_set(self, LODWORD(mask));
+	hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));
 	return aq_hw_err_from_flags(self);
 }
 
 static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
 {
-	itr_irq_msk_clearlsw_set(self, LODWORD(mask));
-	itr_irq_status_clearlsw_set(self, LODWORD(mask));
+	hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
+	hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
 
-	atomic_inc(&PHAL_ATLANTIC_B0->dpc);
+	atomic_inc(&self->dpc);
 	return aq_hw_err_from_flags(self);
 }
 
 static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
 {
-	*mask = itr_irq_statuslsw_get(self);
+	*mask = hw_atl_itr_irq_statuslsw_get(self);
 	return aq_hw_err_from_flags(self);
 }
 
@@ -754,20 +753,20 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
 {
 	unsigned int i = 0U;
 
-	rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
-	rpfl2multicast_flr_en_set(self,
-				  IS_FILTER_ENABLED(IFF_MULTICAST), 0);
+	hw_atl_rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
+	hw_atl_rpfl2multicast_flr_en_set(self,
+					 IS_FILTER_ENABLED(IFF_MULTICAST), 0);
 
-	rpfl2_accept_all_mc_packets_set(self,
-					IS_FILTER_ENABLED(IFF_ALLMULTI));
+	hw_atl_rpfl2_accept_all_mc_packets_set(self,
+					       IS_FILTER_ENABLED(IFF_ALLMULTI));
 
-	rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
+	hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
 
 	self->aq_nic_cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);
 
 	for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
-		rpfl2_uc_flr_en_set(self,
-				    (self->aq_nic_cfg->is_mc_list_enabled &&
+		hw_atl_rpfl2_uc_flr_en_set(self,
+					   (self->aq_nic_cfg->is_mc_list_enabled &&
 				    (i <= self->aq_nic_cfg->mc_list_count)) ?
 				    1U : 0U, i);
 
@@ -796,16 +795,16 @@ static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
 		u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
 					(ar_mac[i][4] << 8) | ar_mac[i][5];
 
-		rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);
+		hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);
 
-		rpfl2unicast_dest_addresslsw_set(self,
-						 l, HW_ATL_B0_MAC_MIN + i);
+		hw_atl_rpfl2unicast_dest_addresslsw_set(self,
+							l, HW_ATL_B0_MAC_MIN + i);
 
-		rpfl2unicast_dest_addressmsw_set(self,
-						 h, HW_ATL_B0_MAC_MIN + i);
+		hw_atl_rpfl2unicast_dest_addressmsw_set(self,
+							h, HW_ATL_B0_MAC_MIN + i);
 
-		rpfl2_uc_flr_en_set(self,
-				    (self->aq_nic_cfg->is_mc_list_enabled),
+		hw_atl_rpfl2_uc_flr_en_set(self,
+					   (self->aq_nic_cfg->is_mc_list_enabled),
 				    HW_ATL_B0_MAC_MIN + i);
 	}
 
@@ -824,10 +823,10 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
 	switch (self->aq_nic_cfg->itr) {
 	case  AQ_CFG_INTERRUPT_MODERATION_ON:
 	case  AQ_CFG_INTERRUPT_MODERATION_AUTO:
-		tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
-		tdm_tdm_intr_moder_en_set(self, 1U);
-		rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
-		rdm_rdm_intr_moder_en_set(self, 1U);
+		hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
+		hw_atl_tdm_tdm_intr_moder_en_set(self, 1U);
+		hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
+		hw_atl_rdm_rdm_intr_moder_en_set(self, 1U);
 
 		if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
 			/* HW timers are in 2us units */
@@ -887,18 +886,18 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
 		}
 		break;
 	case AQ_CFG_INTERRUPT_MODERATION_OFF:
-		tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
-		tdm_tdm_intr_moder_en_set(self, 0U);
-		rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
-		rdm_rdm_intr_moder_en_set(self, 0U);
+		hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+		hw_atl_tdm_tdm_intr_moder_en_set(self, 0U);
+		hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+		hw_atl_rdm_rdm_intr_moder_en_set(self, 0U);
 		itr_tx = 0U;
 		itr_rx = 0U;
 		break;
 	}
 
 	for (i = HW_ATL_B0_RINGS_MAX; i--;) {
-		reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
-		reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
+		hw_atl_reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
+		hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
 	}
 
 	return aq_hw_err_from_flags(self);
@@ -913,14 +912,14 @@ static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
 static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
 				     struct aq_ring_s *ring)
 {
-	tdm_tx_desc_en_set(self, 0U, ring->idx);
+	hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
 	return aq_hw_err_from_flags(self);
 }
 
 static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
 				     struct aq_ring_s *ring)
 {
-	rdm_rx_desc_en_set(self, 0U, ring->idx);
+	hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
 	return aq_hw_err_from_flags(self);
 }
 
@@ -936,7 +935,7 @@ static int hw_atl_b0_hw_set_speed(struct aq_hw_s *self, u32 speed)
 	return err;
 }
 
-static struct aq_hw_ops hw_atl_ops_ = {
+static const struct aq_hw_ops hw_atl_ops_ = {
 	.create               = hw_atl_b0_create,
 	.destroy              = hw_atl_b0_destroy,
 	.get_hw_caps          = hw_atl_b0_get_hw_caps,
@@ -979,7 +978,7 @@ static struct aq_hw_ops hw_atl_ops_ = {
 	.hw_get_fw_version           = hw_atl_utils_get_fw_version,
 };
 
-struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev)
+const struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev)
 {
 	bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA);
 	bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) ||
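
A recurring theme in this file: state that used to be reached through
the PHAL_ATLANTIC_B0 cast of a derived structure (dpc, the error
flags, aq_nic_cfg) is now read straight off struct aq_hw_s, and
hw_atl_b0_create() allocates that structure directly instead of a
wrapper with a ->base member. A sketch of the flattening, with
hypothetical names standing in for the driver's real types:

	#include <linux/atomic.h>

	/* before: struct derived { struct example_hw base; atomic_t dpc; },
	 * reached through casts; after: the fields live in one structure
	 */
	struct example_hw {
		atomic_t dpc;		/* was PHAL_ATLANTIC_B0->dpc */
		unsigned int flags;	/* was self->header.flags */
	};

	static void irq_disable_example(struct example_hw *self)
	{
		atomic_inc(&self->dpc);	/* no cast, no ->base hop */
	}
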
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
index a1e1bce6..3e10969 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
@@ -29,6 +29,6 @@
 
 #endif
 
-struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev);
+const struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev);
 
 #endif /* HW_ATL_B0_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index 9aa2c6e..740ff73 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -142,37 +142,6 @@
 #define HW_ATL_INTR_MODER_MAX  0x1FF
 #define HW_ATL_INTR_MODER_MIN  0xFF
 
-/* Hardware tx descriptor */
-struct __packed hw_atl_txd_s {
-	u64 buf_addr;
-	u32 ctl;
-	u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */
-};
-
-/* Hardware tx context descriptor */
-struct __packed hw_atl_txc_s {
-	u32 rsvd;
-	u32 len;
-	u32 ctl;
-	u32 len2;
-};
-
-/* Hardware rx descriptor */
-struct __packed hw_atl_rxd_s {
-	u64 buf_addr;
-	u64 hdr_addr;
-};
-
-/* Hardware rx descriptor writeback */
-struct __packed hw_atl_rxd_wb_s {
-	u32 type;
-	u32 rss_hash;
-	u16 status;
-	u16 pkt_len;
-	u16 next_desc_ptr;
-	u16 vlan;
-};
-
 /* HW layer capabilities */
 static struct aq_hw_caps_s hw_atl_b0_hw_caps_ = {
 	.ports = 1U,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index 3de651a..10ba035 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -16,111 +16,115 @@
 #include "../aq_hw_utils.h"
 
 /* global */
-void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem, u32 semaphore)
+void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
+				u32 semaphore)
 {
-	aq_hw_write_reg(aq_hw, glb_cpu_sem_adr(semaphore), glb_cpu_sem);
+	aq_hw_write_reg(aq_hw, HW_ATL_GLB_CPU_SEM_ADR(semaphore), glb_cpu_sem);
 }
 
-u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore)
+u32 hw_atl_reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore)
 {
-	return aq_hw_read_reg(aq_hw, glb_cpu_sem_adr(semaphore));
+	return aq_hw_read_reg(aq_hw, HW_ATL_GLB_CPU_SEM_ADR(semaphore));
 }
 
-void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis)
+void hw_atl_glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis)
 {
-	aq_hw_write_reg_bit(aq_hw, glb_reg_res_dis_adr,
-			    glb_reg_res_dis_msk,
-			    glb_reg_res_dis_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_GLB_REG_RES_DIS_ADR,
+			    HW_ATL_GLB_REG_RES_DIS_MSK,
+			    HW_ATL_GLB_REG_RES_DIS_SHIFT,
 			    glb_reg_res_dis);
 }
 
-void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res)
+void hw_atl_glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res)
 {
-	aq_hw_write_reg_bit(aq_hw, glb_soft_res_adr, glb_soft_res_msk,
-			    glb_soft_res_shift, soft_res);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_GLB_SOFT_RES_ADR,
+			    HW_ATL_GLB_SOFT_RES_MSK,
+			    HW_ATL_GLB_SOFT_RES_SHIFT, soft_res);
 }
 
-u32 glb_soft_res_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg_bit(aq_hw, glb_soft_res_adr,
-				  glb_soft_res_msk,
-				  glb_soft_res_shift);
+	return aq_hw_read_reg_bit(aq_hw, HW_ATL_GLB_SOFT_RES_ADR,
+				  HW_ATL_GLB_SOFT_RES_MSK,
+				  HW_ATL_GLB_SOFT_RES_SHIFT);
 }
 
-u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw)
+u32 hw_atl_reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg(aq_hw, rx_dma_stat_counter7_adr);
+	return aq_hw_read_reg(aq_hw, HW_ATL_RX_DMA_STAT_COUNTER7_ADR);
 }
 
-u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_reg_glb_mif_id_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg(aq_hw, glb_mif_id_adr);
+	return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MIF_ID_ADR);
 }
 
 /* stats */
-u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg(aq_hw, rpb_rx_dma_drop_pkt_cnt_adr);
+	return aq_hw_read_reg(aq_hw, HW_ATL_RPB_RX_DMA_DROP_PKT_CNT_ADR);
 }
 
-u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_counterlsw__adr);
+	return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERLSW);
 }
 
-u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_counterlsw__adr);
+	return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERLSW);
 }
 
-u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_counterlsw__adr);
+	return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERLSW);
 }
 
-u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_counterlsw__adr);
+	return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERLSW);
 }
 
-u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_countermsw__adr);
+	return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERMSW);
 }
 
-u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_countermsw__adr);
+	return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERMSW);
 }
 
-u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_countermsw__adr);
+	return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERMSW);
 }
 
-u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_countermsw__adr);
+	return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERMSW);
 }
 
 /* interrupt */
-void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw)
+void hw_atl_itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw,
+				     u32 irq_auto_masklsw)
 {
-	aq_hw_write_reg(aq_hw, itr_iamrlsw_adr, irq_auto_masklsw);
+	aq_hw_write_reg(aq_hw, HW_ATL_ITR_IAMRLSW_ADR, irq_auto_masklsw);
 }
 
-void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx)
+void hw_atl_itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx,
+				  u32 rx)
 {
 /* register address for bitfield imr_rx{r}_en */
 	static u32 itr_imr_rxren_adr[32] = {
 			0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
-			0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+			0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
 			0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
-			0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+			0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
 			0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
-			0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+			0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
 			0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
-			0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+			0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
 		};
 
 /* bitmask for bitfield imr_rx{r}_en */
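
The paired entries in the address table above encode a hardware
detail: each 32-bit interrupt-mapping register at 0x2100 + 4 * (ring / 2)
serves two rings, and the per-index mask and shift tables then select
the even or odd half of the register. The same addresses could be
computed instead of tabulated, as this hypothetical helper shows:

	#include <linux/types.h>

	/* rings 0,1 -> 0x2100; rings 2,3 -> 0x2104; and so on */
	static inline u32 imr_reg_addr_example(u32 ring)
	{
		return 0x00002100U + (ring / 2U) * 4U;
	}
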
@@ -149,18 +153,19 @@ void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx)
 			    irq_map_en_rx);
 }
 
-void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx)
+void hw_atl_itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx,
+				  u32 tx)
 {
 /* register address for bitfield imr_tx{t}_en */
 	static u32 itr_imr_txten_adr[32] = {
 			0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
-			0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+			0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
 			0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
-			0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+			0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
 			0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
-			0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+			0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
 			0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
-			0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+			0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
 		};
 
 /* bitmask for bitfield imr_tx{t}_en */
@@ -189,30 +194,30 @@ void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx)
 			    irq_map_en_tx);
 }
 
-void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx)
+void hw_atl_itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx)
 {
 /* register address for bitfield imr_rx{r}[4:0] */
 	static u32 itr_imr_rxr_adr[32] = {
 			0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
-			0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+			0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
 			0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
-			0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+			0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
 			0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
-			0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+			0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
 			0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
-			0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+			0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
 		};
 
 /* bitmask for bitfield imr_rx{r}[4:0] */
 	static u32 itr_imr_rxr_msk[32] = {
-			0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
-			0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
-			0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
-			0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
-			0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
-			0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
-			0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
-			0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU
+			0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+			0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+			0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+			0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+			0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+			0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+			0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU,
+			0x00001F00U, 0x0000001FU, 0x00001F00U, 0x0000001FU
 		};
 
 /* lower bit position of bitfield imr_rx{r}[4:0] */
@@ -229,30 +234,30 @@ void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx)
 			    irq_map_rx);
 }
 
-void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx)
+void hw_atl_itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx)
 {
 /* register address for bitfield imr_tx{t}[4:0] */
 	static u32 itr_imr_txt_adr[32] = {
 			0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
-			0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+			0x00002108U, 0x00002108U, 0x0000210CU, 0x0000210CU,
 			0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
-			0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+			0x00002118U, 0x00002118U, 0x0000211CU, 0x0000211CU,
 			0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
-			0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+			0x00002128U, 0x00002128U, 0x0000212CU, 0x0000212CU,
 			0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
-			0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+			0x00002138U, 0x00002138U, 0x0000213CU, 0x0000213CU
 		};
 
 /* bitmask for bitfield imr_tx{t}[4:0] */
 	static u32 itr_imr_txt_msk[32] = {
-			0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
-			0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
-			0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
-			0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
-			0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
-			0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
-			0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
-			0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U
+			0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+			0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+			0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+			0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+			0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+			0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+			0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U,
+			0x1F000000U, 0x001F0000U, 0x1F000000U, 0x001F0000U
 		};
 
 /* lower bit position of bitfield imr_tx{t}[4:0] */
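
Every accessor in this file ultimately funnels into
aq_hw_write_reg_bit(aq_hw, addr, msk, shift, val). Judging from the
call sites it is the standard masked read-modify-write; a sketch of
that pattern (the real helper lives in aq_hw_utils.c, and this
standalone version with an explicit __iomem base is only
illustrative):

	#include <linux/io.h>
	#include <linux/types.h>

	static void write_reg_bit_example(void __iomem *base, u32 addr,
					  u32 msk, u32 shift, u32 val)
	{
		u32 reg = readl(base + addr);

		reg &= ~msk;			/* clear the bitfield */
		reg |= (val << shift) & msk;	/* install the new value */
		writel(reg, base + addr);
	}
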
@@ -269,429 +274,463 @@ void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx)
 			    irq_map_tx);
 }
 
-void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw)
+void hw_atl_itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw,
+				     u32 irq_msk_clearlsw)
 {
-	aq_hw_write_reg(aq_hw, itr_imcrlsw_adr, irq_msk_clearlsw);
+	aq_hw_write_reg(aq_hw, HW_ATL_ITR_IMCRLSW_ADR, irq_msk_clearlsw);
 }
 
-void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw)
+void hw_atl_itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw)
 {
-	aq_hw_write_reg(aq_hw, itr_imsrlsw_adr, irq_msk_setlsw);
+	aq_hw_write_reg(aq_hw, HW_ATL_ITR_IMSRLSW_ADR, irq_msk_setlsw);
 }
 
-void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis)
+void hw_atl_itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis)
 {
-	aq_hw_write_reg_bit(aq_hw, itr_reg_res_dsbl_adr,
-			    itr_reg_res_dsbl_msk,
-			    itr_reg_res_dsbl_shift, irq_reg_res_dis);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_REG_RES_DSBL_ADR,
+			    HW_ATL_ITR_REG_RES_DSBL_MSK,
+			    HW_ATL_ITR_REG_RES_DSBL_SHIFT, irq_reg_res_dis);
 }
 
-void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
-				 u32 irq_status_clearlsw)
+void hw_atl_itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
+					u32 irq_status_clearlsw)
 {
-	aq_hw_write_reg(aq_hw, itr_iscrlsw_adr, irq_status_clearlsw);
+	aq_hw_write_reg(aq_hw, HW_ATL_ITR_ISCRLSW_ADR, irq_status_clearlsw);
 }
 
-u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_itr_irq_statuslsw_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg(aq_hw, itr_isrlsw_adr);
+	return aq_hw_read_reg(aq_hw, HW_ATL_ITR_ISRLSW_ADR);
 }
 
-u32 itr_res_irq_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg_bit(aq_hw, itr_res_adr, itr_res_msk,
-				  itr_res_shift);
+	return aq_hw_read_reg_bit(aq_hw, HW_ATL_ITR_RES_ADR, HW_ATL_ITR_RES_MSK,
+				  HW_ATL_ITR_RES_SHIFT);
 }
 
-void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq)
+void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq)
 {
-	aq_hw_write_reg_bit(aq_hw, itr_res_adr, itr_res_msk,
-			    itr_res_shift, res_irq);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_RES_ADR, HW_ATL_ITR_RES_MSK,
+			    HW_ATL_ITR_RES_SHIFT, res_irq);
 }
 
 /* rdm */
-void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
+void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
 {
-	aq_hw_write_reg_bit(aq_hw, rdm_dcadcpuid_adr(dca),
-			    rdm_dcadcpuid_msk,
-			    rdm_dcadcpuid_shift, cpuid);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADCPUID_ADR(dca),
+			    HW_ATL_RDM_DCADCPUID_MSK,
+			    HW_ATL_RDM_DCADCPUID_SHIFT, cpuid);
 }
 
-void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en)
+void hw_atl_rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en)
 {
-	aq_hw_write_reg_bit(aq_hw, rdm_dca_en_adr, rdm_dca_en_msk,
-			    rdm_dca_en_shift, rx_dca_en);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCA_EN_ADR, HW_ATL_RDM_DCA_EN_MSK,
+			    HW_ATL_RDM_DCA_EN_SHIFT, rx_dca_en);
 }
 
-void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode)
+void hw_atl_rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode)
 {
-	aq_hw_write_reg_bit(aq_hw, rdm_dca_mode_adr, rdm_dca_mode_msk,
-			    rdm_dca_mode_shift, rx_dca_mode);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCA_MODE_ADR,
+			    HW_ATL_RDM_DCA_MODE_MSK,
+			    HW_ATL_RDM_DCA_MODE_SHIFT, rx_dca_mode);
 }
 
-void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
-				    u32 rx_desc_data_buff_size, u32 descriptor)
+void hw_atl_rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
+					   u32 rx_desc_data_buff_size,
+					   u32 descriptor)
 {
-	aq_hw_write_reg_bit(aq_hw, rdm_descddata_size_adr(descriptor),
-			    rdm_descddata_size_msk,
-			    rdm_descddata_size_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDDATA_SIZE_ADR(descriptor),
+			    HW_ATL_RDM_DESCDDATA_SIZE_MSK,
+			    HW_ATL_RDM_DESCDDATA_SIZE_SHIFT,
 			    rx_desc_data_buff_size);
 }
 
-void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en, u32 dca)
+void hw_atl_rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
+				   u32 dca)
 {
-	aq_hw_write_reg_bit(aq_hw, rdm_dcaddesc_en_adr(dca),
-			    rdm_dcaddesc_en_msk,
-			    rdm_dcaddesc_en_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADDESC_EN_ADR(dca),
+			    HW_ATL_RDM_DCADDESC_EN_MSK,
+			    HW_ATL_RDM_DCADDESC_EN_SHIFT,
 			    rx_desc_dca_en);
 }
 
-void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en, u32 descriptor)
+void hw_atl_rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
+			       u32 descriptor)
 {
-	aq_hw_write_reg_bit(aq_hw, rdm_descden_adr(descriptor),
-			    rdm_descden_msk,
-			    rdm_descden_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDEN_ADR(descriptor),
+			    HW_ATL_RDM_DESCDEN_MSK,
+			    HW_ATL_RDM_DESCDEN_SHIFT,
 			    rx_desc_en);
 }
 
-void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
-				    u32 rx_desc_head_buff_size, u32 descriptor)
+void hw_atl_rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
+					   u32 rx_desc_head_buff_size,
+					   u32 descriptor)
 {
-	aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_size_adr(descriptor),
-			    rdm_descdhdr_size_msk,
-			    rdm_descdhdr_size_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDHDR_SIZE_ADR(descriptor),
+			    HW_ATL_RDM_DESCDHDR_SIZE_MSK,
+			    HW_ATL_RDM_DESCDHDR_SIZE_SHIFT,
 			    rx_desc_head_buff_size);
 }
 
-void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
-				    u32 rx_desc_head_splitting, u32 descriptor)
+void hw_atl_rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
+					   u32 rx_desc_head_splitting,
+					   u32 descriptor)
 {
-	aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_split_adr(descriptor),
-			    rdm_descdhdr_split_msk,
-			    rdm_descdhdr_split_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDHDR_SPLIT_ADR(descriptor),
+			    HW_ATL_RDM_DESCDHDR_SPLIT_MSK,
+			    HW_ATL_RDM_DESCDHDR_SPLIT_SHIFT,
 			    rx_desc_head_splitting);
 }
 
-u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
+u32 hw_atl_rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
 {
-	return aq_hw_read_reg_bit(aq_hw, rdm_descdhd_adr(descriptor),
-				  rdm_descdhd_msk, rdm_descdhd_shift);
+	return aq_hw_read_reg_bit(aq_hw, HW_ATL_RDM_DESCDHD_ADR(descriptor),
+				  HW_ATL_RDM_DESCDHD_MSK,
+				  HW_ATL_RDM_DESCDHD_SHIFT);
 }
 
-void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len, u32 descriptor)
+void hw_atl_rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
+				u32 descriptor)
 {
-	aq_hw_write_reg_bit(aq_hw, rdm_descdlen_adr(descriptor),
-			    rdm_descdlen_msk, rdm_descdlen_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDLEN_ADR(descriptor),
+			    HW_ATL_RDM_DESCDLEN_MSK, HW_ATL_RDM_DESCDLEN_SHIFT,
 			    rx_desc_len);
 }
 
-void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res, u32 descriptor)
+void hw_atl_rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
+				u32 descriptor)
 {
-	aq_hw_write_reg_bit(aq_hw, rdm_descdreset_adr(descriptor),
-			    rdm_descdreset_msk, rdm_descdreset_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DESCDRESET_ADR(descriptor),
+			    HW_ATL_RDM_DESCDRESET_MSK,
+			    HW_ATL_RDM_DESCDRESET_SHIFT,
 			    rx_desc_res);
 }
 
-void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
-				  u32 rx_desc_wr_wb_irq_en)
+void hw_atl_rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+					 u32 rx_desc_wr_wb_irq_en)
 {
-	aq_hw_write_reg_bit(aq_hw, rdm_int_desc_wrb_en_adr,
-			    rdm_int_desc_wrb_en_msk,
-			    rdm_int_desc_wrb_en_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_INT_DESC_WRB_EN_ADR,
+			    HW_ATL_RDM_INT_DESC_WRB_EN_MSK,
+			    HW_ATL_RDM_INT_DESC_WRB_EN_SHIFT,
 			    rx_desc_wr_wb_irq_en);
 }
 
-void rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en, u32 dca)
+void hw_atl_rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
+				   u32 dca)
 {
-	aq_hw_write_reg_bit(aq_hw, rdm_dcadhdr_en_adr(dca),
-			    rdm_dcadhdr_en_msk,
-			    rdm_dcadhdr_en_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADHDR_EN_ADR(dca),
+			    HW_ATL_RDM_DCADHDR_EN_MSK,
+			    HW_ATL_RDM_DCADHDR_EN_SHIFT,
 			    rx_head_dca_en);
 }
 
-void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca)
+void hw_atl_rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en,
+				  u32 dca)
 {
-	aq_hw_write_reg_bit(aq_hw, rdm_dcadpay_en_adr(dca),
-			    rdm_dcadpay_en_msk, rdm_dcadpay_en_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_DCADPAY_EN_ADR(dca),
+			    HW_ATL_RDM_DCADPAY_EN_MSK,
+			    HW_ATL_RDM_DCADPAY_EN_SHIFT,
 			    rx_pld_dca_en);
 }
 
-void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en)
+void hw_atl_rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+				      u32 rdm_intr_moder_en)
 {
-	aq_hw_write_reg_bit(aq_hw, rdm_int_rim_en_adr,
-			    rdm_int_rim_en_msk,
-			    rdm_int_rim_en_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_INT_RIM_EN_ADR,
+			    HW_ATL_RDM_INT_RIM_EN_MSK,
+			    HW_ATL_RDM_INT_RIM_EN_SHIFT,
 			    rdm_intr_moder_en);
 }
 
 /* reg */
-void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx)
+void hw_atl_reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map,
+				u32 regidx)
 {
-	aq_hw_write_reg(aq_hw, gen_intr_map_adr(regidx), gen_intr_map);
+	aq_hw_write_reg(aq_hw, HW_ATL_GEN_INTR_MAP_ADR(regidx), gen_intr_map);
 }
 
-u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_reg_gen_irq_status_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg(aq_hw, gen_intr_stat_adr);
+	return aq_hw_read_reg(aq_hw, HW_ATL_GEN_INTR_STAT_ADR);
 }
 
-void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl)
+void hw_atl_reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl)
 {
-	aq_hw_write_reg(aq_hw, intr_glb_ctl_adr, intr_glb_ctl);
+	aq_hw_write_reg(aq_hw, HW_ATL_INTR_GLB_CTL_ADR, intr_glb_ctl);
 }
 
-void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle)
+void hw_atl_reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle)
 {
-	aq_hw_write_reg(aq_hw, intr_thr_adr(throttle), intr_thr);
+	aq_hw_write_reg(aq_hw, HW_ATL_INTR_THR_ADR(throttle), intr_thr);
 }
 
-void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
-					u32 rx_dma_desc_base_addrlsw,
-					u32 descriptor)
+void hw_atl_reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+					       u32 rx_dma_desc_base_addrlsw,
+					       u32 descriptor)
 {
-	aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrlsw_adr(descriptor),
+	aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor),
 			rx_dma_desc_base_addrlsw);
 }
 
-void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
-					u32 rx_dma_desc_base_addrmsw,
-					u32 descriptor)
+void hw_atl_reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+					       u32 rx_dma_desc_base_addrmsw,
+					       u32 descriptor)
 {
-	aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrmsw_adr(descriptor),
+	aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor),
 			rx_dma_desc_base_addrmsw);
 }
 
-u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor)
+u32 hw_atl_reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor)
 {
-	return aq_hw_read_reg(aq_hw, rx_dma_desc_stat_adr(descriptor));
+	return aq_hw_read_reg(aq_hw, HW_ATL_RX_DMA_DESC_STAT_ADR(descriptor));
 }
 
-void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
-				  u32 rx_dma_desc_tail_ptr, u32 descriptor)
+void hw_atl_reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+					 u32 rx_dma_desc_tail_ptr,
+					 u32 descriptor)
 {
-	aq_hw_write_reg(aq_hw, rx_dma_desc_tail_ptr_adr(descriptor),
+	aq_hw_write_reg(aq_hw, HW_ATL_RX_DMA_DESC_TAIL_PTR_ADR(descriptor),
 			rx_dma_desc_tail_ptr);
 }
 
-void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr_msk)
+void hw_atl_reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
+					u32 rx_flr_mcst_flr_msk)
 {
-	aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_msk_adr, rx_flr_mcst_flr_msk);
+	aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_MCST_FLR_MSK_ADR,
+			rx_flr_mcst_flr_msk);
 }
 
-void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
-			     u32 filter)
+void hw_atl_reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
+				    u32 filter)
 {
-	aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_adr(filter), rx_flr_mcst_flr);
+	aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_MCST_FLR_ADR(filter),
+			rx_flr_mcst_flr);
 }
 
-void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw, u32 rx_flr_rss_control1)
+void hw_atl_reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
+				       u32 rx_flr_rss_control1)
 {
-	aq_hw_write_reg(aq_hw, rx_flr_rss_control1_adr, rx_flr_rss_control1);
+	aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_RSS_CONTROL1_ADR,
+			rx_flr_rss_control1);
 }
 
-void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_filter_control2)
+void hw_atl_reg_rx_flr_control2_set(struct aq_hw_s *aq_hw,
+				    u32 rx_filter_control2)
 {
-	aq_hw_write_reg(aq_hw, rx_flr_control2_adr, rx_filter_control2);
+	aq_hw_write_reg(aq_hw, HW_ATL_RX_FLR_CONTROL2_ADR, rx_filter_control2);
 }
 
-void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
-				u32 rx_intr_moderation_ctl,
-				u32 queue)
+void hw_atl_reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+				       u32 rx_intr_moderation_ctl,
+				       u32 queue)
 {
-	aq_hw_write_reg(aq_hw, rx_intr_moderation_ctl_adr(queue),
+	aq_hw_write_reg(aq_hw, HW_ATL_RX_INTR_MODERATION_CTL_ADR(queue),
 			rx_intr_moderation_ctl);
 }
 
-void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl)
+void hw_atl_reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw,
+				     u32 tx_dma_debug_ctl)
 {
-	aq_hw_write_reg(aq_hw, tx_dma_debug_ctl_adr, tx_dma_debug_ctl);
+	aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DEBUG_CTL_ADR, tx_dma_debug_ctl);
 }
 
-void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
-					u32 tx_dma_desc_base_addrlsw,
-					u32 descriptor)
+void hw_atl_reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+					       u32 tx_dma_desc_base_addrlsw,
+					       u32 descriptor)
 {
-	aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrlsw_adr(descriptor),
+	aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor),
 			tx_dma_desc_base_addrlsw);
 }
 
-void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
-					u32 tx_dma_desc_base_addrmsw,
-					u32 descriptor)
+void hw_atl_reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+					       u32 tx_dma_desc_base_addrmsw,
+					       u32 descriptor)
 {
-	aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrmsw_adr(descriptor),
+	aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor),
 			tx_dma_desc_base_addrmsw);
 }
 
-void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
-				  u32 tx_dma_desc_tail_ptr, u32 descriptor)
+void hw_atl_reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+					 u32 tx_dma_desc_tail_ptr,
+					 u32 descriptor)
 {
-	aq_hw_write_reg(aq_hw, tx_dma_desc_tail_ptr_adr(descriptor),
+	aq_hw_write_reg(aq_hw, HW_ATL_TX_DMA_DESC_TAIL_PTR_ADR(descriptor),
 			tx_dma_desc_tail_ptr);
 }
 
-void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
-				u32 tx_intr_moderation_ctl,
-				u32 queue)
+void hw_atl_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+				       u32 tx_intr_moderation_ctl,
+				       u32 queue)
 {
-	aq_hw_write_reg(aq_hw, tx_intr_moderation_ctl_adr(queue),
+	aq_hw_write_reg(aq_hw, HW_ATL_TX_INTR_MODERATION_CTL_ADR(queue),
 			tx_intr_moderation_ctl);
 }
 
 /* RPB: rx packet buffer */
-void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk)
+void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk)
 {
-	aq_hw_write_reg_bit(aq_hw, rpb_dma_sys_lbk_adr,
-			    rpb_dma_sys_lbk_msk,
-			    rpb_dma_sys_lbk_shift, dma_sys_lbk);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_DMA_SYS_LBK_ADR,
+			    HW_ATL_RPB_DMA_SYS_LBK_MSK,
+			    HW_ATL_RPB_DMA_SYS_LBK_SHIFT, dma_sys_lbk);
 }
 
-void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
-				    u32 rx_traf_class_mode)
+void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
+					   u32 rx_traf_class_mode)
 {
-	aq_hw_write_reg_bit(aq_hw, rpb_rpf_rx_tc_mode_adr,
-			    rpb_rpf_rx_tc_mode_msk,
-			    rpb_rpf_rx_tc_mode_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RPF_RX_TC_MODE_ADR,
+			    HW_ATL_RPB_RPF_RX_TC_MODE_MSK,
+			    HW_ATL_RPB_RPF_RX_TC_MODE_SHIFT,
 			    rx_traf_class_mode);
 }
 
-void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en)
+void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en)
 {
-	aq_hw_write_reg_bit(aq_hw, rpb_rx_buf_en_adr, rpb_rx_buf_en_msk,
-			    rpb_rx_buf_en_shift, rx_buff_en);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RX_BUF_EN_ADR,
+			    HW_ATL_RPB_RX_BUF_EN_MSK,
+			    HW_ATL_RPB_RX_BUF_EN_SHIFT, rx_buff_en);
 }
 
-void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
-					 u32 rx_buff_hi_threshold_per_tc,
-					 u32 buffer)
+void hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+						u32 rx_buff_hi_threshold_per_tc,
+						u32 buffer)
 {
-	aq_hw_write_reg_bit(aq_hw, rpb_rxbhi_thresh_adr(buffer),
-			    rpb_rxbhi_thresh_msk, rpb_rxbhi_thresh_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBHI_THRESH_ADR(buffer),
+			    HW_ATL_RPB_RXBHI_THRESH_MSK,
+			    HW_ATL_RPB_RXBHI_THRESH_SHIFT,
 			    rx_buff_hi_threshold_per_tc);
 }
 
-void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
-					 u32 rx_buff_lo_threshold_per_tc,
-					 u32 buffer)
+void hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+						u32 rx_buff_lo_threshold_per_tc,
+						u32 buffer)
 {
-	aq_hw_write_reg_bit(aq_hw, rpb_rxblo_thresh_adr(buffer),
-			    rpb_rxblo_thresh_msk,
-			    rpb_rxblo_thresh_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBLO_THRESH_ADR(buffer),
+			    HW_ATL_RPB_RXBLO_THRESH_MSK,
+			    HW_ATL_RPB_RXBLO_THRESH_SHIFT,
 			    rx_buff_lo_threshold_per_tc);
 }
 
-void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode)
+void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode)
 {
-	aq_hw_write_reg_bit(aq_hw, rpb_rx_fc_mode_adr,
-			    rpb_rx_fc_mode_msk,
-			    rpb_rx_fc_mode_shift, rx_flow_ctl_mode);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RX_FC_MODE_ADR,
+			    HW_ATL_RPB_RX_FC_MODE_MSK,
+			    HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode);
 }
 
-void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
-				     u32 rx_pkt_buff_size_per_tc, u32 buffer)
+void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+					    u32 rx_pkt_buff_size_per_tc, u32 buffer)
 {
-	aq_hw_write_reg_bit(aq_hw, rpb_rxbbuf_size_adr(buffer),
-			    rpb_rxbbuf_size_msk, rpb_rxbbuf_size_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBBUF_SIZE_ADR(buffer),
+			    HW_ATL_RPB_RXBBUF_SIZE_MSK,
+			    HW_ATL_RPB_RXBBUF_SIZE_SHIFT,
 			    rx_pkt_buff_size_per_tc);
 }
 
-void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
-			       u32 buffer)
+void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
+				      u32 buffer)
 {
-	aq_hw_write_reg_bit(aq_hw, rpb_rxbxoff_en_adr(buffer),
-			    rpb_rxbxoff_en_msk, rpb_rxbxoff_en_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBXOFF_EN_ADR(buffer),
+			    HW_ATL_RPB_RXBXOFF_EN_MSK,
+			    HW_ATL_RPB_RXBXOFF_EN_SHIFT,
 			    rx_xoff_en_per_tc);
 }
 
 /* rpf */
 
-void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
-					u32 l2broadcast_count_threshold)
+void hw_atl_rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
+					       u32 l2broadcast_count_threshold)
 {
-	aq_hw_write_reg_bit(aq_hw, rpfl2bc_thresh_adr,
-			    rpfl2bc_thresh_msk,
-			    rpfl2bc_thresh_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_THRESH_ADR,
+			    HW_ATL_RPFL2BC_THRESH_MSK,
+			    HW_ATL_RPFL2BC_THRESH_SHIFT,
 			    l2broadcast_count_threshold);
 }
 
-void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en)
+void hw_atl_rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en)
 {
-	aq_hw_write_reg_bit(aq_hw, rpfl2bc_en_adr, rpfl2bc_en_msk,
-			    rpfl2bc_en_shift, l2broadcast_en);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_EN_ADR, HW_ATL_RPFL2BC_EN_MSK,
+			    HW_ATL_RPFL2BC_EN_SHIFT, l2broadcast_en);
 }
 
-void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2broadcast_flr_act)
+void hw_atl_rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
+				       u32 l2broadcast_flr_act)
 {
-	aq_hw_write_reg_bit(aq_hw, rpfl2bc_act_adr, rpfl2bc_act_msk,
-			    rpfl2bc_act_shift, l2broadcast_flr_act);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2BC_ACT_ADR,
+			    HW_ATL_RPFL2BC_ACT_MSK,
+			    HW_ATL_RPFL2BC_ACT_SHIFT, l2broadcast_flr_act);
 }
 
-void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en,
-			       u32 filter)
+void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
+				      u32 l2multicast_flr_en,
+				      u32 filter)
 {
-	aq_hw_write_reg_bit(aq_hw, rpfl2mc_enf_adr(filter),
-			    rpfl2mc_enf_msk,
-			    rpfl2mc_enf_shift, l2multicast_flr_en);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2MC_ENF_ADR(filter),
+			    HW_ATL_RPFL2MC_ENF_MSK,
+			    HW_ATL_RPFL2MC_ENF_SHIFT, l2multicast_flr_en);
 }
 
-void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
-				  u32 l2promiscuous_mode_en)
+void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
+					 u32 l2promiscuous_mode_en)
 {
-	aq_hw_write_reg_bit(aq_hw, rpfl2promis_mode_adr,
-			    rpfl2promis_mode_msk,
-			    rpfl2promis_mode_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2PROMIS_MODE_ADR,
+			    HW_ATL_RPFL2PROMIS_MODE_MSK,
+			    HW_ATL_RPFL2PROMIS_MODE_SHIFT,
 			    l2promiscuous_mode_en);
 }
 
-void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act,
-			      u32 filter)
+void hw_atl_rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw,
+				     u32 l2unicast_flr_act,
+				     u32 filter)
 {
-	aq_hw_write_reg_bit(aq_hw, rpfl2uc_actf_adr(filter),
-			    rpfl2uc_actf_msk, rpfl2uc_actf_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_ACTF_ADR(filter),
+			    HW_ATL_RPFL2UC_ACTF_MSK, HW_ATL_RPFL2UC_ACTF_SHIFT,
 			    l2unicast_flr_act);
 }
 
-void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
-			 u32 filter)
+void hw_atl_rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
+				u32 filter)
 {
-	aq_hw_write_reg_bit(aq_hw, rpfl2uc_enf_adr(filter),
-			    rpfl2uc_enf_msk,
-			    rpfl2uc_enf_shift, l2unicast_flr_en);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_ENF_ADR(filter),
+			    HW_ATL_RPFL2UC_ENF_MSK,
+			    HW_ATL_RPFL2UC_ENF_SHIFT, l2unicast_flr_en);
 }
 
-void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
-				      u32 l2unicast_dest_addresslsw,
-				      u32 filter)
+void hw_atl_rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
+					     u32 l2unicast_dest_addresslsw,
+					     u32 filter)
 {
-	aq_hw_write_reg(aq_hw, rpfl2uc_daflsw_adr(filter),
+	aq_hw_write_reg(aq_hw, HW_ATL_RPFL2UC_DAFLSW_ADR(filter),
 			l2unicast_dest_addresslsw);
 }
 
-void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
-				      u32 l2unicast_dest_addressmsw,
-				      u32 filter)
+void hw_atl_rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
+					     u32 l2unicast_dest_addressmsw,
+					     u32 filter)
 {
-	aq_hw_write_reg_bit(aq_hw, rpfl2uc_dafmsw_adr(filter),
-			    rpfl2uc_dafmsw_msk, rpfl2uc_dafmsw_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_DAFMSW_ADR(filter),
+			    HW_ATL_RPFL2UC_DAFMSW_MSK,
+			    HW_ATL_RPFL2UC_DAFMSW_SHIFT,
 			    l2unicast_dest_addressmsw);
 }
 
-void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
-				     u32 l2_accept_all_mc_packets)
+void hw_atl_rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
+					    u32 l2_accept_all_mc_packets)
 {
-	aq_hw_write_reg_bit(aq_hw, rpfl2mc_accept_all_adr,
-			    rpfl2mc_accept_all_msk,
-			    rpfl2mc_accept_all_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2MC_ACCEPT_ALL_ADR,
+			    HW_ATL_RPFL2MC_ACCEPT_ALL_MSK,
+			    HW_ATL_RPFL2MC_ACCEPT_ALL_SHIFT,
 			    l2_accept_all_mc_packets);
 }
 
-void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
-				      u32 user_priority_tc_map, u32 tc)
+void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
+					     u32 user_priority_tc_map, u32 tc)
 {
 /* register address for bitfield rx_tc_up{t}[2:0] */
 	static u32 rpf_rpb_rx_tc_upt_adr[8] = {
-			0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U,
-			0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U
+			0x000054c4U, 0x000054C4U, 0x000054C4U, 0x000054C4U,
+			0x000054c4U, 0x000054C4U, 0x000054C4U, 0x000054C4U
 		};
 
 /* bitmask for bitfield rx_tc_up{t}[2:0] */
@@ -711,273 +750,290 @@ void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
 			    user_priority_tc_map);
 }
 
-void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr)
+void hw_atl_rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_rss_key_addr_adr,
-			    rpf_rss_key_addr_msk,
-			    rpf_rss_key_addr_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_ADDR_ADR,
+			    HW_ATL_RPF_RSS_KEY_ADDR_MSK,
+			    HW_ATL_RPF_RSS_KEY_ADDR_SHIFT,
 			    rss_key_addr);
 }
 
-void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data)
+void hw_atl_rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data)
 {
-	aq_hw_write_reg(aq_hw, rpf_rss_key_wr_data_adr,
+	aq_hw_write_reg(aq_hw, HW_ATL_RPF_RSS_KEY_WR_DATA_ADR,
 			rss_key_wr_data);
 }
 
-u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr,
-				  rpf_rss_key_wr_eni_msk,
-				  rpf_rss_key_wr_eni_shift);
+	return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_WR_ENI_ADR,
+				  HW_ATL_RPF_RSS_KEY_WR_ENI_MSK,
+				  HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT);
 }
 
-void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en)
+void hw_atl_rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr,
-			    rpf_rss_key_wr_eni_msk,
-			    rpf_rss_key_wr_eni_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_WR_ENI_ADR,
+			    HW_ATL_RPF_RSS_KEY_WR_ENI_MSK,
+			    HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT,
 			    rss_key_wr_en);
 }
 
-void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw, u32 rss_redir_tbl_addr)
+void hw_atl_rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
+				       u32 rss_redir_tbl_addr)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_addr_adr,
-			    rpf_rss_redir_addr_msk,
-			    rpf_rss_redir_addr_shift, rss_redir_tbl_addr);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_ADDR_ADR,
+			    HW_ATL_RPF_RSS_REDIR_ADDR_MSK,
+			    HW_ATL_RPF_RSS_REDIR_ADDR_SHIFT,
+			    rss_redir_tbl_addr);
 }
 
-void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
-				   u32 rss_redir_tbl_wr_data)
+void hw_atl_rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
+					  u32 rss_redir_tbl_wr_data)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_data_adr,
-			    rpf_rss_redir_wr_data_msk,
-			    rpf_rss_redir_wr_data_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_DATA_ADR,
+			    HW_ATL_RPF_RSS_REDIR_WR_DATA_MSK,
+			    HW_ATL_RPF_RSS_REDIR_WR_DATA_SHIFT,
 			    rss_redir_tbl_wr_data);
 }
 
-u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr,
-				  rpf_rss_redir_wr_eni_msk,
-				  rpf_rss_redir_wr_eni_shift);
+	return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR,
+				  HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK,
+				  HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT);
 }
 
-void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en)
+void hw_atl_rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr,
-			    rpf_rss_redir_wr_eni_msk,
-			    rpf_rss_redir_wr_eni_shift, rss_redir_wr_en);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR,
+			    HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK,
+			    HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT, rss_redir_wr_en);
 }
 
-void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw, u32 tpo_to_rpf_sys_lbk)
+void hw_atl_rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
+				       u32 tpo_to_rpf_sys_lbk)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_tpo_rpf_sys_lbk_adr,
-			    rpf_tpo_rpf_sys_lbk_msk,
-			    rpf_tpo_rpf_sys_lbk_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_TPO_RPF_SYS_LBK_ADR,
+			    HW_ATL_RPF_TPO_RPF_SYS_LBK_MSK,
+			    HW_ATL_RPF_TPO_RPF_SYS_LBK_SHIFT,
 			    tpo_to_rpf_sys_lbk);
 }
 
-void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht)
+void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_vl_inner_tpid_adr,
-			    rpf_vl_inner_tpid_msk,
-			    rpf_vl_inner_tpid_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_INNER_TPID_ADR,
+			    HW_ATL_RPF_VL_INNER_TPID_MSK,
+			    HW_ATL_RPF_VL_INNER_TPID_SHIFT,
 			    vlan_inner_etht);
 }
 
-void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht)
+void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_vl_outer_tpid_adr,
-			    rpf_vl_outer_tpid_msk,
-			    rpf_vl_outer_tpid_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_OUTER_TPID_ADR,
+			    HW_ATL_RPF_VL_OUTER_TPID_MSK,
+			    HW_ATL_RPF_VL_OUTER_TPID_SHIFT,
 			    vlan_outer_etht);
 }
 
-void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en)
+void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
+				      u32 vlan_prom_mode_en)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_vl_promis_mode_adr,
-			    rpf_vl_promis_mode_msk,
-			    rpf_vl_promis_mode_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_PROMIS_MODE_ADR,
+			    HW_ATL_RPF_VL_PROMIS_MODE_MSK,
+			    HW_ATL_RPF_VL_PROMIS_MODE_SHIFT,
 			    vlan_prom_mode_en);
 }
 
-void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
-					  u32 vlan_accept_untagged_packets)
+void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
+						 u32 vlan_acc_untagged_packets)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_vl_accept_untagged_mode_adr,
-			    rpf_vl_accept_untagged_mode_msk,
-			    rpf_vl_accept_untagged_mode_shift,
-			    vlan_accept_untagged_packets);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_ADR,
+			    HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSK,
+			    HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_SHIFT,
+			    vlan_acc_untagged_packets);
 }
 
-void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act)
+void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw,
+				      u32 vlan_untagged_act)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_vl_untagged_act_adr,
-			    rpf_vl_untagged_act_msk,
-			    rpf_vl_untagged_act_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_UNTAGGED_ACT_ADR,
+			    HW_ATL_RPF_VL_UNTAGGED_ACT_MSK,
+			    HW_ATL_RPF_VL_UNTAGGED_ACT_SHIFT,
 			    vlan_untagged_act);
 }
 
-void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter)
+void hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en,
+				u32 filter)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_vl_en_f_adr(filter),
-			    rpf_vl_en_f_msk,
-			    rpf_vl_en_f_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_EN_F_ADR(filter),
+			    HW_ATL_RPF_VL_EN_F_MSK,
+			    HW_ATL_RPF_VL_EN_F_SHIFT,
 			    vlan_flr_en);
 }
 
-void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_flr_act, u32 filter)
+void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_flr_act,
+				 u32 filter)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_vl_act_f_adr(filter),
-			    rpf_vl_act_f_msk,
-			    rpf_vl_act_f_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ACT_F_ADR(filter),
+			    HW_ATL_RPF_VL_ACT_F_MSK,
+			    HW_ATL_RPF_VL_ACT_F_SHIFT,
 			    vlan_flr_act);
 }
 
-void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter)
+void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
+				u32 filter)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_vl_id_f_adr(filter),
-			    rpf_vl_id_f_msk,
-			    rpf_vl_id_f_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_ID_F_ADR(filter),
+			    HW_ATL_RPF_VL_ID_F_MSK,
+			    HW_ATL_RPF_VL_ID_F_SHIFT,
 			    vlan_id_flr);
 }
 
-void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter)
+void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
+				u32 filter)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_et_enf_adr(filter),
-			    rpf_et_enf_msk,
-			    rpf_et_enf_shift, etht_flr_en);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ENF_ADR(filter),
+			    HW_ATL_RPF_ET_ENF_MSK,
+			    HW_ATL_RPF_ET_ENF_SHIFT, etht_flr_en);
 }
 
-void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
-				   u32 etht_user_priority_en, u32 filter)
+void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
+					  u32 etht_user_priority_en, u32 filter)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_et_upfen_adr(filter),
-			    rpf_et_upfen_msk, rpf_et_upfen_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPFEN_ADR(filter),
+			    HW_ATL_RPF_ET_UPFEN_MSK, HW_ATL_RPF_ET_UPFEN_SHIFT,
 			    etht_user_priority_en);
 }
 
-void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en,
-			      u32 filter)
+void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw,
+				     u32 etht_rx_queue_en,
+				     u32 filter)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_et_rxqfen_adr(filter),
-			    rpf_et_rxqfen_msk, rpf_et_rxqfen_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQFEN_ADR(filter),
+			    HW_ATL_RPF_ET_RXQFEN_MSK,
+			    HW_ATL_RPF_ET_RXQFEN_SHIFT,
 			    etht_rx_queue_en);
 }
 
-void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority,
-				u32 filter)
+void hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw,
+				       u32 etht_user_priority,
+				       u32 filter)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_et_upf_adr(filter),
-			    rpf_et_upf_msk,
-			    rpf_et_upf_shift, etht_user_priority);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_UPF_ADR(filter),
+			    HW_ATL_RPF_ET_UPF_MSK,
+			    HW_ATL_RPF_ET_UPF_SHIFT, etht_user_priority);
 }
 
-void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
-			   u32 filter)
+void hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
+				  u32 filter)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_et_rxqf_adr(filter),
-			    rpf_et_rxqf_msk,
-			    rpf_et_rxqf_shift, etht_rx_queue);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_RXQF_ADR(filter),
+			    HW_ATL_RPF_ET_RXQF_MSK,
+			    HW_ATL_RPF_ET_RXQF_SHIFT, etht_rx_queue);
 }
 
-void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
-			    u32 filter)
+void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
+				   u32 filter)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_et_mng_rxqf_adr(filter),
-			    rpf_et_mng_rxqf_msk, rpf_et_mng_rxqf_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_MNG_RXQF_ADR(filter),
+			    HW_ATL_RPF_ET_MNG_RXQF_MSK,
+			    HW_ATL_RPF_ET_MNG_RXQF_SHIFT,
 			    etht_mgt_queue);
 }
 
-void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act, u32 filter)
+void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
+				 u32 filter)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_et_actf_adr(filter),
-			    rpf_et_actf_msk,
-			    rpf_et_actf_shift, etht_flr_act);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_ACTF_ADR(filter),
+			    HW_ATL_RPF_ET_ACTF_MSK,
+			    HW_ATL_RPF_ET_ACTF_SHIFT, etht_flr_act);
 }
 
-void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter)
+void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter)
 {
-	aq_hw_write_reg_bit(aq_hw, rpf_et_valf_adr(filter),
-			    rpf_et_valf_msk,
-			    rpf_et_valf_shift, etht_flr);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_ET_VALF_ADR(filter),
+			    HW_ATL_RPF_ET_VALF_MSK,
+			    HW_ATL_RPF_ET_VALF_SHIFT, etht_flr);
 }
 
 /* RPO: rx packet offload */
-void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
-				       u32 ipv4header_crc_offload_en)
+void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+					      u32 ipv4header_crc_offload_en)
 {
-	aq_hw_write_reg_bit(aq_hw, rpo_ipv4chk_en_adr,
-			    rpo_ipv4chk_en_msk,
-			    rpo_ipv4chk_en_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_IPV4CHK_EN_ADR,
+			    HW_ATL_RPO_IPV4CHK_EN_MSK,
+			    HW_ATL_RPO_IPV4CHK_EN_SHIFT,
 			    ipv4header_crc_offload_en);
 }
 
-void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
-				    u32 rx_desc_vlan_stripping, u32 descriptor)
+void hw_atl_rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
+					   u32 rx_desc_vlan_stripping,
+					   u32 descriptor)
 {
-	aq_hw_write_reg_bit(aq_hw, rpo_descdvl_strip_adr(descriptor),
-			    rpo_descdvl_strip_msk,
-			    rpo_descdvl_strip_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_DESCDVL_STRIP_ADR(descriptor),
+			    HW_ATL_RPO_DESCDVL_STRIP_MSK,
+			    HW_ATL_RPO_DESCDVL_STRIP_SHIFT,
 			    rx_desc_vlan_stripping);
 }
 
-void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
-				    u32 tcp_udp_crc_offload_en)
+void hw_atl_rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+					   u32 tcp_udp_crc_offload_en)
 {
-	aq_hw_write_reg_bit(aq_hw, rpol4chk_en_adr, rpol4chk_en_msk,
-			    rpol4chk_en_shift, tcp_udp_crc_offload_en);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPOL4CHK_EN_ADR,
+			    HW_ATL_RPOL4CHK_EN_MSK,
+			    HW_ATL_RPOL4CHK_EN_SHIFT, tcp_udp_crc_offload_en);
 }
 
-void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en)
+void hw_atl_rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en)
 {
-	aq_hw_write_reg(aq_hw, rpo_lro_en_adr, lro_en);
+	aq_hw_write_reg(aq_hw, HW_ATL_RPO_LRO_EN_ADR, lro_en);
 }
 
-void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
-				       u32 lro_patch_optimization_en)
+void hw_atl_rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
+					      u32 lro_patch_optimization_en)
 {
-	aq_hw_write_reg_bit(aq_hw, rpo_lro_ptopt_en_adr,
-			    rpo_lro_ptopt_en_msk,
-			    rpo_lro_ptopt_en_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_PTOPT_EN_ADR,
+			    HW_ATL_RPO_LRO_PTOPT_EN_MSK,
+			    HW_ATL_RPO_LRO_PTOPT_EN_SHIFT,
 			    lro_patch_optimization_en);
 }
 
-void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
-			       u32 lro_qsessions_lim)
+void hw_atl_rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
+				      u32 lro_qsessions_lim)
 {
-	aq_hw_write_reg_bit(aq_hw, rpo_lro_qses_lmt_adr,
-			    rpo_lro_qses_lmt_msk,
-			    rpo_lro_qses_lmt_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_QSES_LMT_ADR,
+			    HW_ATL_RPO_LRO_QSES_LMT_MSK,
+			    HW_ATL_RPO_LRO_QSES_LMT_SHIFT,
 			    lro_qsessions_lim);
 }
 
-void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim)
+void hw_atl_rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw,
+				       u32 lro_total_desc_lim)
 {
-	aq_hw_write_reg_bit(aq_hw, rpo_lro_tot_dsc_lmt_adr,
-			    rpo_lro_tot_dsc_lmt_msk,
-			    rpo_lro_tot_dsc_lmt_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_TOT_DSC_LMT_ADR,
+			    HW_ATL_RPO_LRO_TOT_DSC_LMT_MSK,
+			    HW_ATL_RPO_LRO_TOT_DSC_LMT_SHIFT,
 			    lro_total_desc_lim);
 }
 
-void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
-				      u32 lro_min_pld_of_first_pkt)
+void hw_atl_rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
+					     u32 lro_min_pld_of_first_pkt)
 {
-	aq_hw_write_reg_bit(aq_hw, rpo_lro_pkt_min_adr,
-			    rpo_lro_pkt_min_msk,
-			    rpo_lro_pkt_min_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_PKT_MIN_ADR,
+			    HW_ATL_RPO_LRO_PKT_MIN_MSK,
+			    HW_ATL_RPO_LRO_PKT_MIN_SHIFT,
 			    lro_min_pld_of_first_pkt);
 }
 
-void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_pkt_lim)
+void hw_atl_rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_pkt_lim)
 {
-	aq_hw_write_reg(aq_hw, rpo_lro_rsc_max_adr, lro_pkt_lim);
+	aq_hw_write_reg(aq_hw, HW_ATL_RPO_LRO_RSC_MAX_ADR, lro_pkt_lim);
 }
 
-void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
-					u32 lro_max_number_of_descriptors,
-					u32 lro)
+void hw_atl_rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
+					       u32 lro_max_number_of_descriptors,
+					       u32 lro)
 {
 /* Register address for bitfield lro{L}_des_max[1:0] */
 	static u32 rpo_lro_ldes_max_adr[32] = {
@@ -1017,378 +1073,390 @@ void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
 			    lro_max_number_of_descriptors);
 }
 
-void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
-				   u32 lro_time_base_divider)
+void hw_atl_rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
+					  u32 lro_time_base_divider)
 {
-	aq_hw_write_reg_bit(aq_hw, rpo_lro_tb_div_adr,
-			    rpo_lro_tb_div_msk,
-			    rpo_lro_tb_div_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_TB_DIV_ADR,
+			    HW_ATL_RPO_LRO_TB_DIV_MSK,
+			    HW_ATL_RPO_LRO_TB_DIV_SHIFT,
 			    lro_time_base_divider);
 }
 
-void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
-				   u32 lro_inactive_interval)
+void hw_atl_rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
+					  u32 lro_inactive_interval)
 {
-	aq_hw_write_reg_bit(aq_hw, rpo_lro_ina_ival_adr,
-			    rpo_lro_ina_ival_msk,
-			    rpo_lro_ina_ival_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_INA_IVAL_ADR,
+			    HW_ATL_RPO_LRO_INA_IVAL_MSK,
+			    HW_ATL_RPO_LRO_INA_IVAL_SHIFT,
 			    lro_inactive_interval);
 }
 
-void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
-					 u32 lro_max_coalescing_interval)
+void hw_atl_rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
+						u32 lro_max_coal_interval)
 {
-	aq_hw_write_reg_bit(aq_hw, rpo_lro_max_ival_adr,
-			    rpo_lro_max_ival_msk,
-			    rpo_lro_max_ival_shift,
-			    lro_max_coalescing_interval);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RPO_LRO_MAX_IVAL_ADR,
+			    HW_ATL_RPO_LRO_MAX_IVAL_MSK,
+			    HW_ATL_RPO_LRO_MAX_IVAL_SHIFT,
+			    lro_max_coal_interval);
 }
 
 /* rx */
-void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis)
+void hw_atl_rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis)
 {
-	aq_hw_write_reg_bit(aq_hw, rx_reg_res_dsbl_adr,
-			    rx_reg_res_dsbl_msk,
-			    rx_reg_res_dsbl_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_RX_REG_RES_DSBL_ADR,
+			    HW_ATL_RX_REG_RES_DSBL_MSK,
+			    HW_ATL_RX_REG_RES_DSBL_SHIFT,
 			    rx_reg_res_dis);
 }
 
 /* tdm */
-void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
+void hw_atl_tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
 {
-	aq_hw_write_reg_bit(aq_hw, tdm_dcadcpuid_adr(dca),
-			    tdm_dcadcpuid_msk,
-			    tdm_dcadcpuid_shift, cpuid);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCADCPUID_ADR(dca),
+			    HW_ATL_TDM_DCADCPUID_MSK,
+			    HW_ATL_TDM_DCADCPUID_SHIFT, cpuid);
 }
 
-void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
-				   u32 large_send_offload_en)
+void hw_atl_tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
+					  u32 large_send_offload_en)
 {
-	aq_hw_write_reg(aq_hw, tdm_lso_en_adr, large_send_offload_en);
+	aq_hw_write_reg(aq_hw, HW_ATL_TDM_LSO_EN_ADR, large_send_offload_en);
 }
 
-void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en)
+void hw_atl_tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en)
 {
-	aq_hw_write_reg_bit(aq_hw, tdm_dca_en_adr, tdm_dca_en_msk,
-			    tdm_dca_en_shift, tx_dca_en);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCA_EN_ADR, HW_ATL_TDM_DCA_EN_MSK,
+			    HW_ATL_TDM_DCA_EN_SHIFT, tx_dca_en);
 }
 
-void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode)
+void hw_atl_tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode)
 {
-	aq_hw_write_reg_bit(aq_hw, tdm_dca_mode_adr, tdm_dca_mode_msk,
-			    tdm_dca_mode_shift, tx_dca_mode);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCA_MODE_ADR,
+			    HW_ATL_TDM_DCA_MODE_MSK,
+			    HW_ATL_TDM_DCA_MODE_SHIFT, tx_dca_mode);
 }
 
-void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca)
+void hw_atl_tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en,
+				   u32 dca)
 {
-	aq_hw_write_reg_bit(aq_hw, tdm_dcaddesc_en_adr(dca),
-			    tdm_dcaddesc_en_msk, tdm_dcaddesc_en_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DCADDESC_EN_ADR(dca),
+			    HW_ATL_TDM_DCADDESC_EN_MSK,
+			    HW_ATL_TDM_DCADDESC_EN_SHIFT,
 			    tx_desc_dca_en);
 }
 
-void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor)
+void hw_atl_tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en,
+			       u32 descriptor)
 {
-	aq_hw_write_reg_bit(aq_hw, tdm_descden_adr(descriptor),
-			    tdm_descden_msk,
-			    tdm_descden_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDEN_ADR(descriptor),
+			    HW_ATL_TDM_DESCDEN_MSK,
+			    HW_ATL_TDM_DESCDEN_SHIFT,
 			    tx_desc_en);
 }
 
-u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
+u32 hw_atl_tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
 {
-	return aq_hw_read_reg_bit(aq_hw, tdm_descdhd_adr(descriptor),
-				  tdm_descdhd_msk, tdm_descdhd_shift);
+	return aq_hw_read_reg_bit(aq_hw, HW_ATL_TDM_DESCDHD_ADR(descriptor),
+				  HW_ATL_TDM_DESCDHD_MSK,
+				  HW_ATL_TDM_DESCDHD_SHIFT);
 }
 
-void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
-			 u32 descriptor)
+void hw_atl_tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
+				u32 descriptor)
 {
-	aq_hw_write_reg_bit(aq_hw, tdm_descdlen_adr(descriptor),
-			    tdm_descdlen_msk,
-			    tdm_descdlen_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDLEN_ADR(descriptor),
+			    HW_ATL_TDM_DESCDLEN_MSK,
+			    HW_ATL_TDM_DESCDLEN_SHIFT,
 			    tx_desc_len);
 }
 
-void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
-				  u32 tx_desc_wr_wb_irq_en)
+void hw_atl_tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+					 u32 tx_desc_wr_wb_irq_en)
 {
-	aq_hw_write_reg_bit(aq_hw, tdm_int_desc_wrb_en_adr,
-			    tdm_int_desc_wrb_en_msk,
-			    tdm_int_desc_wrb_en_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_INT_DESC_WRB_EN_ADR,
+			    HW_ATL_TDM_INT_DESC_WRB_EN_MSK,
+			    HW_ATL_TDM_INT_DESC_WRB_EN_SHIFT,
 			    tx_desc_wr_wb_irq_en);
 }
 
-void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
-				     u32 tx_desc_wr_wb_threshold,
-				     u32 descriptor)
+void hw_atl_tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
+					    u32 tx_desc_wr_wb_threshold,
+					    u32 descriptor)
 {
-	aq_hw_write_reg_bit(aq_hw, tdm_descdwrb_thresh_adr(descriptor),
-			    tdm_descdwrb_thresh_msk,
-			    tdm_descdwrb_thresh_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_DESCDWRB_THRESH_ADR(descriptor),
+			    HW_ATL_TDM_DESCDWRB_THRESH_MSK,
+			    HW_ATL_TDM_DESCDWRB_THRESH_SHIFT,
 			    tx_desc_wr_wb_threshold);
 }
 
-void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
-			       u32 tdm_irq_moderation_en)
+void hw_atl_tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+				      u32 tdm_irq_moderation_en)
 {
-	aq_hw_write_reg_bit(aq_hw, tdm_int_mod_en_adr,
-			    tdm_int_mod_en_msk,
-			    tdm_int_mod_en_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TDM_INT_MOD_EN_ADR,
+			    HW_ATL_TDM_INT_MOD_EN_MSK,
+			    HW_ATL_TDM_INT_MOD_EN_SHIFT,
 			    tdm_irq_moderation_en);
 }
 
 /* thm */
-void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
-				       u32 lso_tcp_flag_of_first_pkt)
+void hw_atl_thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
+					      u32 lso_tcp_flag_of_first_pkt)
 {
-	aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_first_adr,
-			    thm_lso_tcp_flag_first_msk,
-			    thm_lso_tcp_flag_first_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_FIRST_ADR,
+			    HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSK,
+			    HW_ATL_THM_LSO_TCP_FLAG_FIRST_SHIFT,
 			    lso_tcp_flag_of_first_pkt);
 }
 
-void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
-				      u32 lso_tcp_flag_of_last_pkt)
+void hw_atl_thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
+					     u32 lso_tcp_flag_of_last_pkt)
 {
-	aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_last_adr,
-			    thm_lso_tcp_flag_last_msk,
-			    thm_lso_tcp_flag_last_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_LAST_ADR,
+			    HW_ATL_THM_LSO_TCP_FLAG_LAST_MSK,
+			    HW_ATL_THM_LSO_TCP_FLAG_LAST_SHIFT,
 			    lso_tcp_flag_of_last_pkt);
 }
 
-void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
-					u32 lso_tcp_flag_of_middle_pkt)
+void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
+					       u32 lso_tcp_flag_of_middle_pkt)
 {
-	aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_mid_adr,
-			    thm_lso_tcp_flag_mid_msk,
-			    thm_lso_tcp_flag_mid_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_THM_LSO_TCP_FLAG_MID_ADR,
+			    HW_ATL_THM_LSO_TCP_FLAG_MID_MSK,
+			    HW_ATL_THM_LSO_TCP_FLAG_MID_SHIFT,
 			    lso_tcp_flag_of_middle_pkt);
 }
 
 /* TPB: tx packet buffer */
-void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
+void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
 {
-	aq_hw_write_reg_bit(aq_hw, tpb_tx_buf_en_adr, tpb_tx_buf_en_msk,
-			    tpb_tx_buf_en_shift, tx_buff_en);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_BUF_EN_ADR,
+			    HW_ATL_TPB_TX_BUF_EN_MSK,
+			    HW_ATL_TPB_TX_BUF_EN_SHIFT, tx_buff_en);
 }
 
-void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
-					 u32 tx_buff_hi_threshold_per_tc,
+void hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+						u32 tx_buff_hi_threshold_per_tc,
 					 u32 buffer)
 {
-	aq_hw_write_reg_bit(aq_hw, tpb_txbhi_thresh_adr(buffer),
-			    tpb_txbhi_thresh_msk, tpb_txbhi_thresh_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBHI_THRESH_ADR(buffer),
+			    HW_ATL_TPB_TXBHI_THRESH_MSK,
+			    HW_ATL_TPB_TXBHI_THRESH_SHIFT,
 			    tx_buff_hi_threshold_per_tc);
 }
 
-void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
-					 u32 tx_buff_lo_threshold_per_tc,
+void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+						u32 tx_buff_lo_threshold_per_tc,
 					 u32 buffer)
 {
-	aq_hw_write_reg_bit(aq_hw, tpb_txblo_thresh_adr(buffer),
-			    tpb_txblo_thresh_msk, tpb_txblo_thresh_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBLO_THRESH_ADR(buffer),
+			    HW_ATL_TPB_TXBLO_THRESH_MSK,
+			    HW_ATL_TPB_TXBLO_THRESH_SHIFT,
 			    tx_buff_lo_threshold_per_tc);
 }
 
-void tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en)
+void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en)
 {
-	aq_hw_write_reg_bit(aq_hw, tpb_dma_sys_lbk_adr,
-			    tpb_dma_sys_lbk_msk,
-			    tpb_dma_sys_lbk_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_DMA_SYS_LBK_ADR,
+			    HW_ATL_TPB_DMA_SYS_LBK_MSK,
+			    HW_ATL_TPB_DMA_SYS_LBK_SHIFT,
 			    tx_dma_sys_lbk_en);
 }
 
-void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
-				     u32 tx_pkt_buff_size_per_tc, u32 buffer)
+void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+					    u32 tx_pkt_buff_size_per_tc, u32 buffer)
 {
-	aq_hw_write_reg_bit(aq_hw, tpb_txbbuf_size_adr(buffer),
-			    tpb_txbbuf_size_msk,
-			    tpb_txbbuf_size_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TXBBUF_SIZE_ADR(buffer),
+			    HW_ATL_TPB_TXBBUF_SIZE_MSK,
+			    HW_ATL_TPB_TXBBUF_SIZE_SHIFT,
 			    tx_pkt_buff_size_per_tc);
 }
 
-void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en)
+void hw_atl_tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en)
 {
-	aq_hw_write_reg_bit(aq_hw, tpb_tx_scp_ins_en_adr,
-			    tpb_tx_scp_ins_en_msk,
-			    tpb_tx_scp_ins_en_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_SCP_INS_EN_ADR,
+			    HW_ATL_TPB_TX_SCP_INS_EN_MSK,
+			    HW_ATL_TPB_TX_SCP_INS_EN_SHIFT,
 			    tx_path_scp_ins_en);
 }
 
 /* TPO: tx packet offload */
-void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
-				       u32 ipv4header_crc_offload_en)
+void hw_atl_tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+					      u32 ipv4header_crc_offload_en)
 {
-	aq_hw_write_reg_bit(aq_hw, tpo_ipv4chk_en_adr,
-			    tpo_ipv4chk_en_msk,
-			    tpo_ipv4chk_en_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPO_IPV4CHK_EN_ADR,
+			    HW_ATL_TPO_IPV4CHK_EN_MSK,
+			    HW_ATL_TPO_IPV4CHK_EN_SHIFT,
 			    ipv4header_crc_offload_en);
 }
 
-void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
-				    u32 tcp_udp_crc_offload_en)
+void hw_atl_tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+					   u32 tcp_udp_crc_offload_en)
 {
-	aq_hw_write_reg_bit(aq_hw, tpol4chk_en_adr,
-			    tpol4chk_en_msk,
-			    tpol4chk_en_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPOL4CHK_EN_ADR,
+			    HW_ATL_TPOL4CHK_EN_MSK,
+			    HW_ATL_TPOL4CHK_EN_SHIFT,
 			    tcp_udp_crc_offload_en);
 }
 
-void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en)
+void hw_atl_tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw,
+				      u32 tx_pkt_sys_lbk_en)
 {
-	aq_hw_write_reg_bit(aq_hw, tpo_pkt_sys_lbk_adr,
-			    tpo_pkt_sys_lbk_msk,
-			    tpo_pkt_sys_lbk_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPO_PKT_SYS_LBK_ADR,
+			    HW_ATL_TPO_PKT_SYS_LBK_MSK,
+			    HW_ATL_TPO_PKT_SYS_LBK_SHIFT,
 			    tx_pkt_sys_lbk_en);
 }
 
 /* TPS: tx packet scheduler */
-void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
-				       u32 tx_pkt_shed_data_arb_mode)
+void hw_atl_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
+					      u32 tx_pkt_shed_data_arb_mode)
 {
-	aq_hw_write_reg_bit(aq_hw, tps_data_tc_arb_mode_adr,
-			    tps_data_tc_arb_mode_msk,
-			    tps_data_tc_arb_mode_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TC_ARB_MODE_ADR,
+			    HW_ATL_TPS_DATA_TC_ARB_MODE_MSK,
+			    HW_ATL_TPS_DATA_TC_ARB_MODE_SHIFT,
 			    tx_pkt_shed_data_arb_mode);
 }
 
-void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
-						 u32 curr_time_res)
+void hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
+							u32 curr_time_res)
 {
-	aq_hw_write_reg_bit(aq_hw, tps_desc_rate_ta_rst_adr,
-			    tps_desc_rate_ta_rst_msk,
-			    tps_desc_rate_ta_rst_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_TA_RST_ADR,
+			    HW_ATL_TPS_DESC_RATE_TA_RST_MSK,
+			    HW_ATL_TPS_DESC_RATE_TA_RST_SHIFT,
 			    curr_time_res);
 }
 
-void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
-				       u32 tx_pkt_shed_desc_rate_lim)
+void hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
+					      u32 tx_pkt_shed_desc_rate_lim)
 {
-	aq_hw_write_reg_bit(aq_hw, tps_desc_rate_lim_adr,
-			    tps_desc_rate_lim_msk,
-			    tps_desc_rate_lim_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_LIM_ADR,
+			    HW_ATL_TPS_DESC_RATE_LIM_MSK,
+			    HW_ATL_TPS_DESC_RATE_LIM_SHIFT,
 			    tx_pkt_shed_desc_rate_lim);
 }
 
-void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
-					  u32 tx_pkt_shed_desc_tc_arb_mode)
+void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
+						 u32 arb_mode)
 {
-	aq_hw_write_reg_bit(aq_hw, tps_desc_tc_arb_mode_adr,
-			    tps_desc_tc_arb_mode_msk,
-			    tps_desc_tc_arb_mode_shift,
-			    tx_pkt_shed_desc_tc_arb_mode);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TC_ARB_MODE_ADR,
+			    HW_ATL_TPS_DESC_TC_ARB_MODE_MSK,
+			    HW_ATL_TPS_DESC_TC_ARB_MODE_SHIFT,
+			    arb_mode);
 }
 
-void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
-					    u32 tx_pkt_shed_desc_tc_max_credit,
-					    u32 tc)
+void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
+						   u32 max_credit,
+						   u32 tc)
 {
-	aq_hw_write_reg_bit(aq_hw, tps_desc_tctcredit_max_adr(tc),
-			    tps_desc_tctcredit_max_msk,
-			    tps_desc_tctcredit_max_shift,
-			    tx_pkt_shed_desc_tc_max_credit);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTCREDIT_MAX_ADR(tc),
+			    HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSK,
+			    HW_ATL_TPS_DESC_TCTCREDIT_MAX_SHIFT,
+			    max_credit);
 }
 
-void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
-					u32 tx_pkt_shed_desc_tc_weight, u32 tc)
+void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
+					       u32 tx_pkt_shed_desc_tc_weight,
+					       u32 tc)
 {
-	aq_hw_write_reg_bit(aq_hw, tps_desc_tctweight_adr(tc),
-			    tps_desc_tctweight_msk,
-			    tps_desc_tctweight_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTWEIGHT_ADR(tc),
+			    HW_ATL_TPS_DESC_TCTWEIGHT_MSK,
+			    HW_ATL_TPS_DESC_TCTWEIGHT_SHIFT,
 			    tx_pkt_shed_desc_tc_weight);
 }
 
-void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
-					  u32 tx_pkt_shed_desc_vm_arb_mode)
+void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
+						 u32 arb_mode)
 {
-	aq_hw_write_reg_bit(aq_hw, tps_desc_vm_arb_mode_adr,
-			    tps_desc_vm_arb_mode_msk,
-			    tps_desc_vm_arb_mode_shift,
-			    tx_pkt_shed_desc_vm_arb_mode);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_VM_ARB_MODE_ADR,
+			    HW_ATL_TPS_DESC_VM_ARB_MODE_MSK,
+			    HW_ATL_TPS_DESC_VM_ARB_MODE_SHIFT,
+			    arb_mode);
 }
 
-void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
-					    u32 tx_pkt_shed_tc_data_max_credit,
-					    u32 tc)
+void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
+						   u32 max_credit,
+						   u32 tc)
 {
-	aq_hw_write_reg_bit(aq_hw, tps_data_tctcredit_max_adr(tc),
-			    tps_data_tctcredit_max_msk,
-			    tps_data_tctcredit_max_shift,
-			    tx_pkt_shed_tc_data_max_credit);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTCREDIT_MAX_ADR(tc),
+			    HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSK,
+			    HW_ATL_TPS_DATA_TCTCREDIT_MAX_SHIFT,
+			    max_credit);
 }
 
-void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
-					u32 tx_pkt_shed_tc_data_weight, u32 tc)
+void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
+					       u32 tx_pkt_shed_tc_data_weight,
+					       u32 tc)
 {
-	aq_hw_write_reg_bit(aq_hw, tps_data_tctweight_adr(tc),
-			    tps_data_tctweight_msk,
-			    tps_data_tctweight_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTWEIGHT_ADR(tc),
+			    HW_ATL_TPS_DATA_TCTWEIGHT_MSK,
+			    HW_ATL_TPS_DATA_TCTWEIGHT_SHIFT,
 			    tx_pkt_shed_tc_data_weight);
 }
 
 /* tx */
-void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis)
+void hw_atl_tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis)
 {
-	aq_hw_write_reg_bit(aq_hw, tx_reg_res_dsbl_adr,
-			    tx_reg_res_dsbl_msk,
-			    tx_reg_res_dsbl_shift, tx_reg_res_dis);
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_TX_REG_RES_DSBL_ADR,
+			    HW_ATL_TX_REG_RES_DSBL_MSK,
+			    HW_ATL_TX_REG_RES_DSBL_SHIFT, tx_reg_res_dis);
 }
 
 /* msm */
-u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_msm_reg_access_status_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg_bit(aq_hw, msm_reg_access_busy_adr,
-				  msm_reg_access_busy_msk,
-				  msm_reg_access_busy_shift);
+	return aq_hw_read_reg_bit(aq_hw, HW_ATL_MSM_REG_ACCESS_BUSY_ADR,
+				  HW_ATL_MSM_REG_ACCESS_BUSY_MSK,
+				  HW_ATL_MSM_REG_ACCESS_BUSY_SHIFT);
 }
 
-void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
-					u32 reg_addr_for_indirect_addr)
+void hw_atl_msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
+					       u32 reg_addr_for_indirect_addr)
 {
-	aq_hw_write_reg_bit(aq_hw, msm_reg_addr_adr,
-			    msm_reg_addr_msk,
-			    msm_reg_addr_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_ADDR_ADR,
+			    HW_ATL_MSM_REG_ADDR_MSK,
+			    HW_ATL_MSM_REG_ADDR_SHIFT,
 			    reg_addr_for_indirect_addr);
 }
 
-void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe)
+void hw_atl_msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe)
 {
-	aq_hw_write_reg_bit(aq_hw, msm_reg_rd_strobe_adr,
-			    msm_reg_rd_strobe_msk,
-			    msm_reg_rd_strobe_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_RD_STROBE_ADR,
+			    HW_ATL_MSM_REG_RD_STROBE_MSK,
+			    HW_ATL_MSM_REG_RD_STROBE_SHIFT,
 			    reg_rd_strobe);
 }
 
-u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw)
+u32 hw_atl_msm_reg_rd_data_get(struct aq_hw_s *aq_hw)
 {
-	return aq_hw_read_reg(aq_hw, msm_reg_rd_data_adr);
+	return aq_hw_read_reg(aq_hw, HW_ATL_MSM_REG_RD_DATA_ADR);
 }
 
-void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data)
+void hw_atl_msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data)
 {
-	aq_hw_write_reg(aq_hw, msm_reg_wr_data_adr, reg_wr_data);
+	aq_hw_write_reg(aq_hw, HW_ATL_MSM_REG_WR_DATA_ADR, reg_wr_data);
 }
 
-void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe)
+void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe)
 {
-	aq_hw_write_reg_bit(aq_hw, msm_reg_wr_strobe_adr,
-			    msm_reg_wr_strobe_msk,
-			    msm_reg_wr_strobe_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_MSM_REG_WR_STROBE_ADR,
+			    HW_ATL_MSM_REG_WR_STROBE_MSK,
+			    HW_ATL_MSM_REG_WR_STROBE_SHIFT,
 			    reg_wr_strobe);
 }
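The MSM block sits behind an indirect access window: software parks the target address, fires a strobe, then polls the busy flag before collecting the data. A minimal sketch of a read composed from the renamed accessors, assuming <linux/delay.h> and <linux/errno.h>; the helper name, polling budget and delay are illustrative, not the driver's actual sequence:

static int hw_atl_msm_reg_read_sketch(struct aq_hw_s *aq_hw, u32 addr,
				      u32 *val)
{
	int budget = 100;

	/* wait for any in-flight indirect access to drain */
	while (hw_atl_msm_reg_access_status_get(aq_hw)) {
		if (--budget < 0)
			return -ETIME;
		udelay(10);
	}

	hw_atl_msm_reg_addr_for_indirect_addr_set(aq_hw, addr);
	hw_atl_msm_reg_rd_strobe_set(aq_hw, 1U);	/* kick off the read */

	budget = 100;
	while (hw_atl_msm_reg_access_status_get(aq_hw)) {
		if (--budget < 0)
			return -ETIME;
		udelay(10);
	}

	*val = hw_atl_msm_reg_rd_data_get(aq_hw);
	return 0;
}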
 
 /* pci */
-void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis)
+void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis)
 {
-	aq_hw_write_reg_bit(aq_hw, pci_reg_res_dsbl_adr,
-			    pci_reg_res_dsbl_msk,
-			    pci_reg_res_dsbl_shift,
+	aq_hw_write_reg_bit(aq_hw, HW_ATL_PCI_REG_RES_DSBL_ADR,
+			    HW_ATL_PCI_REG_RES_DSBL_MSK,
+			    HW_ATL_PCI_REG_RES_DSBL_SHIFT,
 			    pci_reg_res_dis);
 }
 
-void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw, u32 glb_cpu_scratch_scp,
-				 u32 scratch_scp)
+void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
+					u32 glb_cpu_scratch_scp,
+					u32 scratch_scp)
 {
-	aq_hw_write_reg(aq_hw, glb_cpu_scratch_scp_adr(scratch_scp),
+	aq_hw_write_reg(aq_hw, HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp),
 			glb_cpu_scratch_scp);
 }
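The changes in this file are purely mechanical: every accessor keeps its body and only gains the hw_atl_ prefix, while the lower-case register macros become upper-case HW_ATL_* constants. External call sites therefore convert one-for-one; a hypothetical example (the variable and the scratch-pad index are made up for illustration):

	/* before the rename */
	reg_glb_cpu_scratch_scp_set(aq_hw, fw_cmd, 0x18U);
	/* after the rename */
	hw_atl_reg_glb_cpu_scratch_scp_set(aq_hw, fw_cmd, 0x18U);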
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index ed1085b..dfb426f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -21,657 +21,685 @@ struct aq_hw_s;
 /* global */
 
 /* set global microprocessor semaphore */
-void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw,	u32 glb_cpu_sem,
-			 u32 semaphore);
+void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
+				u32 semaphore);
 
 /* get global microprocessor semaphore */
-u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore);
+u32 hw_atl_reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore);
 
 /* set global register reset disable */
-void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis);
+void hw_atl_glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis);
 
 /* set soft reset */
-void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res);
+void hw_atl_glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res);
 
 /* get soft reset */
-u32 glb_soft_res_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw);
 
 /* stats */
 
-u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw);
 
 /* get rx dma good octet counter lsw */
-u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
 
 /* get rx dma good packet counter lsw */
-u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
 
 /* get tx dma good octet counter lsw */
-u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
 
 /* get tx dma good packet counter lsw */
-u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
 
 /* get rx dma good octet counter msw */
-u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
 
 /* get rx dma good packet counter msw */
-u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
 
 /* get tx dma good octet counter msw */
-u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
 
 /* get tx dma good packet counter msw */
-u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
 
 /* get msm rx errors counter register */
-u32 reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw);
 
 /* get msm rx unicast frames counter register */
-u32 reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
 
 /* get msm rx multicast frames counter register */
-u32 reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
 
 /* get msm rx broadcast frames counter register */
-u32 reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
 
 /* get msm rx broadcast octets counter register 1 */
-u32 reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
 
 /* get msm rx unicast octets counter register 0 */
-u32 reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
 
 /* get rx dma statistics counter 7 */
-u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw);
 
 /* get msm tx errors counter register */
-u32 reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw);
 
 /* get msm tx unicast frames counter register */
-u32 reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
 
 /* get msm tx multicast frames counter register */
-u32 reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
 
 /* get msm tx broadcast frames counter register */
-u32 reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
 
 /* get msm tx multicast octets counter register 1 */
-u32 reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw_s *aq_hw);
 
 /* get msm tx broadcast octets counter register 1 */
-u32 reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
 
 /* get msm tx unicast octets counter register 0 */
-u32 reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
 
 /* get global mif identification */
-u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_glb_mif_id_get(struct aq_hw_s *aq_hw);
 
 /* interrupt */
 
 /* set interrupt auto mask lsw */
-void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw);
+void hw_atl_itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw,
+				     u32 irq_auto_masklsw);
 
 /* set interrupt mapping enable rx */
-void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx);
+void hw_atl_itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx,
+				  u32 rx);
 
 /* set interrupt mapping enable tx */
-void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx);
+void hw_atl_itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx,
+				  u32 tx);
 
 /* set interrupt mapping rx */
-void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx);
+void hw_atl_itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx);
 
 /* set interrupt mapping tx */
-void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx);
+void hw_atl_itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx);
 
 /* set interrupt mask clear lsw */
-void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw);
+void hw_atl_itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw,
+				     u32 irq_msk_clearlsw);
 
 /* set interrupt mask set lsw */
-void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw);
+void hw_atl_itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw);
 
 /* set interrupt register reset disable */
-void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis);
+void hw_atl_itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis);
 
 /* set interrupt status clear lsw */
-void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
-				 u32 irq_status_clearlsw);
+void hw_atl_itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
+					u32 irq_status_clearlsw);
 
 /* get interrupt status lsw */
-u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_itr_irq_statuslsw_get(struct aq_hw_s *aq_hw);
 
 /* get reset interrupt */
-u32 itr_res_irq_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw);
 
 /* set reset interrupt */
-void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq);
+void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq);
 
 /* rdm */
 
 /* set cpu id */
-void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
+void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
 
 /* set rx dca enable */
-void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en);
+void hw_atl_rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en);
 
 /* set rx dca mode */
-void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode);
+void hw_atl_rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode);
 
 /* set rx descriptor data buffer size */
-void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
-				    u32 rx_desc_data_buff_size,
+void hw_atl_rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
+					   u32 rx_desc_data_buff_size,
-				    u32 descriptor);
+					   u32 descriptor);
 
 /* set rx descriptor dca enable */
-void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
-			    u32 dca);
+void hw_atl_rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
+				   u32 dca);
 
 /* set rx descriptor enable */
-void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
-			u32 descriptor);
+void hw_atl_rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
+			       u32 descriptor);
 
 /* set rx descriptor header splitting */
-void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
-				    u32 rx_desc_head_splitting,
+void hw_atl_rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
+					   u32 rx_desc_head_splitting,
-				    u32 descriptor);
+					   u32 descriptor);
 
 /* get rx descriptor head pointer */
-u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
+u32 hw_atl_rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
 
 /* set rx descriptor length */
-void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
-			 u32 descriptor);
+void hw_atl_rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
+				u32 descriptor);
 
 /* set rx descriptor write-back interrupt enable */
-void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
-				  u32 rx_desc_wr_wb_irq_en);
+void hw_atl_rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+					 u32 rx_desc_wr_wb_irq_en);
 
 /* set rx header dca enable */
-void rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
-			    u32 dca);
+void hw_atl_rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
+				   u32 dca);
 
 /* set rx payload dca enable */
-void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca);
+void hw_atl_rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en,
+				  u32 dca);
 
 /* set rx descriptor header buffer size */
-void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
-				    u32 rx_desc_head_buff_size,
-				    u32 descriptor);
+void hw_atl_rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
+					   u32 rx_desc_head_buff_size,
+					   u32 descriptor);
 
 /* set rx descriptor reset */
-void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
-			 u32 descriptor);
+void hw_atl_rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
+				u32 descriptor);
 
 /* Set RDM Interrupt Moderation Enable */
-void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en);
+void hw_atl_rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+				      u32 rdm_intr_moder_en);
 
 /* reg */
 
 /* set general interrupt mapping register */
-void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx);
+void hw_atl_reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map,
+				u32 regidx);
 
 /* get general interrupt status register */
-u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_reg_gen_irq_status_get(struct aq_hw_s *aq_hw);
 
 /* set interrupt global control register */
-void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl);
+void hw_atl_reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl);
 
 /* set interrupt throttle register */
-void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle);
+void hw_atl_reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle);
 
 /* set rx dma descriptor base address lsw */
-void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
-					u32 rx_dma_desc_base_addrlsw,
+void hw_atl_reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+					       u32 rx_dma_desc_base_addrlsw,
-					u32 descriptor);
+					       u32 descriptor);
 
 /* set rx dma descriptor base address msw */
-void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
-					u32 rx_dma_desc_base_addrmsw,
+void hw_atl_reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+					       u32 rx_dma_desc_base_addrmsw,
-					u32 descriptor);
+					       u32 descriptor);
 
 /* get rx dma descriptor status register */
-u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor);
+u32 hw_atl_reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor);
 
 /* set rx dma descriptor tail pointer register */
-void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
-				  u32 rx_dma_desc_tail_ptr,
+void hw_atl_reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+					 u32 rx_dma_desc_tail_ptr,
-				  u32 descriptor);
+					 u32 descriptor);
 
 /* set rx filter multicast filter mask register */
-void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
-				 u32 rx_flr_mcst_flr_msk);
+void hw_atl_reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
+					u32 rx_flr_mcst_flr_msk);
 
 /* set rx filter multicast filter register */
-void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
-			     u32 filter);
+void hw_atl_reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
+				    u32 filter);
 
 /* set rx filter rss control register 1 */
-void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
-				u32 rx_flr_rss_control1);
+void hw_atl_reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
+				       u32 rx_flr_rss_control1);
 
 /* Set RX Filter Control Register 2 */
-void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2);
+void hw_atl_reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2);
 
 /* Set RX Interrupt Moderation Control Register */
-void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
-				u32 rx_intr_moderation_ctl,
+void hw_atl_reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+				       u32 rx_intr_moderation_ctl,
-				u32 queue);
+				       u32 queue);
 
 /* set tx dma debug control */
-void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl);
+void hw_atl_reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw,
+				     u32 tx_dma_debug_ctl);
 
 /* set tx dma descriptor base address lsw */
-void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
-					u32 tx_dma_desc_base_addrlsw,
+void hw_atl_reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+					       u32 tx_dma_desc_base_addrlsw,
-					u32 descriptor);
+					       u32 descriptor);
 
 /* set tx dma descriptor base address msw */
-void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
-					u32 tx_dma_desc_base_addrmsw,
+void hw_atl_reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+					       u32 tx_dma_desc_base_addrmsw,
-					u32 descriptor);
+					       u32 descriptor);
 
 /* set tx dma descriptor tail pointer register */
-void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
-				  u32 tx_dma_desc_tail_ptr,
-				  u32 descriptor);
+void hw_atl_reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+					 u32 tx_dma_desc_tail_ptr,
+					 u32 descriptor);
 
 /* Set TX Interrupt Moderation Control Register */
-void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
-				u32 tx_intr_moderation_ctl,
-				u32 queue);
+void hw_atl_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+				       u32 tx_intr_moderation_ctl,
+				       u32 queue);
 
 /* set global microprocessor scratch pad */
-void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
-				 u32 glb_cpu_scratch_scp, u32 scratch_scp);
+void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
+					u32 glb_cpu_scratch_scp,
+					u32 scratch_scp);
 
 /* rpb */
 
 /* set dma system loopback */
-void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);
+void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);
 
 /* set rx traffic class mode */
-void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
-				    u32 rx_traf_class_mode);
+void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
+					   u32 rx_traf_class_mode);
 
 /* set rx buffer enable */
-void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en);
+void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en);
 
 /* set rx buffer high threshold (per tc) */
-void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
-					 u32 rx_buff_hi_threshold_per_tc,
-					 u32 buffer);
+void hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+						u32 rx_buff_hi_threshold_per_tc,
+						u32 buffer);
 
 /* set rx buffer low threshold (per tc) */
-void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
-					 u32 rx_buff_lo_threshold_per_tc,
+void hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+						u32 rx_buff_lo_threshold_per_tc,
-					 u32 buffer);
+						u32 buffer);
 
 /* set rx flow control mode */
-void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode);
+void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw,
+				     u32 rx_flow_ctl_mode);
 
 /* set rx packet buffer size (per tc) */
-void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
-				     u32 rx_pkt_buff_size_per_tc,
-				     u32 buffer);
+void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+					    u32 rx_pkt_buff_size_per_tc,
+					    u32 buffer);
 
 /* set rx xoff enable (per tc) */
-void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
-			       u32 buffer);
+void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw,
+				      u32 rx_xoff_en_per_tc, u32 buffer);
 
 /* rpf */
 
 /* set l2 broadcast count threshold */
-void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
-					u32 l2broadcast_count_threshold);
+void hw_atl_rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
+					       u32 l2broadcast_count_threshold);
 
 /* set l2 broadcast enable */
-void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en);
+void hw_atl_rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en);
 
 /* set l2 broadcast filter action */
-void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
-				u32 l2broadcast_flr_act);
+void hw_atl_rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
+				       u32 l2broadcast_flr_act);
 
 /* set l2 multicast filter enable */
-void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en,
-			       u32 filter);
+void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
+				      u32 l2multicast_flr_en,
+				      u32 filter);
 
 /* set l2 promiscuous mode enable */
-void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
-				  u32 l2promiscuous_mode_en);
+void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
+					 u32 l2promiscuous_mode_en);
 
 /* set l2 unicast filter action */
-void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act,
-			      u32 filter);
+void hw_atl_rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw,
+				     u32 l2unicast_flr_act,
+				     u32 filter);
 
 /* set l2 unicast filter enable */
-void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
-			 u32 filter);
+void hw_atl_rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
+				u32 filter);
 
 /* set l2 unicast destination address lsw */
-void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
-				      u32 l2unicast_dest_addresslsw,
+void hw_atl_rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
+					     u32 l2unicast_dest_addresslsw,
-				      u32 filter);
+					     u32 filter);
 
 /* set l2 unicast destination address msw */
-void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
-				      u32 l2unicast_dest_addressmsw,
+void hw_atl_rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
+					     u32 l2unicast_dest_addressmsw,
-				      u32 filter);
+					     u32 filter);
 
 /* Set L2 Accept all Multicast packets */
-void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
-				     u32 l2_accept_all_mc_packets);
+void hw_atl_rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
+					    u32 l2_accept_all_mc_packets);
 
 /* set user-priority tc mapping */
-void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
-				      u32 user_priority_tc_map, u32 tc);
+void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
+					     u32 user_priority_tc_map, u32 tc);
 
 /* set rss key address */
-void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr);
+void hw_atl_rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr);
 
 /* set rss key write data */
-void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data);
+void hw_atl_rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data);
 
 /* get rss key write enable */
-u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw);
 
 /* set rss key write enable */
-void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en);
+void hw_atl_rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en);
 
 /* set rss redirection table address */
-void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
-				u32 rss_redir_tbl_addr);
+void hw_atl_rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
+				       u32 rss_redir_tbl_addr);
 
 /* set rss redirection table write data */
-void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
-				   u32 rss_redir_tbl_wr_data);
+void hw_atl_rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
+					  u32 rss_redir_tbl_wr_data);
 
 /* get rss redirection write enable */
-u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw);
 
 /* set rss redirection write enable */
-void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en);
+void hw_atl_rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en);
 
 /* set tpo to rpf system loopback */
-void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
-				u32 tpo_to_rpf_sys_lbk);
+void hw_atl_rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
+				       u32 tpo_to_rpf_sys_lbk);
 
 /* set vlan inner ethertype */
-void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht);
+void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht);
 
 /* set vlan outer ethertype */
-void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht);
+void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht);
 
 /* set vlan promiscuous mode enable */
-void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en);
+void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
+				      u32 vlan_prom_mode_en);
 
 /* Set VLAN untagged action */
-void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act);
+void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw,
+				      u32 vlan_untagged_act);
 
 /* Set VLAN accept untagged packets */
-void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
-					  u32 vlan_accept_untagged_packets);
+void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
+						 u32 vlan_acc_untagged_packets);
 
 /* Set VLAN filter enable */
-void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter);
-
-/* Set VLAN Filter Action */
-void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
-			  u32 filter);
-
-/* Set VLAN ID Filter */
-void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter);
-
-/* set ethertype filter enable */
-void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter);
-
-/* set  ethertype user-priority enable */
-void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
-				   u32 etht_user_priority_en, u32 filter);
-
-/* set  ethertype rx queue enable */
-void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en,
-			      u32 filter);
-
-/* set ethertype rx queue */
-void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
-			   u32 filter);
-
-/* set ethertype user-priority */
-void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority,
+void hw_atl_rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en,
 				u32 filter);
 
+/* Set VLAN Filter Action */
+void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
+				 u32 filter);
+
+/* Set VLAN ID Filter */
+void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
+				u32 filter);
+
+/* set ethertype filter enable */
+void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
+				u32 filter);
+
+/* set ethertype user-priority enable */
+void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
+					  u32 etht_user_priority_en,
+					  u32 filter);
+
+/* set ethertype rx queue enable */
+void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw,
+				     u32 etht_rx_queue_en,
+				     u32 filter);
+
+/* set ethertype rx queue */
+void hw_atl_rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
+				  u32 filter);
+
+/* set ethertype user-priority */
+void hw_atl_rpf_etht_user_priority_set(struct aq_hw_s *aq_hw,
+				       u32 etht_user_priority,
+				       u32 filter);
+
 /* set ethertype management queue */
-void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
-			    u32 filter);
+void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
+				   u32 filter);
 
 /* set ethertype filter action */
-void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
-			  u32 filter);
+void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
+				 u32 filter);
 
 /* set ethertype filter */
-void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);
+void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);
 
 /* rpo */
 
 /* set ipv4 header checksum offload enable */
-void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
-				       u32 ipv4header_crc_offload_en);
+void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+					      u32 ipv4header_crc_offload_en);
 
 /* set rx descriptor vlan stripping */
-void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
-				    u32 rx_desc_vlan_stripping,
-				    u32 descriptor);
+void hw_atl_rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
+					   u32 rx_desc_vlan_stripping,
+					   u32 descriptor);
 
 /* set tcp/udp checksum offload enable */
-void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
-				    u32 tcp_udp_crc_offload_en);
+void hw_atl_rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+					   u32 tcp_udp_crc_offload_en);
 
 /* Set LRO Patch Optimization Enable. */
-void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
-				       u32 lro_patch_optimization_en);
+void hw_atl_rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
+					      u32 lro_patch_optimization_en);
 
 /* Set Large Receive Offload Enable */
-void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en);
+void hw_atl_rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en);
 
 /* Set LRO Q Sessions Limit */
-void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw, u32 lro_qsessions_lim);
+void hw_atl_rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
+				      u32 lro_qsessions_lim);
 
 /* Set LRO Total Descriptor Limit */
-void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim);
+void hw_atl_rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw,
+				       u32 lro_total_desc_lim);
 
 /* Set LRO Min Payload of First Packet */
-void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
-				      u32 lro_min_pld_of_first_pkt);
+void hw_atl_rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
+					     u32 lro_min_pld_of_first_pkt);
 
 /* Set LRO Packet Limit */
-void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_packet_lim);
+void hw_atl_rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_packet_lim);
 
 /* Set LRO Max Number of Descriptors */
-void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
-					u32 lro_max_desc_num, u32 lro);
+void hw_atl_rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
+					       u32 lro_max_desc_num, u32 lro);
 
 /* Set LRO Time Base Divider */
-void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
-				   u32 lro_time_base_divider);
+void hw_atl_rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
+					  u32 lro_time_base_divider);
 
-/*Set LRO Inactive Interval */
+/* Set LRO Inactive Interval */
-void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
-				   u32 lro_inactive_interval);
+void hw_atl_rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
+					  u32 lro_inactive_interval);
 
-/*Set LRO Max Coalescing Interval */
+/* Set LRO Max Coalescing Interval */
-void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
-					 u32 lro_max_coalescing_interval);
+void hw_atl_rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
+						u32 lro_max_coal_interval);
 
 /* rx */
 
 /* set rx register reset disable */
-void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis);
+void hw_atl_rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis);
 
 /* tdm */
 
 /* set cpu id */
-void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
+void hw_atl_tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
 
 /* set large send offload enable */
-void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
-				   u32 large_send_offload_en);
+void hw_atl_tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
+					  u32 large_send_offload_en);
 
 /* set tx descriptor enable */
-void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor);
+void hw_atl_tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en,
+			       u32 descriptor);
 
 /* set tx dca enable */
-void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en);
+void hw_atl_tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en);
 
 /* set tx dca mode */
-void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode);
+void hw_atl_tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode);
 
 /* set tx descriptor dca enable */
-void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca);
+void hw_atl_tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en,
+				   u32 dca);
 
 /* get tx descriptor head pointer */
-u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
+u32 hw_atl_tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
 
 /* set tx descriptor length */
-void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
-			 u32 descriptor);
+void hw_atl_tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
+				u32 descriptor);
 
 /* set tx descriptor write-back interrupt enable */
-void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
-				  u32 tx_desc_wr_wb_irq_en);
+void hw_atl_tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+					 u32 tx_desc_wr_wb_irq_en);
 
 /* set tx descriptor write-back threshold */
-void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
-				     u32 tx_desc_wr_wb_threshold,
+void hw_atl_tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
+					    u32 tx_desc_wr_wb_threshold,
-				     u32 descriptor);
+					    u32 descriptor);
 
 /* Set TDM Interrupt Moderation Enable */
-void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
-			       u32 tdm_irq_moderation_en);
+void hw_atl_tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+				      u32 tdm_irq_moderation_en);
 /* thm */
 
 /* set lso tcp flag of first packet */
-void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
-				       u32 lso_tcp_flag_of_first_pkt);
+void hw_atl_thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
+					      u32 lso_tcp_flag_of_first_pkt);
 
 /* set lso tcp flag of last packet */
-void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
-				      u32 lso_tcp_flag_of_last_pkt);
+void hw_atl_thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
+					     u32 lso_tcp_flag_of_last_pkt);
 
 /* set lso tcp flag of middle packet */
-void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
-					u32 lso_tcp_flag_of_middle_pkt);
+void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
+					       u32 lso_tcp_flag_of_middle_pkt);
 
 /* tpb */
 
 /* set tx buffer enable */
-void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
+void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
 
 /* set tx buffer high threshold (per tc) */
-void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
-					 u32 tx_buff_hi_threshold_per_tc,
+void hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+						u32 tx_buff_hi_threshold_per_tc,
-					 u32 buffer);
+						u32 buffer);
 
 /* set tx buffer low threshold (per tc) */
-void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
-					 u32 tx_buff_lo_threshold_per_tc,
+void hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+						u32 tx_buff_lo_threshold_per_tc,
-					 u32 buffer);
+						u32 buffer);
 
 /* set tx dma system loopback enable */
-void tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en);
+void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw,
+				      u32 tx_dma_sys_lbk_en);
 
 /* set tx packet buffer size (per tc) */
-void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
-				     u32 tx_pkt_buff_size_per_tc, u32 buffer);
+void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+					    u32 tx_pkt_buff_size_per_tc,
+					    u32 buffer);
 
 /* set tx path pad insert enable */
-void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en);
+void hw_atl_tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw,
+				       u32 tx_path_scp_ins_en);
 
 /* tpo */
 
 /* set ipv4 header checksum offload enable */
-void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
-				       u32 ipv4header_crc_offload_en);
+void hw_atl_tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+					      u32 ipv4header_crc_offload_en);
 
 /* set tcp/udp checksum offload enable */
-void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
-				    u32 tcp_udp_crc_offload_en);
+void hw_atl_tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+					   u32 tcp_udp_crc_offload_en);
 
 /* set tx pkt system loopback enable */
-void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en);
+void hw_atl_tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw,
+				      u32 tx_pkt_sys_lbk_en);
 
 /* tps */
 
 /* set tx packet scheduler data arbitration mode */
-void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
-				       u32 tx_pkt_shed_data_arb_mode);
+void hw_atl_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
+					      u32 tx_pkt_shed_data_arb_mode);
 
 /* set tx packet scheduler descriptor rate current time reset */
-void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
-						 u32 curr_time_res);
+void hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
+							u32 curr_time_res);
 
 /* set tx packet scheduler descriptor rate limit */
-void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
-				       u32 tx_pkt_shed_desc_rate_lim);
+void hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
+					      u32 tx_pkt_shed_desc_rate_lim);
 
 /* set tx packet scheduler descriptor tc arbitration mode */
-void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
-					  u32 tx_pkt_shed_desc_tc_arb_mode);
+void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
+						 u32 arb_mode);
 
 /* set tx packet scheduler descriptor tc max credit */
-void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
-					    u32 tx_pkt_shed_desc_tc_max_credit,
+void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
+						   u32 max_credit,
-					    u32 tc);
+						   u32 tc);
 
 /* set tx packet scheduler descriptor tc weight */
-void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
-					u32 tx_pkt_shed_desc_tc_weight,
+void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
+					       u32 tx_pkt_shed_desc_tc_weight,
-					u32 tc);
+					       u32 tc);
 
 /* set tx packet scheduler descriptor vm arbitration mode */
-void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
-					  u32 tx_pkt_shed_desc_vm_arb_mode);
+void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
+						 u32 arb_mode);
 
 /* set tx packet scheduler tc data max credit */
-void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
-					    u32 tx_pkt_shed_tc_data_max_credit,
+void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
+						   u32 max_credit,
-					    u32 tc);
+						   u32 tc);
 
 /* set tx packet scheduler tc data weight */
-void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
-					u32 tx_pkt_shed_tc_data_weight,
+void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
+					       u32 tx_pkt_shed_tc_data_weight,
-					u32 tc);
+					       u32 tc);
 
 /* tx */
 
 /* set tx register reset disable */
-void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis);
+void hw_atl_tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis);
 
 /* msm */
 
 /* get register access status */
-u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_msm_reg_access_status_get(struct aq_hw_s *aq_hw);
 
-/* set  register address for indirect address */
+/* set register address for indirect address */
-void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
-					u32 reg_addr_for_indirect_addr);
+void hw_atl_msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
+					       u32 reg_addr_for_indirect_addr);
 
 /* set register read strobe */
-void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe);
+void hw_atl_msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe);
 
-/* get  register read data */
+/* get register read data */
-u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw);
+u32 hw_atl_msm_reg_rd_data_get(struct aq_hw_s *aq_hw);
 
-/* set  register write data */
+/* set register write data */
-void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data);
+void hw_atl_msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data);
 
 /* set register write strobe */
-void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe);
+void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe);
 
 /* pci */
 
 /* set pci register reset disable */
-void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
+void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
 
 #endif /* HW_ATL_LLH_H */
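To see how the renamed API reads at a call site: programming one 32-bit word of the RSS key is a posted write gated by a write-enable handshake. A sketch under stated assumptions; the handshake polarity (hardware clearing wr_en on completion), the polling budget and the helper name are illustrative rather than the driver's real loop:

static int hw_atl_rss_key_word_set_sketch(struct aq_hw_s *aq_hw,
					  u32 word_idx, u32 word)
{
	int budget = 1000;

	hw_atl_rpf_rss_key_wr_data_set(aq_hw, word);
	hw_atl_rpf_rss_key_addr_set(aq_hw, word_idx);
	hw_atl_rpf_rss_key_wr_en_set(aq_hw, 1U);

	/* assumed: hardware drops wr_en once the word is latched */
	while (hw_atl_rpf_rss_key_wr_en_get(aq_hw)) {
		if (--budget < 0)
			return -ETIME;
		udelay(10);
	}
	return 0;
}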
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index 93450ec..e0cf701 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -18,91 +18,91 @@
  * base address: 0x000003a0
  * parameter: semaphore {s} | stride size 0x4 | range [0, 15]
  */
-#define glb_cpu_sem_adr(semaphore)  (0x000003a0u + (semaphore) * 0x4)
+#define HW_ATL_GLB_CPU_SEM_ADR(semaphore)  (0x000003a0u + (semaphore) * 0x4)
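The parameterized macros keep the stride arithmetic visible at the call site; for instance (illustrative), semaphore 4 resolves to 0x03a0 + 4 * 0x4 = 0x03b0:

	u32 sem = aq_hw_read_reg(aq_hw, HW_ATL_GLB_CPU_SEM_ADR(4));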
 /* register address for bitfield rx dma good octet counter lsw [1f:0] */
-#define stats_rx_dma_good_octet_counterlsw__adr 0x00006808
+#define HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERLSW 0x00006808
 /* register address for bitfield rx dma good packet counter lsw [1f:0] */
-#define stats_rx_dma_good_pkt_counterlsw__adr 0x00006800
+#define HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERLSW 0x00006800
 /* register address for bitfield tx dma good octet counter lsw [1f:0] */
-#define stats_tx_dma_good_octet_counterlsw__adr 0x00008808
+#define HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERLSW 0x00008808
 /* register address for bitfield tx dma good packet counter lsw [1f:0] */
-#define stats_tx_dma_good_pkt_counterlsw__adr 0x00008800
+#define HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERLSW 0x00008800
 
 /* register address for bitfield rx dma good octet counter msw [3f:20] */
-#define stats_rx_dma_good_octet_countermsw__adr 0x0000680c
+#define HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERMSW 0x0000680c
 /* register address for bitfield rx dma good packet counter msw [3f:20] */
-#define stats_rx_dma_good_pkt_countermsw__adr 0x00006804
+#define HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERMSW 0x00006804
 /* register address for bitfield tx dma good octet counter msw [3f:20] */
-#define stats_tx_dma_good_octet_countermsw__adr 0x0000880c
+#define HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERMSW 0x0000880c
 /* register address for bitfield tx dma good packet counter msw [3f:20] */
-#define stats_tx_dma_good_pkt_countermsw__adr 0x00008804
+#define HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERMSW 0x00008804
 
 /* preprocessor definitions for msm rx errors counter register */
-#define mac_msm_rx_errs_cnt_adr 0x00000120u
+#define HW_ATL_MAC_MSM_RX_ERRS_CNT_ADR 0x00000120u
 
 /* preprocessor definitions for msm rx unicast frames counter register */
-#define mac_msm_rx_ucst_frm_cnt_adr 0x000000e0u
+#define HW_ATL_MAC_MSM_RX_UCST_FRM_CNT_ADR 0x000000e0u
 
 /* preprocessor definitions for msm rx multicast frames counter register */
-#define mac_msm_rx_mcst_frm_cnt_adr 0x000000e8u
+#define HW_ATL_MAC_MSM_RX_MCST_FRM_CNT_ADR 0x000000e8u
 
 /* preprocessor definitions for msm rx broadcast frames counter register */
-#define mac_msm_rx_bcst_frm_cnt_adr 0x000000f0u
+#define HW_ATL_MAC_MSM_RX_BCST_FRM_CNT_ADR 0x000000f0u
 
 /* preprocessor definitions for msm rx broadcast octets counter register 1 */
-#define mac_msm_rx_bcst_octets_counter1_adr 0x000001b0u
+#define HW_ATL_MAC_MSM_RX_BCST_OCTETS_COUNTER1_ADR 0x000001b0u
 
 /* preprocessor definitions for msm rx broadcast octets counter register 2 */
-#define mac_msm_rx_bcst_octets_counter2_adr 0x000001b4u
+#define HW_ATL_MAC_MSM_RX_BCST_OCTETS_COUNTER2_ADR 0x000001b4u
 
 /* preprocessor definitions for msm rx unicast octets counter register 0 */
-#define mac_msm_rx_ucst_octets_counter0_adr 0x000001b8u
+#define HW_ATL_MAC_MSM_RX_UCST_OCTETS_COUNTER0_ADR 0x000001b8u
 
 /* preprocessor definitions for rx dma statistics counter 7 */
-#define rx_dma_stat_counter7_adr 0x00006818u
+#define HW_ATL_RX_DMA_STAT_COUNTER7_ADR 0x00006818u
 
 /* preprocessor definitions for msm tx unicast frames counter register */
-#define mac_msm_tx_ucst_frm_cnt_adr 0x00000108u
+#define HW_ATL_MAC_MSM_TX_UCST_FRM_CNT_ADR 0x00000108u
 
 /* preprocessor definitions for msm tx multicast frames counter register */
-#define mac_msm_tx_mcst_frm_cnt_adr 0x00000110u
+#define HW_ATL_MAC_MSM_TX_MCST_FRM_CNT_ADR 0x00000110u
 
 /* preprocessor definitions for global mif identification */
-#define glb_mif_id_adr 0x0000001cu
+#define HW_ATL_GLB_MIF_ID_ADR 0x0000001cu
 
 /* register address for bitfield iamr_lsw[1f:0] */
-#define itr_iamrlsw_adr 0x00002090
+#define HW_ATL_ITR_IAMRLSW_ADR 0x00002090
 /* register address for bitfield rx dma drop packet counter [1f:0] */
-#define rpb_rx_dma_drop_pkt_cnt_adr 0x00006818
+#define HW_ATL_RPB_RX_DMA_DROP_PKT_CNT_ADR 0x00006818
 
 /* register address for bitfield imcr_lsw[1f:0] */
-#define itr_imcrlsw_adr 0x00002070
+#define HW_ATL_ITR_IMCRLSW_ADR 0x00002070
 /* register address for bitfield imsr_lsw[1f:0] */
-#define itr_imsrlsw_adr 0x00002060
+#define HW_ATL_ITR_IMSRLSW_ADR 0x00002060
 /* register address for bitfield itr_reg_res_dsbl */
-#define itr_reg_res_dsbl_adr 0x00002300
+#define HW_ATL_ITR_REG_RES_DSBL_ADR 0x00002300
 /* bitmask for bitfield itr_reg_res_dsbl */
-#define itr_reg_res_dsbl_msk 0x20000000
+#define HW_ATL_ITR_REG_RES_DSBL_MSK 0x20000000
 /* lower bit position of bitfield itr_reg_res_dsbl */
-#define itr_reg_res_dsbl_shift 29
+#define HW_ATL_ITR_REG_RES_DSBL_SHIFT 29
 /* register address for bitfield iscr_lsw[1f:0] */
-#define itr_iscrlsw_adr 0x00002050
+#define HW_ATL_ITR_ISCRLSW_ADR 0x00002050
 /* register address for bitfield isr_lsw[1f:0] */
-#define itr_isrlsw_adr 0x00002000
+#define HW_ATL_ITR_ISRLSW_ADR 0x00002000
 /* register address for bitfield itr_reset */
-#define itr_res_adr 0x00002300
+#define HW_ATL_ITR_RES_ADR 0x00002300
 /* bitmask for bitfield itr_reset */
-#define itr_res_msk 0x80000000
+#define HW_ATL_ITR_RES_MSK 0x80000000
 /* lower bit position of bitfield itr_reset */
-#define itr_res_shift 31
+#define HW_ATL_ITR_RES_SHIFT 31
 /* register address for bitfield dca{d}_cpuid[7:0] */
-#define rdm_dcadcpuid_adr(dca) (0x00006100 + (dca) * 0x4)
+#define HW_ATL_RDM_DCADCPUID_ADR(dca) (0x00006100 + (dca) * 0x4)
 /* bitmask for bitfield dca{d}_cpuid[7:0] */
-#define rdm_dcadcpuid_msk 0x000000ff
+#define HW_ATL_RDM_DCADCPUID_MSK 0x000000ff
 /* lower bit position of bitfield dca{d}_cpuid[7:0] */
-#define rdm_dcadcpuid_shift 0
+#define HW_ATL_RDM_DCADCPUID_SHIFT 0
 /* register address for bitfield dca_en */
-#define rdm_dca_en_adr 0x00006180
+#define HW_ATL_RDM_DCA_EN_ADR 0x00006180
 
 /* rx dca_en bitfield definitions
  * preprocessor definitions for the bitfield "dca_en".
@@ -110,17 +110,17 @@
  */
 
 /* register address for bitfield dca_en */
-#define rdm_dca_en_adr 0x00006180
+#define HW_ATL_RDM_DCA_EN_ADR 0x00006180
 /* bitmask for bitfield dca_en */
-#define rdm_dca_en_msk 0x80000000
+#define HW_ATL_RDM_DCA_EN_MSK 0x80000000
 /* inverted bitmask for bitfield dca_en */
-#define rdm_dca_en_mskn 0x7fffffff
+#define HW_ATL_RDM_DCA_EN_MSKN 0x7fffffff
 /* lower bit position of bitfield dca_en */
-#define rdm_dca_en_shift 31
+#define HW_ATL_RDM_DCA_EN_SHIFT 31
 /* width of bitfield dca_en */
-#define rdm_dca_en_width 1
+#define HW_ATL_RDM_DCA_EN_WIDTH 1
 /* default value of bitfield dca_en */
-#define rdm_dca_en_default 0x1
+#define HW_ATL_RDM_DCA_EN_DEFAULT 0x1
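Each bitfield ships as an ADR/MSK/MSKN/SHIFT bundle (plus WIDTH and DEFAULT); the driver's aq_hw_write_reg_bit() consumes the first four. Open-coding the same read-modify-write makes the roles obvious; a sketch for exposition only, not how the driver spells it:

static void hw_atl_rdm_dca_en_sketch(struct aq_hw_s *aq_hw, u32 dca_en)
{
	u32 val = aq_hw_read_reg(aq_hw, HW_ATL_RDM_DCA_EN_ADR);

	val &= HW_ATL_RDM_DCA_EN_MSKN;	/* clear the one-bit field */
	val |= (dca_en << HW_ATL_RDM_DCA_EN_SHIFT) & HW_ATL_RDM_DCA_EN_MSK;
	aq_hw_write_reg(aq_hw, HW_ATL_RDM_DCA_EN_ADR, val);
}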
 
 /* rx dca_mode[3:0] bitfield definitions
  * preprocessor definitions for the bitfield "dca_mode[3:0]".
@@ -128,17 +128,17 @@
  */
 
 /* register address for bitfield dca_mode[3:0] */
-#define rdm_dca_mode_adr 0x00006180
+#define HW_ATL_RDM_DCA_MODE_ADR 0x00006180
 /* bitmask for bitfield dca_mode[3:0] */
-#define rdm_dca_mode_msk 0x0000000f
+#define HW_ATL_RDM_DCA_MODE_MSK 0x0000000f
 /* inverted bitmask for bitfield dca_mode[3:0] */
-#define rdm_dca_mode_mskn 0xfffffff0
+#define HW_ATL_RDM_DCA_MODE_MSKN 0xfffffff0
 /* lower bit position of bitfield dca_mode[3:0] */
-#define rdm_dca_mode_shift 0
+#define HW_ATL_RDM_DCA_MODE_SHIFT 0
 /* width of bitfield dca_mode[3:0] */
-#define rdm_dca_mode_width 4
+#define HW_ATL_RDM_DCA_MODE_WIDTH 4
 /* default value of bitfield dca_mode[3:0] */
-#define rdm_dca_mode_default 0x0
+#define HW_ATL_RDM_DCA_MODE_DEFAULT 0x0
 
 /* rx desc{d}_data_size[4:0] bitfield definitions
  * preprocessor definitions for the bitfield "desc{d}_data_size[4:0]".
@@ -147,17 +147,18 @@
  */
 
 /* register address for bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDDATA_SIZE_ADR(descriptor) \
+	(0x00005b18 + (descriptor) * 0x20)
 /* bitmask for bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_msk 0x0000001f
+#define HW_ATL_RDM_DESCDDATA_SIZE_MSK 0x0000001f
 /* inverted bitmask for bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_mskn 0xffffffe0
+#define HW_ATL_RDM_DESCDDATA_SIZE_MSKN 0xffffffe0
 /* lower bit position of bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_shift 0
+#define HW_ATL_RDM_DESCDDATA_SIZE_SHIFT 0
 /* width of bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_width 5
+#define HW_ATL_RDM_DESCDDATA_SIZE_WIDTH 5
 /* default value of bitfield desc{d}_data_size[4:0] */
-#define rdm_descddata_size_default 0x0
+#define HW_ATL_RDM_DESCDDATA_SIZE_DEFAULT 0x0
 
 /* rx dca{d}_desc_en bitfield definitions
  * preprocessor definitions for the bitfield "dca{d}_desc_en".
@@ -166,17 +167,17 @@
  */
 
 /* register address for bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_adr(dca) (0x00006100 + (dca) * 0x4)
+#define HW_ATL_RDM_DCADDESC_EN_ADR(dca) (0x00006100 + (dca) * 0x4)
 /* bitmask for bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_msk 0x80000000
+#define HW_ATL_RDM_DCADDESC_EN_MSK 0x80000000
 /* inverted bitmask for bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_mskn 0x7fffffff
+#define HW_ATL_RDM_DCADDESC_EN_MSKN 0x7fffffff
 /* lower bit position of bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_shift 31
+#define HW_ATL_RDM_DCADDESC_EN_SHIFT 31
 /* width of bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_width 1
+#define HW_ATL_RDM_DCADDESC_EN_WIDTH 1
 /* default value of bitfield dca{d}_desc_en */
-#define rdm_dcaddesc_en_default 0x0
+#define HW_ATL_RDM_DCADDESC_EN_DEFAULT 0x0
 
 /* rx desc{d}_en bitfield definitions
  * preprocessor definitions for the bitfield "desc{d}_en".
@@ -185,17 +186,17 @@
  */
 
 /* register address for bitfield desc{d}_en */
-#define rdm_descden_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDEN_ADR(descriptor) (0x00005b08 + (descriptor) * 0x20)
 /* bitmask for bitfield desc{d}_en */
-#define rdm_descden_msk 0x80000000
+#define HW_ATL_RDM_DESCDEN_MSK 0x80000000
 /* inverted bitmask for bitfield desc{d}_en */
-#define rdm_descden_mskn 0x7fffffff
+#define HW_ATL_RDM_DESCDEN_MSKN 0x7fffffff
 /* lower bit position of bitfield desc{d}_en */
-#define rdm_descden_shift 31
+#define HW_ATL_RDM_DESCDEN_SHIFT 31
 /* width of bitfield desc{d}_en */
-#define rdm_descden_width 1
+#define HW_ATL_RDM_DESCDEN_WIDTH 1
 /* default value of bitfield desc{d}_en */
-#define rdm_descden_default 0x0
+#define HW_ATL_RDM_DESCDEN_DEFAULT 0x0
 
 /* rx desc{d}_hdr_size[4:0] bitfield definitions
  * preprocessor definitions for the bitfield "desc{d}_hdr_size[4:0]".
@@ -204,17 +205,18 @@
  */
 
 /* register address for bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDHDR_SIZE_ADR(descriptor) \
+	(0x00005b18 + (descriptor) * 0x20)
 /* bitmask for bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_msk 0x00001f00
+#define HW_ATL_RDM_DESCDHDR_SIZE_MSK 0x00001f00
 /* inverted bitmask for bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_mskn 0xffffe0ff
+#define HW_ATL_RDM_DESCDHDR_SIZE_MSKN 0xffffe0ff
 /* lower bit position of bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_shift 8
+#define HW_ATL_RDM_DESCDHDR_SIZE_SHIFT 8
 /* width of bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_width 5
+#define HW_ATL_RDM_DESCDHDR_SIZE_WIDTH 5
 /* default value of bitfield desc{d}_hdr_size[4:0] */
-#define rdm_descdhdr_size_default 0x0
+#define HW_ATL_RDM_DESCDHDR_SIZE_DEFAULT 0x0
 
 /* rx desc{d}_hdr_split bitfield definitions
  * preprocessor definitions for the bitfield "desc{d}_hdr_split".
@@ -223,17 +225,18 @@
  */
 
 /* register address for bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDHDR_SPLIT_ADR(descriptor) \
+	(0x00005b08 + (descriptor) * 0x20)
 /* bitmask for bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_msk 0x10000000
+#define HW_ATL_RDM_DESCDHDR_SPLIT_MSK 0x10000000
 /* inverted bitmask for bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_mskn 0xefffffff
+#define HW_ATL_RDM_DESCDHDR_SPLIT_MSKN 0xefffffff
 /* lower bit position of bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_shift 28
+#define HW_ATL_RDM_DESCDHDR_SPLIT_SHIFT 28
 /* width of bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_width 1
+#define HW_ATL_RDM_DESCDHDR_SPLIT_WIDTH 1
 /* default value of bitfield desc{d}_hdr_split */
-#define rdm_descdhdr_split_default 0x0
+#define HW_ATL_RDM_DESCDHDR_SPLIT_DEFAULT 0x0
 
 /* rx desc{d}_hd[c:0] bitfield definitions
  * preprocessor definitions for the bitfield "desc{d}_hd[c:0]".
@@ -242,15 +245,15 @@
  */
 
 /* register address for bitfield desc{d}_hd[c:0] */
-#define rdm_descdhd_adr(descriptor) (0x00005b0c + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDHD_ADR(descriptor) (0x00005b0c + (descriptor) * 0x20)
 /* bitmask for bitfield desc{d}_hd[c:0] */
-#define rdm_descdhd_msk 0x00001fff
+#define HW_ATL_RDM_DESCDHD_MSK 0x00001fff
 /* inverted bitmask for bitfield desc{d}_hd[c:0] */
-#define rdm_descdhd_mskn 0xffffe000
+#define HW_ATL_RDM_DESCDHD_MSKN 0xffffe000
 /* lower bit position of bitfield desc{d}_hd[c:0] */
-#define rdm_descdhd_shift 0
+#define HW_ATL_RDM_DESCDHD_SHIFT 0
 /* width of bitfield desc{d}_hd[c:0] */
-#define rdm_descdhd_width 13
+#define HW_ATL_RDM_DESCDHD_WIDTH 13
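Note that desc{d}_hd carries no _DEFAULT macro: it is a read-only head pointer, the field hw_atl_rdm_rx_desc_head_ptr_get() extracts. The equivalent open-coded read, for illustration (ring is a hypothetical queue index):

	u32 hd = (aq_hw_read_reg(aq_hw, HW_ATL_RDM_DESCDHD_ADR(ring)) &
		  HW_ATL_RDM_DESCDHD_MSK) >> HW_ATL_RDM_DESCDHD_SHIFT;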
 
 /* rx desc{d}_len[9:0] bitfield definitions
  * preprocessor definitions for the bitfield "desc{d}_len[9:0]".
@@ -259,17 +262,17 @@
  */
 
 /* register address for bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDLEN_ADR(descriptor) (0x00005b08 + (descriptor) * 0x20)
 /* bitmask for bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_msk 0x00001ff8
+#define HW_ATL_RDM_DESCDLEN_MSK 0x00001ff8
 /* inverted bitmask for bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_mskn 0xffffe007
+#define HW_ATL_RDM_DESCDLEN_MSKN 0xffffe007
 /* lower bit position of bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_shift 3
+#define HW_ATL_RDM_DESCDLEN_SHIFT 3
 /* width of bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_width 10
+#define HW_ATL_RDM_DESCDLEN_WIDTH 10
 /* default value of bitfield desc{d}_len[9:0] */
-#define rdm_descdlen_default 0x0
+#define HW_ATL_RDM_DESCDLEN_DEFAULT 0x0
 
 /* rx desc{d}_reset bitfield definitions
  * preprocessor definitions for the bitfield "desc{d}_reset".
@@ -278,17 +281,17 @@
  */
 
 /* register address for bitfield desc{d}_reset */
-#define rdm_descdreset_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+#define HW_ATL_RDM_DESCDRESET_ADR(descriptor) (0x00005b08 + (descriptor) * 0x20)
 /* bitmask for bitfield desc{d}_reset */
-#define rdm_descdreset_msk 0x02000000
+#define HW_ATL_RDM_DESCDRESET_MSK 0x02000000
 /* inverted bitmask for bitfield desc{d}_reset */
-#define rdm_descdreset_mskn 0xfdffffff
+#define HW_ATL_RDM_DESCDRESET_MSKN 0xfdffffff
 /* lower bit position of bitfield desc{d}_reset */
-#define rdm_descdreset_shift 25
+#define HW_ATL_RDM_DESCDRESET_SHIFT 25
 /* width of bitfield desc{d}_reset */
-#define rdm_descdreset_width 1
+#define HW_ATL_RDM_DESCDRESET_WIDTH 1
 /* default value of bitfield desc{d}_reset */
-#define rdm_descdreset_default 0x0
+#define HW_ATL_RDM_DESCDRESET_DEFAULT 0x0
 
 /* rx int_desc_wrb_en bitfield definitions
  * preprocessor definitions for the bitfield "int_desc_wrb_en".
@@ -296,17 +299,17 @@
  */
 
 /* register address for bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_adr 0x00005a30
+#define HW_ATL_RDM_INT_DESC_WRB_EN_ADR 0x00005a30
 /* bitmask for bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_msk 0x00000004
+#define HW_ATL_RDM_INT_DESC_WRB_EN_MSK 0x00000004
 /* inverted bitmask for bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_mskn 0xfffffffb
+#define HW_ATL_RDM_INT_DESC_WRB_EN_MSKN 0xfffffffb
 /* lower bit position of bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_shift 2
+#define HW_ATL_RDM_INT_DESC_WRB_EN_SHIFT 2
 /* width of bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_width 1
+#define HW_ATL_RDM_INT_DESC_WRB_EN_WIDTH 1
 /* default value of bitfield int_desc_wrb_en */
-#define rdm_int_desc_wrb_en_default 0x0
+#define HW_ATL_RDM_INT_DESC_WRB_EN_DEFAULT 0x0
 
 /* rx dca{d}_hdr_en bitfield definitions
  * preprocessor definitions for the bitfield "dca{d}_hdr_en".
@@ -315,17 +318,17 @@
  */
 
 /* register address for bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_adr(dca) (0x00006100 + (dca) * 0x4)
+#define HW_ATL_RDM_DCADHDR_EN_ADR(dca) (0x00006100 + (dca) * 0x4)
 /* bitmask for bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_msk 0x40000000
+#define HW_ATL_RDM_DCADHDR_EN_MSK 0x40000000
 /* inverted bitmask for bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_mskn 0xbfffffff
+#define HW_ATL_RDM_DCADHDR_EN_MSKN 0xbfffffff
 /* lower bit position of bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_shift 30
+#define HW_ATL_RDM_DCADHDR_EN_SHIFT 30
 /* width of bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_width 1
+#define HW_ATL_RDM_DCADHDR_EN_WIDTH 1
 /* default value of bitfield dca{d}_hdr_en */
-#define rdm_dcadhdr_en_default 0x0
+#define HW_ATL_RDM_DCADHDR_EN_DEFAULT 0x0
 
 /* rx dca{d}_pay_en bitfield definitions
  * preprocessor definitions for the bitfield "dca{d}_pay_en".
@@ -334,17 +337,17 @@
  */
 
 /* register address for bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_adr(dca) (0x00006100 + (dca) * 0x4)
+#define HW_ATL_RDM_DCADPAY_EN_ADR(dca) (0x00006100 + (dca) * 0x4)
 /* bitmask for bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_msk 0x20000000
+#define HW_ATL_RDM_DCADPAY_EN_MSK 0x20000000
 /* inverted bitmask for bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_mskn 0xdfffffff
+#define HW_ATL_RDM_DCADPAY_EN_MSKN 0xdfffffff
 /* lower bit position of bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_shift 29
+#define HW_ATL_RDM_DCADPAY_EN_SHIFT 29
 /* width of bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_width 1
+#define HW_ATL_RDM_DCADPAY_EN_WIDTH 1
 /* default value of bitfield dca{d}_pay_en */
-#define rdm_dcadpay_en_default 0x0
+#define HW_ATL_RDM_DCADPAY_EN_DEFAULT 0x0
 
 /* RX rdm_int_rim_en Bitfield Definitions
  * Preprocessor definitions for the bitfield "rdm_int_rim_en".
@@ -352,51 +355,51 @@
  */
 
 /* Register address for bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_adr 0x00005A30
+#define HW_ATL_RDM_INT_RIM_EN_ADR 0x00005A30
 /* Bitmask for bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_msk 0x00000008
+#define HW_ATL_RDM_INT_RIM_EN_MSK 0x00000008
 /* Inverted bitmask for bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_mskn 0xFFFFFFF7
+#define HW_ATL_RDM_INT_RIM_EN_MSKN 0xFFFFFFF7
 /* Lower bit position of bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_shift 3
+#define HW_ATL_RDM_INT_RIM_EN_SHIFT 3
 /* Width of bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_width 1
+#define HW_ATL_RDM_INT_RIM_EN_WIDTH 1
 /* Default value of bitfield rdm_int_rim_en */
-#define rdm_int_rim_en_default 0x0
+#define HW_ATL_RDM_INT_RIM_EN_DEFAULT 0x0
 
 /* general interrupt mapping register definitions
  * preprocessor definitions for general interrupt mapping register
  * base address: 0x00002180
  * parameter: regidx {f} | stride size 0x4 | range [0, 3]
  */
-#define gen_intr_map_adr(regidx) (0x00002180u + (regidx) * 0x4)
+#define HW_ATL_GEN_INTR_MAP_ADR(regidx) (0x00002180u + (regidx) * 0x4)
 
 /* general interrupt status register definitions
  * preprocessor definitions for general interrupt status register
  * address: 0x000021A0
  */
 
-#define gen_intr_stat_adr 0x000021A4U
+#define HW_ATL_GEN_INTR_STAT_ADR 0x000021A4U
 
 /* interrupt global control register definitions
  * preprocessor definitions for interrupt global control register
  * address: 0x00002300
  */
-#define intr_glb_ctl_adr 0x00002300u
+#define HW_ATL_INTR_GLB_CTL_ADR 0x00002300u
 
 /* interrupt throttle register definitions
  * preprocessor definitions for interrupt throttle register
  * base address: 0x00002800
  * parameter: throttle {t} | stride size 0x4 | range [0, 31]
  */
-#define intr_thr_adr(throttle) (0x00002800u + (throttle) * 0x4)
+#define HW_ATL_INTR_THR_ADR(throttle) (0x00002800u + (throttle) * 0x4)
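
(As a worked example of the stride arithmetic: HW_ATL_INTR_THR_ADR(31) expands to 0x00002800u + 31 * 0x4 = 0x0000287Cu, the last of the 32 per-vector throttle registers.)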
 
 /* rx dma descriptor base address lsw definitions
  * preprocessor definitions for rx dma descriptor base address lsw
  * base address: 0x00005b00
  * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
  */
-#define rx_dma_desc_base_addrlsw_adr(descriptor) \
+#define HW_ATL_RX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor) \
 (0x00005b00u + (descriptor) * 0x20)
 
 /* rx dma descriptor base address msw definitions
@@ -404,7 +407,7 @@
  * base address: 0x00005b04
  * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
  */
-#define rx_dma_desc_base_addrmsw_adr(descriptor) \
+#define HW_ATL_RX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor) \
 (0x00005b04u + (descriptor) * 0x20)
 
 /* rx dma descriptor status register definitions
@@ -412,46 +415,48 @@
  * base address: 0x00005b14
  * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
  */
-#define rx_dma_desc_stat_adr(descriptor) (0x00005b14u + (descriptor) * 0x20)
+#define HW_ATL_RX_DMA_DESC_STAT_ADR(descriptor) \
+	(0x00005b14u + (descriptor) * 0x20)
 
 /* rx dma descriptor tail pointer register definitions
  * preprocessor definitions for rx dma descriptor tail pointer register
  * base address: 0x00005b10
  * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
  */
-#define rx_dma_desc_tail_ptr_adr(descriptor) (0x00005b10u + (descriptor) * 0x20)
+#define HW_ATL_RX_DMA_DESC_TAIL_PTR_ADR(descriptor) \
+	(0x00005b10u + (descriptor) * 0x20)
 
 /* rx interrupt moderation control register definitions
  * Preprocessor definitions for RX Interrupt Moderation Control Register
  * Base Address: 0x00005A40
  * Parameter: RIM {R} | stride size 0x4 | range [0, 31]
  */
-#define rx_intr_moderation_ctl_adr(rim) (0x00005A40u + (rim) * 0x4)
+#define HW_ATL_RX_INTR_MODERATION_CTL_ADR(rim) (0x00005A40u + (rim) * 0x4)
 
 /* rx filter multicast filter mask register definitions
  * preprocessor definitions for rx filter multicast filter mask register
  * address: 0x00005270
  */
-#define rx_flr_mcst_flr_msk_adr 0x00005270u
+#define HW_ATL_RX_FLR_MCST_FLR_MSK_ADR 0x00005270u
 
 /* rx filter multicast filter register definitions
  * preprocessor definitions for rx filter multicast filter register
  * base address: 0x00005250
  * parameter: filter {f} | stride size 0x4 | range [0, 7]
  */
-#define rx_flr_mcst_flr_adr(filter) (0x00005250u + (filter) * 0x4)
+#define HW_ATL_RX_FLR_MCST_FLR_ADR(filter) (0x00005250u + (filter) * 0x4)
 
 /* RX Filter RSS Control Register 1 Definitions
  * Preprocessor definitions for RX Filter RSS Control Register 1
  * Address: 0x000054C0
  */
-#define rx_flr_rss_control1_adr 0x000054C0u
+#define HW_ATL_RX_FLR_RSS_CONTROL1_ADR 0x000054C0u
 
 /* RX Filter Control Register 2 Definitions
  * Preprocessor definitions for RX Filter Control Register 2
  * Address: 0x00005104
  */
-#define rx_flr_control2_adr 0x00005104u
+#define HW_ATL_RX_FLR_CONTROL2_ADR 0x00005104u
 
 /* tx tx dma debug control [1f:0] bitfield definitions
  * preprocessor definitions for the bitfield "tx dma debug control [1f:0]".
@@ -459,24 +464,24 @@
  */
 
 /* register address for bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_adr 0x00008920
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_ADR 0x00008920
 /* bitmask for bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_msk 0xffffffff
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_MSK 0xffffffff
 /* inverted bitmask for bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_mskn 0x00000000
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_MSKN 0x00000000
 /* lower bit position of bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_shift 0
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_SHIFT 0
 /* width of bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_width 32
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_WIDTH 32
 /* default value of bitfield tx dma debug control [1f:0] */
-#define tdm_tx_dma_debug_ctl_default 0x0
+#define HW_ATL_TDM_TX_DMA_DEBUG_CTL_DEFAULT 0x0
 
 /* tx dma descriptor base address lsw definitions
  * preprocessor definitions for tx dma descriptor base address lsw
  * base address: 0x00007c00
  * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
  */
-#define tx_dma_desc_base_addrlsw_adr(descriptor) \
+#define HW_ATL_TX_DMA_DESC_BASE_ADDRLSW_ADR(descriptor) \
 	(0x00007c00u + (descriptor) * 0x40)
 
 /* tx dma descriptor tail pointer register definitions
@@ -484,7 +489,8 @@
  * base address: 0x00007c10
  * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
  */
-#define tx_dma_desc_tail_ptr_adr(descriptor) (0x00007c10u + (descriptor) * 0x40)
+#define HW_ATL_TX_DMA_DESC_TAIL_PTR_ADR(descriptor) \
+	(0x00007c10u + (descriptor) * 0x40)
 
 /* rx dma_sys_loopback bitfield definitions
  * preprocessor definitions for the bitfield "dma_sys_loopback".
@@ -492,17 +498,17 @@
  */
 
 /* register address for bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_adr 0x00005000
+#define HW_ATL_RPB_DMA_SYS_LBK_ADR 0x00005000
 /* bitmask for bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_msk 0x00000040
+#define HW_ATL_RPB_DMA_SYS_LBK_MSK 0x00000040
 /* inverted bitmask for bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_mskn 0xffffffbf
+#define HW_ATL_RPB_DMA_SYS_LBK_MSKN 0xffffffbf
 /* lower bit position of bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_shift 6
+#define HW_ATL_RPB_DMA_SYS_LBK_SHIFT 6
 /* width of bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_width 1
+#define HW_ATL_RPB_DMA_SYS_LBK_WIDTH 1
 /* default value of bitfield dma_sys_loopback */
-#define rpb_dma_sys_lbk_default 0x0
+#define HW_ATL_RPB_DMA_SYS_LBK_DEFAULT 0x0
 
 /* rx rx_tc_mode bitfield definitions
  * preprocessor definitions for the bitfield "rx_tc_mode".
@@ -510,17 +516,17 @@
  */
 
 /* register address for bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_adr 0x00005700
+#define HW_ATL_RPB_RPF_RX_TC_MODE_ADR 0x00005700
 /* bitmask for bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_msk 0x00000100
+#define HW_ATL_RPB_RPF_RX_TC_MODE_MSK 0x00000100
 /* inverted bitmask for bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_mskn 0xfffffeff
+#define HW_ATL_RPB_RPF_RX_TC_MODE_MSKN 0xfffffeff
 /* lower bit position of bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_shift 8
+#define HW_ATL_RPB_RPF_RX_TC_MODE_SHIFT 8
 /* width of bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_width 1
+#define HW_ATL_RPB_RPF_RX_TC_MODE_WIDTH 1
 /* default value of bitfield rx_tc_mode */
-#define rpb_rpf_rx_tc_mode_default 0x0
+#define HW_ATL_RPB_RPF_RX_TC_MODE_DEFAULT 0x0
 
 /* rx rx_buf_en bitfield definitions
  * preprocessor definitions for the bitfield "rx_buf_en".
@@ -528,17 +534,17 @@
  */
 
 /* register address for bitfield rx_buf_en */
-#define rpb_rx_buf_en_adr 0x00005700
+#define HW_ATL_RPB_RX_BUF_EN_ADR 0x00005700
 /* bitmask for bitfield rx_buf_en */
-#define rpb_rx_buf_en_msk 0x00000001
+#define HW_ATL_RPB_RX_BUF_EN_MSK 0x00000001
 /* inverted bitmask for bitfield rx_buf_en */
-#define rpb_rx_buf_en_mskn 0xfffffffe
+#define HW_ATL_RPB_RX_BUF_EN_MSKN 0xfffffffe
 /* lower bit position of bitfield rx_buf_en */
-#define rpb_rx_buf_en_shift 0
+#define HW_ATL_RPB_RX_BUF_EN_SHIFT 0
 /* width of bitfield rx_buf_en */
-#define rpb_rx_buf_en_width 1
+#define HW_ATL_RPB_RX_BUF_EN_WIDTH 1
 /* default value of bitfield rx_buf_en */
-#define rpb_rx_buf_en_default 0x0
+#define HW_ATL_RPB_RX_BUF_EN_DEFAULT 0x0
 
 /* rx rx{b}_hi_thresh[d:0] bitfield definitions
  * preprocessor definitions for the bitfield "rx{b}_hi_thresh[d:0]".
@@ -547,17 +553,17 @@
  */
 
 /* register address for bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10)
+#define HW_ATL_RPB_RXBHI_THRESH_ADR(buffer) (0x00005714 + (buffer) * 0x10)
 /* bitmask for bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_msk 0x3fff0000
+#define HW_ATL_RPB_RXBHI_THRESH_MSK 0x3fff0000
 /* inverted bitmask for bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_mskn 0xc000ffff
+#define HW_ATL_RPB_RXBHI_THRESH_MSKN 0xc000ffff
 /* lower bit position of bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_shift 16
+#define HW_ATL_RPB_RXBHI_THRESH_SHIFT 16
 /* width of bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_width 14
+#define HW_ATL_RPB_RXBHI_THRESH_WIDTH 14
 /* default value of bitfield rx{b}_hi_thresh[d:0] */
-#define rpb_rxbhi_thresh_default 0x0
+#define HW_ATL_RPB_RXBHI_THRESH_DEFAULT 0x0
 
 /* rx rx{b}_lo_thresh[d:0] bitfield definitions
  * preprocessor definitions for the bitfield "rx{b}_lo_thresh[d:0]".
@@ -566,17 +572,17 @@
  */
 
 /* register address for bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10)
+#define HW_ATL_RPB_RXBLO_THRESH_ADR(buffer) (0x00005714 + (buffer) * 0x10)
 /* bitmask for bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_msk 0x00003fff
+#define HW_ATL_RPB_RXBLO_THRESH_MSK 0x00003fff
 /* inverted bitmask for bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_mskn 0xffffc000
+#define HW_ATL_RPB_RXBLO_THRESH_MSKN 0xffffc000
 /* lower bit position of bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_shift 0
+#define HW_ATL_RPB_RXBLO_THRESH_SHIFT 0
 /* width of bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_width 14
+#define HW_ATL_RPB_RXBLO_THRESH_WIDTH 14
 /* default value of bitfield rx{b}_lo_thresh[d:0] */
-#define rpb_rxblo_thresh_default 0x0
+#define HW_ATL_RPB_RXBLO_THRESH_DEFAULT 0x0
 
 /* rx rx_fc_mode[1:0] bitfield definitions
  * preprocessor definitions for the bitfield "rx_fc_mode[1:0]".
@@ -584,17 +590,17 @@
  */
 
 /* register address for bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_adr 0x00005700
+#define HW_ATL_RPB_RX_FC_MODE_ADR 0x00005700
 /* bitmask for bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_msk 0x00000030
+#define HW_ATL_RPB_RX_FC_MODE_MSK 0x00000030
 /* inverted bitmask for bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_mskn 0xffffffcf
+#define HW_ATL_RPB_RX_FC_MODE_MSKN 0xffffffcf
 /* lower bit position of bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_shift 4
+#define HW_ATL_RPB_RX_FC_MODE_SHIFT 4
 /* width of bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_width 2
+#define HW_ATL_RPB_RX_FC_MODE_WIDTH 2
 /* default value of bitfield rx_fc_mode[1:0] */
-#define rpb_rx_fc_mode_default 0x0
+#define HW_ATL_RPB_RX_FC_MODE_DEFAULT 0x0
 
 /* rx rx{b}_buf_size[8:0] bitfield definitions
  * preprocessor definitions for the bitfield "rx{b}_buf_size[8:0]".
@@ -603,17 +609,17 @@
  */
 
 /* register address for bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_adr(buffer) (0x00005710 + (buffer) * 0x10)
+#define HW_ATL_RPB_RXBBUF_SIZE_ADR(buffer) (0x00005710 + (buffer) * 0x10)
 /* bitmask for bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_msk 0x000001ff
+#define HW_ATL_RPB_RXBBUF_SIZE_MSK 0x000001ff
 /* inverted bitmask for bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_mskn 0xfffffe00
+#define HW_ATL_RPB_RXBBUF_SIZE_MSKN 0xfffffe00
 /* lower bit position of bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_shift 0
+#define HW_ATL_RPB_RXBBUF_SIZE_SHIFT 0
 /* width of bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_width 9
+#define HW_ATL_RPB_RXBBUF_SIZE_WIDTH 9
 /* default value of bitfield rx{b}_buf_size[8:0] */
-#define rpb_rxbbuf_size_default 0x0
+#define HW_ATL_RPB_RXBBUF_SIZE_DEFAULT 0x0
 
 /* rx rx{b}_xoff_en bitfield definitions
  * preprocessor definitions for the bitfield "rx{b}_xoff_en".
@@ -622,17 +628,17 @@
  */
 
 /* register address for bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_adr(buffer) (0x00005714 + (buffer) * 0x10)
+#define HW_ATL_RPB_RXBXOFF_EN_ADR(buffer) (0x00005714 + (buffer) * 0x10)
 /* bitmask for bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_msk 0x80000000
+#define HW_ATL_RPB_RXBXOFF_EN_MSK 0x80000000
 /* inverted bitmask for bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_mskn 0x7fffffff
+#define HW_ATL_RPB_RXBXOFF_EN_MSKN 0x7fffffff
 /* lower bit position of bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_shift 31
+#define HW_ATL_RPB_RXBXOFF_EN_SHIFT 31
 /* width of bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_width 1
+#define HW_ATL_RPB_RXBXOFF_EN_WIDTH 1
 /* default value of bitfield rx{b}_xoff_en */
-#define rpb_rxbxoff_en_default 0x0
+#define HW_ATL_RPB_RXBXOFF_EN_DEFAULT 0x0
 
 /* rx l2_bc_thresh[f:0] bitfield definitions
  * preprocessor definitions for the bitfield "l2_bc_thresh[f:0]".
@@ -640,17 +646,17 @@
  */
 
 /* register address for bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_adr 0x00005100
+#define HW_ATL_RPFL2BC_THRESH_ADR 0x00005100
 /* bitmask for bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_msk 0xffff0000
+#define HW_ATL_RPFL2BC_THRESH_MSK 0xffff0000
 /* inverted bitmask for bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_mskn 0x0000ffff
+#define HW_ATL_RPFL2BC_THRESH_MSKN 0x0000ffff
 /* lower bit position of bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_shift 16
+#define HW_ATL_RPFL2BC_THRESH_SHIFT 16
 /* width of bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_width 16
+#define HW_ATL_RPFL2BC_THRESH_WIDTH 16
 /* default value of bitfield l2_bc_thresh[f:0] */
-#define rpfl2bc_thresh_default 0x0
+#define HW_ATL_RPFL2BC_THRESH_DEFAULT 0x0
 
 /* rx l2_bc_en bitfield definitions
  * preprocessor definitions for the bitfield "l2_bc_en".
@@ -658,17 +664,17 @@
  */
 
 /* register address for bitfield l2_bc_en */
-#define rpfl2bc_en_adr 0x00005100
+#define HW_ATL_RPFL2BC_EN_ADR 0x00005100
 /* bitmask for bitfield l2_bc_en */
-#define rpfl2bc_en_msk 0x00000001
+#define HW_ATL_RPFL2BC_EN_MSK 0x00000001
 /* inverted bitmask for bitfield l2_bc_en */
-#define rpfl2bc_en_mskn 0xfffffffe
+#define HW_ATL_RPFL2BC_EN_MSKN 0xfffffffe
 /* lower bit position of bitfield l2_bc_en */
-#define rpfl2bc_en_shift 0
+#define HW_ATL_RPFL2BC_EN_SHIFT 0
 /* width of bitfield l2_bc_en */
-#define rpfl2bc_en_width 1
+#define HW_ATL_RPFL2BC_EN_WIDTH 1
 /* default value of bitfield l2_bc_en */
-#define rpfl2bc_en_default 0x0
+#define HW_ATL_RPFL2BC_EN_DEFAULT 0x0
 
 /* rx l2_bc_act[2:0] bitfield definitions
  * preprocessor definitions for the bitfield "l2_bc_act[2:0]".
@@ -676,17 +682,17 @@
  */
 
 /* register address for bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_adr 0x00005100
+#define HW_ATL_RPFL2BC_ACT_ADR 0x00005100
 /* bitmask for bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_msk 0x00007000
+#define HW_ATL_RPFL2BC_ACT_MSK 0x00007000
 /* inverted bitmask for bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_mskn 0xffff8fff
+#define HW_ATL_RPFL2BC_ACT_MSKN 0xffff8fff
 /* lower bit position of bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_shift 12
+#define HW_ATL_RPFL2BC_ACT_SHIFT 12
 /* width of bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_width 3
+#define HW_ATL_RPFL2BC_ACT_WIDTH 3
 /* default value of bitfield l2_bc_act[2:0] */
-#define rpfl2bc_act_default 0x0
+#define HW_ATL_RPFL2BC_ACT_DEFAULT 0x0
 
 /* rx l2_mc_en{f} bitfield definitions
  * preprocessor definitions for the bitfield "l2_mc_en{f}".
@@ -695,17 +701,17 @@
  */
 
 /* register address for bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_adr(filter) (0x00005250 + (filter) * 0x4)
+#define HW_ATL_RPFL2MC_ENF_ADR(filter) (0x00005250 + (filter) * 0x4)
 /* bitmask for bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_msk 0x80000000
+#define HW_ATL_RPFL2MC_ENF_MSK 0x80000000
 /* inverted bitmask for bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_mskn 0x7fffffff
+#define HW_ATL_RPFL2MC_ENF_MSKN 0x7fffffff
 /* lower bit position of bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_shift 31
+#define HW_ATL_RPFL2MC_ENF_SHIFT 31
 /* width of bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_width 1
+#define HW_ATL_RPFL2MC_ENF_WIDTH 1
 /* default value of bitfield l2_mc_en{f} */
-#define rpfl2mc_enf_default 0x0
+#define HW_ATL_RPFL2MC_ENF_DEFAULT 0x0
 
 /* rx l2_promis_mode bitfield definitions
  * preprocessor definitions for the bitfield "l2_promis_mode".
@@ -713,17 +719,17 @@
  */
 
 /* register address for bitfield l2_promis_mode */
-#define rpfl2promis_mode_adr 0x00005100
+#define HW_ATL_RPFL2PROMIS_MODE_ADR 0x00005100
 /* bitmask for bitfield l2_promis_mode */
-#define rpfl2promis_mode_msk 0x00000008
+#define HW_ATL_RPFL2PROMIS_MODE_MSK 0x00000008
 /* inverted bitmask for bitfield l2_promis_mode */
-#define rpfl2promis_mode_mskn 0xfffffff7
+#define HW_ATL_RPFL2PROMIS_MODE_MSKN 0xfffffff7
 /* lower bit position of bitfield l2_promis_mode */
-#define rpfl2promis_mode_shift 3
+#define HW_ATL_RPFL2PROMIS_MODE_SHIFT 3
 /* width of bitfield l2_promis_mode */
-#define rpfl2promis_mode_width 1
+#define HW_ATL_RPFL2PROMIS_MODE_WIDTH 1
 /* default value of bitfield l2_promis_mode */
-#define rpfl2promis_mode_default 0x0
+#define HW_ATL_RPFL2PROMIS_MODE_DEFAULT 0x0
 
 /* rx l2_uc_act{f}[2:0] bitfield definitions
  * preprocessor definitions for the bitfield "l2_uc_act{f}[2:0]".
@@ -732,17 +738,17 @@
  */
 
 /* register address for bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_adr(filter) (0x00005114 + (filter) * 0x8)
+#define HW_ATL_RPFL2UC_ACTF_ADR(filter) (0x00005114 + (filter) * 0x8)
 /* bitmask for bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_msk 0x00070000
+#define HW_ATL_RPFL2UC_ACTF_MSK 0x00070000
 /* inverted bitmask for bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_mskn 0xfff8ffff
+#define HW_ATL_RPFL2UC_ACTF_MSKN 0xfff8ffff
 /* lower bit position of bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_shift 16
+#define HW_ATL_RPFL2UC_ACTF_SHIFT 16
 /* width of bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_width 3
+#define HW_ATL_RPFL2UC_ACTF_WIDTH 3
 /* default value of bitfield l2_uc_act{f}[2:0] */
-#define rpfl2uc_actf_default 0x0
+#define HW_ATL_RPFL2UC_ACTF_DEFAULT 0x0
 
 /* rx l2_uc_en{f} bitfield definitions
  * preprocessor definitions for the bitfield "l2_uc_en{f}".
@@ -751,26 +757,26 @@
  */
 
 /* register address for bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_adr(filter) (0x00005114 + (filter) * 0x8)
+#define HW_ATL_RPFL2UC_ENF_ADR(filter) (0x00005114 + (filter) * 0x8)
 /* bitmask for bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_msk 0x80000000
+#define HW_ATL_RPFL2UC_ENF_MSK 0x80000000
 /* inverted bitmask for bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_mskn 0x7fffffff
+#define HW_ATL_RPFL2UC_ENF_MSKN 0x7fffffff
 /* lower bit position of bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_shift 31
+#define HW_ATL_RPFL2UC_ENF_SHIFT 31
 /* width of bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_width 1
+#define HW_ATL_RPFL2UC_ENF_WIDTH 1
 /* default value of bitfield l2_uc_en{f} */
-#define rpfl2uc_enf_default 0x0
+#define HW_ATL_RPFL2UC_ENF_DEFAULT 0x0
 
 /* register address for bitfield l2_uc_da{f}_lsw[1f:0] */
-#define rpfl2uc_daflsw_adr(filter) (0x00005110 + (filter) * 0x8)
+#define HW_ATL_RPFL2UC_DAFLSW_ADR(filter) (0x00005110 + (filter) * 0x8)
 /* register address for bitfield l2_uc_da{f}_msw[f:0] */
-#define rpfl2uc_dafmsw_adr(filter) (0x00005114 + (filter) * 0x8)
+#define HW_ATL_RPFL2UC_DAFMSW_ADR(filter) (0x00005114 + (filter) * 0x8)
 /* bitmask for bitfield l2_uc_da{f}_msw[f:0] */
-#define rpfl2uc_dafmsw_msk 0x0000ffff
+#define HW_ATL_RPFL2UC_DAFMSW_MSK 0x0000ffff
 /* lower bit position of bitfield l2_uc_da{f}_msw[f:0] */
-#define rpfl2uc_dafmsw_shift 0
+#define HW_ATL_RPFL2UC_DAFMSW_SHIFT 0
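
The unicast DA filter spreads the 48-bit MAC address over a full 32-bit LSW register and a 16-bit MSW bitfield. A sketch of programming filter 'f', assuming network byte order (mac[0] is the most significant octet); the helper name is illustrative:

	/* Illustrative: load a 6-byte MAC into unicast DA filter 'f'. */
	static void l2uc_daf_set(struct aq_hw_s *aq_hw, u32 f, const u8 *mac)
	{
		u32 lsw = ((u32)mac[2] << 24) | ((u32)mac[3] << 16) |
			  ((u32)mac[4] << 8) | mac[5];
		u32 msw = ((u32)mac[0] << 8) | mac[1];

		aq_hw_write_reg(aq_hw, HW_ATL_RPFL2UC_DAFLSW_ADR(f), lsw);
		aq_hw_write_reg_bit(aq_hw, HW_ATL_RPFL2UC_DAFMSW_ADR(f),
				    HW_ATL_RPFL2UC_DAFMSW_MSK,
				    HW_ATL_RPFL2UC_DAFMSW_SHIFT, msw);
	}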
 
 /* rx l2_mc_accept_all bitfield definitions
  * Preprocessor definitions for the bitfield "l2_mc_accept_all".
@@ -778,22 +784,22 @@
  */
 
 /* Register address for bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_adr 0x00005270
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_ADR 0x00005270
 /* Bitmask for bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_msk 0x00004000
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_MSK 0x00004000
 /* Inverted bitmask for bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_mskn 0xFFFFBFFF
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_MSKN 0xFFFFBFFF
 /* Lower bit position of bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_shift 14
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_SHIFT 14
 /* Width of bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_width 1
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_WIDTH 1
 /* Default value of bitfield l2_mc_accept_all */
-#define rpfl2mc_accept_all_default 0x0
+#define HW_ATL_RPFL2MC_ACCEPT_ALL_DEFAULT 0x0
 
 /* width of bitfield rx_tc_up{t}[2:0] */
-#define rpf_rpb_rx_tc_upt_width 3
+#define HW_ATL_RPF_RPB_RX_TC_UPT_WIDTH 3
 /* default value of bitfield rx_tc_up{t}[2:0] */
-#define rpf_rpb_rx_tc_upt_default 0x0
+#define HW_ATL_RPF_RPB_RX_TC_UPT_DEFAULT 0x0
 
 /* rx rss_key_addr[4:0] bitfield definitions
  * preprocessor definitions for the bitfield "rss_key_addr[4:0]".
@@ -801,17 +807,17 @@
  */
 
 /* register address for bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_adr 0x000054d0
+#define HW_ATL_RPF_RSS_KEY_ADDR_ADR 0x000054d0
 /* bitmask for bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_msk 0x0000001f
+#define HW_ATL_RPF_RSS_KEY_ADDR_MSK 0x0000001f
 /* inverted bitmask for bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_mskn 0xffffffe0
+#define HW_ATL_RPF_RSS_KEY_ADDR_MSKN 0xffffffe0
 /* lower bit position of bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_shift 0
+#define HW_ATL_RPF_RSS_KEY_ADDR_SHIFT 0
 /* width of bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_width 5
+#define HW_ATL_RPF_RSS_KEY_ADDR_WIDTH 5
 /* default value of bitfield rss_key_addr[4:0] */
-#define rpf_rss_key_addr_default 0x0
+#define HW_ATL_RPF_RSS_KEY_ADDR_DEFAULT 0x0
 
 /* rx rss_key_wr_data[1f:0] bitfield definitions
  * preprocessor definitions for the bitfield "rss_key_wr_data[1f:0]".
@@ -819,17 +825,17 @@
  */
 
 /* register address for bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_adr 0x000054d4
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_ADR 0x000054d4
 /* bitmask for bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_msk 0xffffffff
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_MSK 0xffffffff
 /* inverted bitmask for bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_mskn 0x00000000
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_MSKN 0x00000000
 /* lower bit position of bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_shift 0
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_SHIFT 0
 /* width of bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_width 32
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_WIDTH 32
 /* default value of bitfield rss_key_wr_data[1f:0] */
-#define rpf_rss_key_wr_data_default 0x0
+#define HW_ATL_RPF_RSS_KEY_WR_DATA_DEFAULT 0x0
 
 /* rx rss_key_wr_en_i bitfield definitions
  * preprocessor definitions for the bitfield "rss_key_wr_en_i".
@@ -837,17 +843,17 @@
  */
 
 /* register address for bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_adr 0x000054d0
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_ADR 0x000054d0
 /* bitmask for bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_msk 0x00000020
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_MSK 0x00000020
 /* inverted bitmask for bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_mskn 0xffffffdf
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_MSKN 0xffffffdf
 /* lower bit position of bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_shift 5
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT 5
 /* width of bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_width 1
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_WIDTH 1
 /* default value of bitfield rss_key_wr_en_i */
-#define rpf_rss_key_wr_eni_default 0x0
+#define HW_ATL_RPF_RSS_KEY_WR_ENI_DEFAULT 0x0
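
The rss_key_addr / rss_key_wr_data / rss_key_wr_en_i triple above forms a small indirect-write interface: software deposits a 32-bit key word, selects the destination slot, then raises the write enable. A hedged sketch of one such write; the function name and exact sequencing (e.g. any polling of the enable bit afterwards) are illustrative:

	/* Illustrative: write one 32-bit word of the RSS hash key. */
	static void rss_key_word_set(struct aq_hw_s *aq_hw, u32 idx, u32 word)
	{
		aq_hw_write_reg(aq_hw, HW_ATL_RPF_RSS_KEY_WR_DATA_ADR, word);
		aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_ADDR_ADR,
				    HW_ATL_RPF_RSS_KEY_ADDR_MSK,
				    HW_ATL_RPF_RSS_KEY_ADDR_SHIFT, idx);
		aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_RSS_KEY_WR_ENI_ADR,
				    HW_ATL_RPF_RSS_KEY_WR_ENI_MSK,
				    HW_ATL_RPF_RSS_KEY_WR_ENI_SHIFT, 1U);
	}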
 
 /* rx rss_redir_addr[3:0] bitfield definitions
  * preprocessor definitions for the bitfield "rss_redir_addr[3:0]".
@@ -855,17 +861,17 @@
  */
 
 /* register address for bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_adr 0x000054e0
+#define HW_ATL_RPF_RSS_REDIR_ADDR_ADR 0x000054e0
 /* bitmask for bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_msk 0x0000000f
+#define HW_ATL_RPF_RSS_REDIR_ADDR_MSK 0x0000000f
 /* inverted bitmask for bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_mskn 0xfffffff0
+#define HW_ATL_RPF_RSS_REDIR_ADDR_MSKN 0xfffffff0
 /* lower bit position of bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_shift 0
+#define HW_ATL_RPF_RSS_REDIR_ADDR_SHIFT 0
 /* width of bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_width 4
+#define HW_ATL_RPF_RSS_REDIR_ADDR_WIDTH 4
 /* default value of bitfield rss_redir_addr[3:0] */
-#define rpf_rss_redir_addr_default 0x0
+#define HW_ATL_RPF_RSS_REDIR_ADDR_DEFAULT 0x0
 
 /* rx rss_redir_wr_data[f:0] bitfield definitions
  * preprocessor definitions for the bitfield "rss_redir_wr_data[f:0]".
@@ -873,17 +879,17 @@
  */
 
 /* register address for bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_adr 0x000054e4
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_ADR 0x000054e4
 /* bitmask for bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_msk 0x0000ffff
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_MSK 0x0000ffff
 /* inverted bitmask for bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_mskn 0xffff0000
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_MSKN 0xffff0000
 /* lower bit position of bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_shift 0
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_SHIFT 0
 /* width of bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_width 16
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_WIDTH 16
 /* default value of bitfield rss_redir_wr_data[f:0] */
-#define rpf_rss_redir_wr_data_default 0x0
+#define HW_ATL_RPF_RSS_REDIR_WR_DATA_DEFAULT 0x0
 
 /* rx rss_redir_wr_en_i bitfield definitions
  * preprocessor definitions for the bitfield "rss_redir_wr_en_i".
@@ -891,17 +897,17 @@
  */
 
 /* register address for bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_adr 0x000054e0
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_ADR 0x000054e0
 /* bitmask for bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_msk 0x00000010
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_MSK 0x00000010
 /* inverted bitmask for bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_mskn 0xffffffef
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_MSKN 0xffffffef
 /* lower bit position of bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_shift 4
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_SHIFT 4
 /* width of bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_width 1
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_WIDTH 1
 /* default value of bitfield rss_redir_wr_en_i */
-#define rpf_rss_redir_wr_eni_default 0x0
+#define HW_ATL_RPF_RSS_REDIR_WR_ENI_DEFAULT 0x0
 
 /* rx tpo_rpf_sys_loopback bitfield definitions
  * preprocessor definitions for the bitfield "tpo_rpf_sys_loopback".
@@ -909,17 +915,17 @@
  */
 
 /* register address for bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_adr 0x00005000
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_ADR 0x00005000
 /* bitmask for bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_msk 0x00000100
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_MSK 0x00000100
 /* inverted bitmask for bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_mskn 0xfffffeff
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_MSKN 0xfffffeff
 /* lower bit position of bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_shift 8
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_SHIFT 8
 /* width of bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_width 1
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_WIDTH 1
 /* default value of bitfield tpo_rpf_sys_loopback */
-#define rpf_tpo_rpf_sys_lbk_default 0x0
+#define HW_ATL_RPF_TPO_RPF_SYS_LBK_DEFAULT 0x0
 
 /* rx vl_inner_tpid[f:0] bitfield definitions
  * preprocessor definitions for the bitfield "vl_inner_tpid[f:0]".
@@ -927,17 +933,17 @@
  */
 
 /* register address for bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_adr 0x00005284
+#define HW_ATL_RPF_VL_INNER_TPID_ADR 0x00005284
 /* bitmask for bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_msk 0x0000ffff
+#define HW_ATL_RPF_VL_INNER_TPID_MSK 0x0000ffff
 /* inverted bitmask for bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_mskn 0xffff0000
+#define HW_ATL_RPF_VL_INNER_TPID_MSKN 0xffff0000
 /* lower bit position of bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_shift 0
+#define HW_ATL_RPF_VL_INNER_TPID_SHIFT 0
 /* width of bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_width 16
+#define HW_ATL_RPF_VL_INNER_TPID_WIDTH 16
 /* default value of bitfield vl_inner_tpid[f:0] */
-#define rpf_vl_inner_tpid_default 0x8100
+#define HW_ATL_RPF_VL_INNER_TPID_DEFAULT 0x8100
 
 /* rx vl_outer_tpid[f:0] bitfield definitions
  * preprocessor definitions for the bitfield "vl_outer_tpid[f:0]".
@@ -945,17 +951,17 @@
  */
 
 /* register address for bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_adr 0x00005284
+#define HW_ATL_RPF_VL_OUTER_TPID_ADR 0x00005284
 /* bitmask for bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_msk 0xffff0000
+#define HW_ATL_RPF_VL_OUTER_TPID_MSK 0xffff0000
 /* inverted bitmask for bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_mskn 0x0000ffff
+#define HW_ATL_RPF_VL_OUTER_TPID_MSKN 0x0000ffff
 /* lower bit position of bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_shift 16
+#define HW_ATL_RPF_VL_OUTER_TPID_SHIFT 16
 /* width of bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_width 16
+#define HW_ATL_RPF_VL_OUTER_TPID_WIDTH 16
 /* default value of bitfield vl_outer_tpid[f:0] */
-#define rpf_vl_outer_tpid_default 0x88a8
+#define HW_ATL_RPF_VL_OUTER_TPID_DEFAULT 0x88a8
 
 /* rx vl_promis_mode bitfield definitions
  * preprocessor definitions for the bitfield "vl_promis_mode".
@@ -963,17 +969,17 @@
  */
 
 /* register address for bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_adr 0x00005280
+#define HW_ATL_RPF_VL_PROMIS_MODE_ADR 0x00005280
 /* bitmask for bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_msk 0x00000002
+#define HW_ATL_RPF_VL_PROMIS_MODE_MSK 0x00000002
 /* inverted bitmask for bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_mskn 0xfffffffd
+#define HW_ATL_RPF_VL_PROMIS_MODE_MSKN 0xfffffffd
 /* lower bit position of bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_shift 1
+#define HW_ATL_RPF_VL_PROMIS_MODE_SHIFT 1
 /* width of bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_width 1
+#define HW_ATL_RPF_VL_PROMIS_MODE_WIDTH 1
 /* default value of bitfield vl_promis_mode */
-#define rpf_vl_promis_mode_default 0x0
+#define HW_ATL_RPF_VL_PROMIS_MODE_DEFAULT 0x0
 
 /* RX vl_accept_untagged_mode Bitfield Definitions
  * Preprocessor definitions for the bitfield "vl_accept_untagged_mode".
@@ -981,17 +987,17 @@
  */
 
 /* Register address for bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_adr 0x00005280
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_ADR 0x00005280
 /* Bitmask for bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_msk 0x00000004
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSK 0x00000004
 /* Inverted bitmask for bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_mskn 0xFFFFFFFB
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_MSKN 0xFFFFFFFB
 /* Lower bit position of bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_shift 2
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_SHIFT 2
 /* Width of bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_width 1
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_WIDTH 1
 /* Default value of bitfield vl_accept_untagged_mode */
-#define rpf_vl_accept_untagged_mode_default 0x0
+#define HW_ATL_RPF_VL_ACCEPT_UNTAGGED_MODE_DEFAULT 0x0
 
 /* RX vl_untagged_act[2:0] Bitfield Definitions
  * Preprocessor definitions for the bitfield "vl_untagged_act[2:0]".
@@ -999,17 +1005,17 @@
  */
 
 /* Register address for bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_adr 0x00005280
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_ADR 0x00005280
 /* Bitmask for bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_msk 0x00000038
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_MSK 0x00000038
 /* Inverted bitmask for bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_mskn 0xFFFFFFC7
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_MSKN 0xFFFFFFC7
 /* Lower bit position of bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_shift 3
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_SHIFT 3
 /* Width of bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_width 3
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_WIDTH 3
 /* Default value of bitfield vl_untagged_act[2:0] */
-#define rpf_vl_untagged_act_default 0x0
+#define HW_ATL_RPF_VL_UNTAGGED_ACT_DEFAULT 0x0
 
 /* RX vl_en{F} Bitfield Definitions
  * Preprocessor definitions for the bitfield "vl_en{F}".
@@ -1018,17 +1024,17 @@
  */
 
 /* Register address for bitfield vl_en{F} */
-#define rpf_vl_en_f_adr(filter) (0x00005290 + (filter) * 0x4)
+#define HW_ATL_RPF_VL_EN_F_ADR(filter) (0x00005290 + (filter) * 0x4)
 /* Bitmask for bitfield vl_en{F} */
-#define rpf_vl_en_f_msk 0x80000000
+#define HW_ATL_RPF_VL_EN_F_MSK 0x80000000
 /* Inverted bitmask for bitfield vl_en{F} */
-#define rpf_vl_en_f_mskn 0x7FFFFFFF
+#define HW_ATL_RPF_VL_EN_F_MSKN 0x7FFFFFFF
 /* Lower bit position of bitfield vl_en{F} */
-#define rpf_vl_en_f_shift 31
+#define HW_ATL_RPF_VL_EN_F_SHIFT 31
 /* Width of bitfield vl_en{F} */
-#define rpf_vl_en_f_width 1
+#define HW_ATL_RPF_VL_EN_F_WIDTH 1
 /* Default value of bitfield vl_en{F} */
-#define rpf_vl_en_f_default 0x0
+#define HW_ATL_RPF_VL_EN_F_DEFAULT 0x0
 
 /* RX vl_act{F}[2:0] Bitfield Definitions
  * Preprocessor definitions for the bitfield "vl_act{F}[2:0]".
@@ -1037,17 +1043,17 @@
  */
 
 /* Register address for bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_adr(filter) (0x00005290 + (filter) * 0x4)
+#define HW_ATL_RPF_VL_ACT_F_ADR(filter) (0x00005290 + (filter) * 0x4)
 /* Bitmask for bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_msk 0x00070000
+#define HW_ATL_RPF_VL_ACT_F_MSK 0x00070000
 /* Inverted bitmask for bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_mskn 0xFFF8FFFF
+#define HW_ATL_RPF_VL_ACT_F_MSKN 0xFFF8FFFF
 /* Lower bit position of bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_shift 16
+#define HW_ATL_RPF_VL_ACT_F_SHIFT 16
 /* Width of bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_width 3
+#define HW_ATL_RPF_VL_ACT_F_WIDTH 3
 /* Default value of bitfield vl_act{F}[2:0] */
-#define rpf_vl_act_f_default 0x0
+#define HW_ATL_RPF_VL_ACT_F_DEFAULT 0x0
 
 /* RX vl_id{F}[B:0] Bitfield Definitions
  * Preprocessor definitions for the bitfield "vl_id{F}[B:0]".
@@ -1056,17 +1062,17 @@
  */
 
 /* Register address for bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_adr(filter) (0x00005290 + (filter) * 0x4)
+#define HW_ATL_RPF_VL_ID_F_ADR(filter) (0x00005290 + (filter) * 0x4)
 /* Bitmask for bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_msk 0x00000FFF
+#define HW_ATL_RPF_VL_ID_F_MSK 0x00000FFF
 /* Inverted bitmask for bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_mskn 0xFFFFF000
+#define HW_ATL_RPF_VL_ID_F_MSKN 0xFFFFF000
 /* Lower bit position of bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_shift 0
+#define HW_ATL_RPF_VL_ID_F_SHIFT 0
 /* Width of bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_width 12
+#define HW_ATL_RPF_VL_ID_F_WIDTH 12
 /* Default value of bitfield vl_id{F}[B:0] */
-#define rpf_vl_id_f_default 0x0
+#define HW_ATL_RPF_VL_ID_F_DEFAULT 0x0
 
 /* RX et_en{F} Bitfield Definitions
  * Preprocessor definitions for the bitfield "et_en{F}".
@@ -1075,17 +1081,17 @@
  */
 
 /* Register address for bitfield et_en{F} */
-#define rpf_et_en_f_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_EN_F_ADR(filter) (0x00005300 + (filter) * 0x4)
 /* Bitmask for bitfield et_en{F} */
-#define rpf_et_en_f_msk 0x80000000
+#define HW_ATL_RPF_ET_EN_F_MSK 0x80000000
 /* Inverted bitmask for bitfield et_en{F} */
-#define rpf_et_en_f_mskn 0x7FFFFFFF
+#define HW_ATL_RPF_ET_EN_F_MSKN 0x7FFFFFFF
 /* Lower bit position of bitfield et_en{F} */
-#define rpf_et_en_f_shift 31
+#define HW_ATL_RPF_ET_EN_F_SHIFT 31
 /* Width of bitfield et_en{F} */
-#define rpf_et_en_f_width 1
+#define HW_ATL_RPF_ET_EN_F_WIDTH 1
 /* Default value of bitfield et_en{F} */
-#define rpf_et_en_f_default 0x0
+#define HW_ATL_RPF_ET_EN_F_DEFAULT 0x0
 
 /* rx et_en{f} bitfield definitions
  * preprocessor definitions for the bitfield "et_en{f}".
@@ -1094,17 +1100,17 @@
  */
 
 /* register address for bitfield et_en{f} */
-#define rpf_et_enf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_ENF_ADR(filter) (0x00005300 + (filter) * 0x4)
 /* bitmask for bitfield et_en{f} */
-#define rpf_et_enf_msk 0x80000000
+#define HW_ATL_RPF_ET_ENF_MSK 0x80000000
 /* inverted bitmask for bitfield et_en{f} */
-#define rpf_et_enf_mskn 0x7fffffff
+#define HW_ATL_RPF_ET_ENF_MSKN 0x7fffffff
 /* lower bit position of bitfield et_en{f} */
-#define rpf_et_enf_shift 31
+#define HW_ATL_RPF_ET_ENF_SHIFT 31
 /* width of bitfield et_en{f} */
-#define rpf_et_enf_width 1
+#define HW_ATL_RPF_ET_ENF_WIDTH 1
 /* default value of bitfield et_en{f} */
-#define rpf_et_enf_default 0x0
+#define HW_ATL_RPF_ET_ENF_DEFAULT 0x0
 
 /* rx et_up{f}_en bitfield definitions
  * preprocessor definitions for the bitfield "et_up{f}_en".
@@ -1113,17 +1119,17 @@
  */
 
 /* register address for bitfield et_up{f}_en */
-#define rpf_et_upfen_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_UPFEN_ADR(filter) (0x00005300 + (filter) * 0x4)
 /* bitmask for bitfield et_up{f}_en */
-#define rpf_et_upfen_msk 0x40000000
+#define HW_ATL_RPF_ET_UPFEN_MSK 0x40000000
 /* inverted bitmask for bitfield et_up{f}_en */
-#define rpf_et_upfen_mskn 0xbfffffff
+#define HW_ATL_RPF_ET_UPFEN_MSKN 0xbfffffff
 /* lower bit position of bitfield et_up{f}_en */
-#define rpf_et_upfen_shift 30
+#define HW_ATL_RPF_ET_UPFEN_SHIFT 30
 /* width of bitfield et_up{f}_en */
-#define rpf_et_upfen_width 1
+#define HW_ATL_RPF_ET_UPFEN_WIDTH 1
 /* default value of bitfield et_up{f}_en */
-#define rpf_et_upfen_default 0x0
+#define HW_ATL_RPF_ET_UPFEN_DEFAULT 0x0
 
 /* rx et_rxq{f}_en bitfield definitions
  * preprocessor definitions for the bitfield "et_rxq{f}_en".
@@ -1132,17 +1138,17 @@
  */
 
 /* register address for bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_RXQFEN_ADR(filter) (0x00005300 + (filter) * 0x4)
 /* bitmask for bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_msk 0x20000000
+#define HW_ATL_RPF_ET_RXQFEN_MSK 0x20000000
 /* inverted bitmask for bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_mskn 0xdfffffff
+#define HW_ATL_RPF_ET_RXQFEN_MSKN 0xdfffffff
 /* lower bit position of bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_shift 29
+#define HW_ATL_RPF_ET_RXQFEN_SHIFT 29
 /* width of bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_width 1
+#define HW_ATL_RPF_ET_RXQFEN_WIDTH 1
 /* default value of bitfield et_rxq{f}_en */
-#define rpf_et_rxqfen_default 0x0
+#define HW_ATL_RPF_ET_RXQFEN_DEFAULT 0x0
 
 /* rx et_up{f}[2:0] bitfield definitions
  * preprocessor definitions for the bitfield "et_up{f}[2:0]".
@@ -1151,17 +1157,17 @@
  */
 
 /* register address for bitfield et_up{f}[2:0] */
-#define rpf_et_upf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_UPF_ADR(filter) (0x00005300 + (filter) * 0x4)
 /* bitmask for bitfield et_up{f}[2:0] */
-#define rpf_et_upf_msk 0x1c000000
+#define HW_ATL_RPF_ET_UPF_MSK 0x1c000000
 /* inverted bitmask for bitfield et_up{f}[2:0] */
-#define rpf_et_upf_mskn 0xe3ffffff
+#define HW_ATL_RPF_ET_UPF_MSKN 0xe3ffffff
 /* lower bit position of bitfield et_up{f}[2:0] */
-#define rpf_et_upf_shift 26
+#define HW_ATL_RPF_ET_UPF_SHIFT 26
 /* width of bitfield et_up{f}[2:0] */
-#define rpf_et_upf_width 3
+#define HW_ATL_RPF_ET_UPF_WIDTH 3
 /* default value of bitfield et_up{f}[2:0] */
-#define rpf_et_upf_default 0x0
+#define HW_ATL_RPF_ET_UPF_DEFAULT 0x0
 
 /* rx et_rxq{f}[4:0] bitfield definitions
  * preprocessor definitions for the bitfield "et_rxq{f}[4:0]".
@@ -1170,17 +1176,17 @@
  */
 
 /* register address for bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_RXQF_ADR(filter) (0x00005300 + (filter) * 0x4)
 /* bitmask for bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_msk 0x01f00000
+#define HW_ATL_RPF_ET_RXQF_MSK 0x01f00000
 /* inverted bitmask for bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_mskn 0xfe0fffff
+#define HW_ATL_RPF_ET_RXQF_MSKN 0xfe0fffff
 /* lower bit position of bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_shift 20
+#define HW_ATL_RPF_ET_RXQF_SHIFT 20
 /* width of bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_width 5
+#define HW_ATL_RPF_ET_RXQF_WIDTH 5
 /* default value of bitfield et_rxq{f}[4:0] */
-#define rpf_et_rxqf_default 0x0
+#define HW_ATL_RPF_ET_RXQF_DEFAULT 0x0
 
 /* rx et_mng_rxq{f} bitfield definitions
  * preprocessor definitions for the bitfield "et_mng_rxq{f}".
@@ -1189,17 +1195,17 @@
  */
 
 /* register address for bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_MNG_RXQF_ADR(filter) (0x00005300 + (filter) * 0x4)
 /* bitmask for bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_msk 0x00080000
+#define HW_ATL_RPF_ET_MNG_RXQF_MSK 0x00080000
 /* inverted bitmask for bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_mskn 0xfff7ffff
+#define HW_ATL_RPF_ET_MNG_RXQF_MSKN 0xfff7ffff
 /* lower bit position of bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_shift 19
+#define HW_ATL_RPF_ET_MNG_RXQF_SHIFT 19
 /* width of bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_width 1
+#define HW_ATL_RPF_ET_MNG_RXQF_WIDTH 1
 /* default value of bitfield et_mng_rxq{f} */
-#define rpf_et_mng_rxqf_default 0x0
+#define HW_ATL_RPF_ET_MNG_RXQF_DEFAULT 0x0
 
 /* rx et_act{f}[2:0] bitfield definitions
  * preprocessor definitions for the bitfield "et_act{f}[2:0]".
@@ -1208,17 +1214,17 @@
  */
 
 /* register address for bitfield et_act{f}[2:0] */
-#define rpf_et_actf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_ACTF_ADR(filter) (0x00005300 + (filter) * 0x4)
 /* bitmask for bitfield et_act{f}[2:0] */
-#define rpf_et_actf_msk 0x00070000
+#define HW_ATL_RPF_ET_ACTF_MSK 0x00070000
 /* inverted bitmask for bitfield et_act{f}[2:0] */
-#define rpf_et_actf_mskn 0xfff8ffff
+#define HW_ATL_RPF_ET_ACTF_MSKN 0xfff8ffff
 /* lower bit position of bitfield et_act{f}[2:0] */
-#define rpf_et_actf_shift 16
+#define HW_ATL_RPF_ET_ACTF_SHIFT 16
 /* width of bitfield et_act{f}[2:0] */
-#define rpf_et_actf_width 3
+#define HW_ATL_RPF_ET_ACTF_WIDTH 3
 /* default value of bitfield et_act{f}[2:0] */
-#define rpf_et_actf_default 0x0
+#define HW_ATL_RPF_ET_ACTF_DEFAULT 0x0
 
 /* rx et_val{f}[f:0] bitfield definitions
  * preprocessor definitions for the bitfield "et_val{f}[f:0]".
@@ -1227,17 +1233,17 @@
  */
 
 /* register address for bitfield et_val{f}[f:0] */
-#define rpf_et_valf_adr(filter) (0x00005300 + (filter) * 0x4)
+#define HW_ATL_RPF_ET_VALF_ADR(filter) (0x00005300 + (filter) * 0x4)
 /* bitmask for bitfield et_val{f}[f:0] */
-#define rpf_et_valf_msk 0x0000ffff
+#define HW_ATL_RPF_ET_VALF_MSK 0x0000ffff
 /* inverted bitmask for bitfield et_val{f}[f:0] */
-#define rpf_et_valf_mskn 0xffff0000
+#define HW_ATL_RPF_ET_VALF_MSKN 0xffff0000
 /* lower bit position of bitfield et_val{f}[f:0] */
-#define rpf_et_valf_shift 0
+#define HW_ATL_RPF_ET_VALF_SHIFT 0
 /* width of bitfield et_val{f}[f:0] */
-#define rpf_et_valf_width 16
+#define HW_ATL_RPF_ET_VALF_WIDTH 16
 /* default value of bitfield et_val{f}[f:0] */
-#define rpf_et_valf_default 0x0
+#define HW_ATL_RPF_ET_VALF_DEFAULT 0x0
 
 /* rx ipv4_chk_en bitfield definitions
  * preprocessor definitions for the bitfield "ipv4_chk_en".
@@ -1245,17 +1251,17 @@
  */
 
 /* register address for bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_adr 0x00005580
+#define HW_ATL_RPO_IPV4CHK_EN_ADR 0x00005580
 /* bitmask for bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_msk 0x00000002
+#define HW_ATL_RPO_IPV4CHK_EN_MSK 0x00000002
 /* inverted bitmask for bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_mskn 0xfffffffd
+#define HW_ATL_RPO_IPV4CHK_EN_MSKN 0xfffffffd
 /* lower bit position of bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_shift 1
+#define HW_ATL_RPO_IPV4CHK_EN_SHIFT 1
 /* width of bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_width 1
+#define HW_ATL_RPO_IPV4CHK_EN_WIDTH 1
 /* default value of bitfield ipv4_chk_en */
-#define rpo_ipv4chk_en_default 0x0
+#define HW_ATL_RPO_IPV4CHK_EN_DEFAULT 0x0
 
 /* rx desc{d}_vl_strip bitfield definitions
  * preprocessor definitions for the bitfield "desc{d}_vl_strip".
@@ -1264,17 +1270,18 @@
  */
 
 /* register address for bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+#define HW_ATL_RPO_DESCDVL_STRIP_ADR(descriptor) \
+	(0x00005b08 + (descriptor) * 0x20)
 /* bitmask for bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_msk 0x20000000
+#define HW_ATL_RPO_DESCDVL_STRIP_MSK 0x20000000
 /* inverted bitmask for bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_mskn 0xdfffffff
+#define HW_ATL_RPO_DESCDVL_STRIP_MSKN 0xdfffffff
 /* lower bit position of bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_shift 29
+#define HW_ATL_RPO_DESCDVL_STRIP_SHIFT 29
 /* width of bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_width 1
+#define HW_ATL_RPO_DESCDVL_STRIP_WIDTH 1
 /* default value of bitfield desc{d}_vl_strip */
-#define rpo_descdvl_strip_default 0x0
+#define HW_ATL_RPO_DESCDVL_STRIP_DEFAULT 0x0
 
 /* rx l4_chk_en bitfield definitions
  * preprocessor definitions for the bitfield "l4_chk_en".
@@ -1282,17 +1289,17 @@
  */
 
 /* register address for bitfield l4_chk_en */
-#define rpol4chk_en_adr 0x00005580
+#define HW_ATL_RPOL4CHK_EN_ADR 0x00005580
 /* bitmask for bitfield l4_chk_en */
-#define rpol4chk_en_msk 0x00000001
+#define HW_ATL_RPOL4CHK_EN_MSK 0x00000001
 /* inverted bitmask for bitfield l4_chk_en */
-#define rpol4chk_en_mskn 0xfffffffe
+#define HW_ATL_RPOL4CHK_EN_MSKN 0xfffffffe
 /* lower bit position of bitfield l4_chk_en */
-#define rpol4chk_en_shift 0
+#define HW_ATL_RPOL4CHK_EN_SHIFT 0
 /* width of bitfield l4_chk_en */
-#define rpol4chk_en_width 1
+#define HW_ATL_RPOL4CHK_EN_WIDTH 1
 /* default value of bitfield l4_chk_en */
-#define rpol4chk_en_default 0x0
+#define HW_ATL_RPOL4CHK_EN_DEFAULT 0x0
 
 /* rx reg_res_dsbl bitfield definitions
  * preprocessor definitions for the bitfield "reg_res_dsbl".
@@ -1300,17 +1307,17 @@
  */
 
 /* register address for bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_adr 0x00005000
+#define HW_ATL_RX_REG_RES_DSBL_ADR 0x00005000
 /* bitmask for bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_msk 0x20000000
+#define HW_ATL_RX_REG_RES_DSBL_MSK 0x20000000
 /* inverted bitmask for bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_mskn 0xdfffffff
+#define HW_ATL_RX_REG_RES_DSBL_MSKN 0xdfffffff
 /* lower bit position of bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_shift 29
+#define HW_ATL_RX_REG_RES_DSBL_SHIFT 29
 /* width of bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_width 1
+#define HW_ATL_RX_REG_RES_DSBL_WIDTH 1
 /* default value of bitfield reg_res_dsbl */
-#define rx_reg_res_dsbl_default 0x1
+#define HW_ATL_RX_REG_RES_DSBL_DEFAULT 0x1
 
 /* tx dca{d}_cpuid[7:0] bitfield definitions
  * preprocessor definitions for the bitfield "dca{d}_cpuid[7:0]".
@@ -1319,17 +1326,17 @@
  */
 
 /* register address for bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_adr(dca) (0x00008400 + (dca) * 0x4)
+#define HW_ATL_TDM_DCADCPUID_ADR(dca) (0x00008400 + (dca) * 0x4)
 /* bitmask for bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_msk 0x000000ff
+#define HW_ATL_TDM_DCADCPUID_MSK 0x000000ff
 /* inverted bitmask for bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_mskn 0xffffff00
+#define HW_ATL_TDM_DCADCPUID_MSKN 0xffffff00
 /* lower bit position of bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_shift 0
+#define HW_ATL_TDM_DCADCPUID_SHIFT 0
 /* width of bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_width 8
+#define HW_ATL_TDM_DCADCPUID_WIDTH 8
 /* default value of bitfield dca{d}_cpuid[7:0] */
-#define tdm_dcadcpuid_default 0x0
+#define HW_ATL_TDM_DCADCPUID_DEFAULT 0x0
 
 /* tx lso_en[1f:0] bitfield definitions
  * preprocessor definitions for the bitfield "lso_en[1f:0]".
@@ -1337,17 +1344,17 @@
  */
 
 /* register address for bitfield lso_en[1f:0] */
-#define tdm_lso_en_adr 0x00007810
+#define HW_ATL_TDM_LSO_EN_ADR 0x00007810
 /* bitmask for bitfield lso_en[1f:0] */
-#define tdm_lso_en_msk 0xffffffff
+#define HW_ATL_TDM_LSO_EN_MSK 0xffffffff
 /* inverted bitmask for bitfield lso_en[1f:0] */
-#define tdm_lso_en_mskn 0x00000000
+#define HW_ATL_TDM_LSO_EN_MSKN 0x00000000
 /* lower bit position of bitfield lso_en[1f:0] */
-#define tdm_lso_en_shift 0
+#define HW_ATL_TDM_LSO_EN_SHIFT 0
 /* width of bitfield lso_en[1f:0] */
-#define tdm_lso_en_width 32
+#define HW_ATL_TDM_LSO_EN_WIDTH 32
 /* default value of bitfield lso_en[1f:0] */
-#define tdm_lso_en_default 0x0
+#define HW_ATL_TDM_LSO_EN_DEFAULT 0x0
 
 /* tx dca_en bitfield definitions
  * preprocessor definitions for the bitfield "dca_en".
@@ -1355,17 +1362,17 @@
  */
 
 /* register address for bitfield dca_en */
-#define tdm_dca_en_adr 0x00008480
+#define HW_ATL_TDM_DCA_EN_ADR 0x00008480
 /* bitmask for bitfield dca_en */
-#define tdm_dca_en_msk 0x80000000
+#define HW_ATL_TDM_DCA_EN_MSK 0x80000000
 /* inverted bitmask for bitfield dca_en */
-#define tdm_dca_en_mskn 0x7fffffff
+#define HW_ATL_TDM_DCA_EN_MSKN 0x7fffffff
 /* lower bit position of bitfield dca_en */
-#define tdm_dca_en_shift 31
+#define HW_ATL_TDM_DCA_EN_SHIFT 31
 /* width of bitfield dca_en */
-#define tdm_dca_en_width 1
+#define HW_ATL_TDM_DCA_EN_WIDTH 1
 /* default value of bitfield dca_en */
-#define tdm_dca_en_default 0x1
+#define HW_ATL_TDM_DCA_EN_DEFAULT 0x1
 
 /* tx dca_mode[3:0] bitfield definitions
  * preprocessor definitions for the bitfield "dca_mode[3:0]".
@@ -1373,17 +1380,17 @@
  */
 
 /* register address for bitfield dca_mode[3:0] */
-#define tdm_dca_mode_adr 0x00008480
+#define HW_ATL_TDM_DCA_MODE_ADR 0x00008480
 /* bitmask for bitfield dca_mode[3:0] */
-#define tdm_dca_mode_msk 0x0000000f
+#define HW_ATL_TDM_DCA_MODE_MSK 0x0000000f
 /* inverted bitmask for bitfield dca_mode[3:0] */
-#define tdm_dca_mode_mskn 0xfffffff0
+#define HW_ATL_TDM_DCA_MODE_MSKN 0xfffffff0
 /* lower bit position of bitfield dca_mode[3:0] */
-#define tdm_dca_mode_shift 0
+#define HW_ATL_TDM_DCA_MODE_SHIFT 0
 /* width of bitfield dca_mode[3:0] */
-#define tdm_dca_mode_width 4
+#define HW_ATL_TDM_DCA_MODE_WIDTH 4
 /* default value of bitfield dca_mode[3:0] */
-#define tdm_dca_mode_default 0x0
+#define HW_ATL_TDM_DCA_MODE_DEFAULT 0x0
 
 /* tx dca{d}_desc_en bitfield definitions
  * preprocessor definitions for the bitfield "dca{d}_desc_en".
@@ -1392,17 +1399,17 @@
  */
 
 /* register address for bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_adr(dca) (0x00008400 + (dca) * 0x4)
+#define HW_ATL_TDM_DCADDESC_EN_ADR(dca) (0x00008400 + (dca) * 0x4)
 /* bitmask for bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_msk 0x80000000
+#define HW_ATL_TDM_DCADDESC_EN_MSK 0x80000000
 /* inverted bitmask for bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_mskn 0x7fffffff
+#define HW_ATL_TDM_DCADDESC_EN_MSKN 0x7fffffff
 /* lower bit position of bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_shift 31
+#define HW_ATL_TDM_DCADDESC_EN_SHIFT 31
 /* width of bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_width 1
+#define HW_ATL_TDM_DCADDESC_EN_WIDTH 1
 /* default value of bitfield dca{d}_desc_en */
-#define tdm_dcaddesc_en_default 0x0
+#define HW_ATL_TDM_DCADDESC_EN_DEFAULT 0x0
 
 /* tx desc{d}_en bitfield definitions
  * preprocessor definitions for the bitfield "desc{d}_en".
@@ -1411,17 +1418,17 @@
  */
 
 /* register address for bitfield desc{d}_en */
-#define tdm_descden_adr(descriptor) (0x00007c08 + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESCDEN_ADR(descriptor) (0x00007c08 + (descriptor) * 0x40)
 /* bitmask for bitfield desc{d}_en */
-#define tdm_descden_msk 0x80000000
+#define HW_ATL_TDM_DESCDEN_MSK 0x80000000
 /* inverted bitmask for bitfield desc{d}_en */
-#define tdm_descden_mskn 0x7fffffff
+#define HW_ATL_TDM_DESCDEN_MSKN 0x7fffffff
 /* lower bit position of bitfield desc{d}_en */
-#define tdm_descden_shift 31
+#define HW_ATL_TDM_DESCDEN_SHIFT 31
 /* width of bitfield desc{d}_en */
-#define tdm_descden_width 1
+#define HW_ATL_TDM_DESCDEN_WIDTH 1
 /* default value of bitfield desc{d}_en */
-#define tdm_descden_default 0x0
+#define HW_ATL_TDM_DESCDEN_DEFAULT 0x0
 
 /* tx desc{d}_hd[c:0] bitfield definitions
  * preprocessor definitions for the bitfield "desc{d}_hd[c:0]".
@@ -1430,15 +1437,15 @@
  */
 
 /* register address for bitfield desc{d}_hd[c:0] */
-#define tdm_descdhd_adr(descriptor) (0x00007c0c + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESCDHD_ADR(descriptor) (0x00007c0c + (descriptor) * 0x40)
 /* bitmask for bitfield desc{d}_hd[c:0] */
-#define tdm_descdhd_msk 0x00001fff
+#define HW_ATL_TDM_DESCDHD_MSK 0x00001fff
 /* inverted bitmask for bitfield desc{d}_hd[c:0] */
-#define tdm_descdhd_mskn 0xffffe000
+#define HW_ATL_TDM_DESCDHD_MSKN 0xffffe000
 /* lower bit position of bitfield desc{d}_hd[c:0] */
-#define tdm_descdhd_shift 0
+#define HW_ATL_TDM_DESCDHD_SHIFT 0
 /* width of bitfield desc{d}_hd[c:0] */
-#define tdm_descdhd_width 13
+#define HW_ATL_TDM_DESCDHD_WIDTH 13
 
 /* tx desc{d}_len[9:0] bitfield definitions
  * preprocessor definitions for the bitfield "desc{d}_len[9:0]".
@@ -1447,17 +1454,17 @@
  */
 
 /* register address for bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_adr(descriptor) (0x00007c08 + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESCDLEN_ADR(descriptor) (0x00007c08 + (descriptor) * 0x40)
 /* bitmask for bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_msk 0x00001ff8
+#define HW_ATL_TDM_DESCDLEN_MSK 0x00001ff8
 /* inverted bitmask for bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_mskn 0xffffe007
+#define HW_ATL_TDM_DESCDLEN_MSKN 0xffffe007
 /* lower bit position of bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_shift 3
+#define HW_ATL_TDM_DESCDLEN_SHIFT 3
 /* width of bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_width 10
+#define HW_ATL_TDM_DESCDLEN_WIDTH 10
 /* default value of bitfield desc{d}_len[9:0] */
-#define tdm_descdlen_default 0x0
+#define HW_ATL_TDM_DESCDLEN_DEFAULT 0x0
 
 /* tx int_desc_wrb_en bitfield definitions
  * preprocessor definitions for the bitfield "int_desc_wrb_en".
@@ -1465,17 +1472,17 @@
  */
 
 /* register address for bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_adr 0x00007b40
+#define HW_ATL_TDM_INT_DESC_WRB_EN_ADR 0x00007b40
 /* bitmask for bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_msk 0x00000002
+#define HW_ATL_TDM_INT_DESC_WRB_EN_MSK 0x00000002
 /* inverted bitmask for bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_mskn 0xfffffffd
+#define HW_ATL_TDM_INT_DESC_WRB_EN_MSKN 0xfffffffd
 /* lower bit position of bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_shift 1
+#define HW_ATL_TDM_INT_DESC_WRB_EN_SHIFT 1
 /* width of bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_width 1
+#define HW_ATL_TDM_INT_DESC_WRB_EN_WIDTH 1
 /* default value of bitfield int_desc_wrb_en */
-#define tdm_int_desc_wrb_en_default 0x0
+#define HW_ATL_TDM_INT_DESC_WRB_EN_DEFAULT 0x0
 
 /* tx desc{d}_wrb_thresh[6:0] bitfield definitions
  * preprocessor definitions for the bitfield "desc{d}_wrb_thresh[6:0]".
@@ -1484,17 +1491,18 @@
  */
 
 /* register address for bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_adr(descriptor) (0x00007c18 + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESCDWRB_THRESH_ADR(descriptor) \
+	(0x00007c18 + (descriptor) * 0x40)
 /* bitmask for bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_msk 0x00007f00
+#define HW_ATL_TDM_DESCDWRB_THRESH_MSK 0x00007f00
 /* inverted bitmask for bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_mskn 0xffff80ff
+#define HW_ATL_TDM_DESCDWRB_THRESH_MSKN 0xffff80ff
 /* lower bit position of bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_shift 8
+#define HW_ATL_TDM_DESCDWRB_THRESH_SHIFT 8
 /* width of bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_width 7
+#define HW_ATL_TDM_DESCDWRB_THRESH_WIDTH 7
 /* default value of bitfield desc{d}_wrb_thresh[6:0] */
-#define tdm_descdwrb_thresh_default 0x0
+#define HW_ATL_TDM_DESCDWRB_THRESH_DEFAULT 0x0
 
 /* tx lso_tcp_flag_first[b:0] bitfield definitions
  * preprocessor definitions for the bitfield "lso_tcp_flag_first[b:0]".
@@ -1502,17 +1510,17 @@
  */
 
 /* register address for bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_adr 0x00007820
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_ADR 0x00007820
 /* bitmask for bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_msk 0x00000fff
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSK 0x00000fff
 /* inverted bitmask for bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_mskn 0xfffff000
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_MSKN 0xfffff000
 /* lower bit position of bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_shift 0
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_SHIFT 0
 /* width of bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_width 12
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_WIDTH 12
 /* default value of bitfield lso_tcp_flag_first[b:0] */
-#define thm_lso_tcp_flag_first_default 0x0
+#define HW_ATL_THM_LSO_TCP_FLAG_FIRST_DEFAULT 0x0
 
 /* tx lso_tcp_flag_last[b:0] bitfield definitions
  * preprocessor definitions for the bitfield "lso_tcp_flag_last[b:0]".
@@ -1520,17 +1528,17 @@
  */
 
 /* register address for bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_adr 0x00007824
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_ADR 0x00007824
 /* bitmask for bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_msk 0x00000fff
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_MSK 0x00000fff
 /* inverted bitmask for bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_mskn 0xfffff000
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_MSKN 0xfffff000
 /* lower bit position of bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_shift 0
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_SHIFT 0
 /* width of bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_width 12
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_WIDTH 12
 /* default value of bitfield lso_tcp_flag_last[b:0] */
-#define thm_lso_tcp_flag_last_default 0x0
+#define HW_ATL_THM_LSO_TCP_FLAG_LAST_DEFAULT 0x0
 
 /* tx lso_tcp_flag_mid[b:0] bitfield definitions
  * preprocessor definitions for the bitfield "lso_tcp_flag_mid[b:0]".
@@ -1538,17 +1546,17 @@
  */
 
 /* Register address for bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_adr 0x00005598
+#define HW_ATL_RPO_LRO_RSC_MAX_ADR 0x00005598
 /* Bitmask for bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_msk 0xFFFFFFFF
+#define HW_ATL_RPO_LRO_RSC_MAX_MSK 0xFFFFFFFF
 /* Inverted bitmask for bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_mskn 0x00000000
+#define HW_ATL_RPO_LRO_RSC_MAX_MSKN 0x00000000
 /* Lower bit position of bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_shift 0
+#define HW_ATL_RPO_LRO_RSC_MAX_SHIFT 0
 /* Width of bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_width 32
+#define HW_ATL_RPO_LRO_RSC_MAX_WIDTH 32
 /* Default value of bitfield lro_rsc_max[1F:0] */
-#define rpo_lro_rsc_max_default 0x0
+#define HW_ATL_RPO_LRO_RSC_MAX_DEFAULT 0x0
 
 /* RX lro_en[1F:0] Bitfield Definitions
  * Preprocessor definitions for the bitfield "lro_en[1F:0]".
@@ -1556,17 +1564,17 @@
  */
 
 /* Register address for bitfield lro_en[1F:0] */
-#define rpo_lro_en_adr 0x00005590
+#define HW_ATL_RPO_LRO_EN_ADR 0x00005590
 /* Bitmask for bitfield lro_en[1F:0] */
-#define rpo_lro_en_msk 0xFFFFFFFF
+#define HW_ATL_RPO_LRO_EN_MSK 0xFFFFFFFF
 /* Inverted bitmask for bitfield lro_en[1F:0] */
-#define rpo_lro_en_mskn 0x00000000
+#define HW_ATL_RPO_LRO_EN_MSKN 0x00000000
 /* Lower bit position of bitfield lro_en[1F:0] */
-#define rpo_lro_en_shift 0
+#define HW_ATL_RPO_LRO_EN_SHIFT 0
 /* Width of bitfield lro_en[1F:0] */
-#define rpo_lro_en_width 32
+#define HW_ATL_RPO_LRO_EN_WIDTH 32
 /* Default value of bitfield lro_en[1F:0] */
-#define rpo_lro_en_default 0x0
+#define HW_ATL_RPO_LRO_EN_DEFAULT 0x0
 
 /* RX lro_ptopt_en Bitfield Definitions
  * Preprocessor definitions for the bitfield "lro_ptopt_en".
@@ -1574,17 +1582,17 @@
  */
 
 /* Register address for bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_adr 0x00005594
+#define HW_ATL_RPO_LRO_PTOPT_EN_ADR 0x00005594
 /* Bitmask for bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_msk 0x00008000
+#define HW_ATL_RPO_LRO_PTOPT_EN_MSK 0x00008000
 /* Inverted bitmask for bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_mskn 0xFFFF7FFF
+#define HW_ATL_RPO_LRO_PTOPT_EN_MSKN 0xFFFF7FFF
 /* Lower bit position of bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_shift 15
+#define HW_ATL_RPO_LRO_PTOPT_EN_SHIFT 15
 /* Width of bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_width 1
+#define HW_ATL_RPO_LRO_PTOPT_EN_WIDTH 1
 /* Default value of bitfield lro_ptopt_en */
-#define rpo_lro_ptopt_en_defalt 0x1
+#define HW_ATL_RPO_LRO_PTOPT_EN_DEFAULT 0x1
 
 /* RX lro_q_ses_lmt Bitfield Definitions
  * Preprocessor definitions for the bitfield "lro_q_ses_lmt".
@@ -1592,17 +1600,17 @@
  */
 
 /* Register address for bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_adr 0x00005594
+#define HW_ATL_RPO_LRO_QSES_LMT_ADR 0x00005594
 /* Bitmask for bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_msk 0x00003000
+#define HW_ATL_RPO_LRO_QSES_LMT_MSK 0x00003000
 /* Inverted bitmask for bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_mskn 0xFFFFCFFF
+#define HW_ATL_RPO_LRO_QSES_LMT_MSKN 0xFFFFCFFF
 /* Lower bit position of bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_shift 12
+#define HW_ATL_RPO_LRO_QSES_LMT_SHIFT 12
 /* Width of bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_width 2
+#define HW_ATL_RPO_LRO_QSES_LMT_WIDTH 2
 /* Default value of bitfield lro_q_ses_lmt */
-#define rpo_lro_qses_lmt_default 0x1
+#define HW_ATL_RPO_LRO_QSES_LMT_DEFAULT 0x1
 
 /* RX lro_tot_dsc_lmt[1:0] Bitfield Definitions
  * Preprocessor definitions for the bitfield "lro_tot_dsc_lmt[1:0]".
@@ -1610,17 +1618,17 @@
  */
 
 /* Register address for bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_adr 0x00005594
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_ADR 0x00005594
 /* Bitmask for bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_msk 0x00000060
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_MSK 0x00000060
 /* Inverted bitmask for bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_mskn 0xFFFFFF9F
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_MSKN 0xFFFFFF9F
 /* Lower bit position of bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_shift 5
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_SHIFT 5
 /* Width of bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_width 2
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_WIDTH 2
 /* Default value of bitfield lro_tot_dsc_lmt[1:0] */
-#define rpo_lro_tot_dsc_lmt_defalt 0x1
+#define HW_ATL_RPO_LRO_TOT_DSC_LMT_DEFAULT 0x1
 
 /* RX lro_pkt_min[4:0] Bitfield Definitions
  * Preprocessor definitions for the bitfield "lro_pkt_min[4:0]".
@@ -1628,22 +1636,22 @@
  */
 
 /* Register address for bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_adr 0x00005594
+#define HW_ATL_RPO_LRO_PKT_MIN_ADR 0x00005594
 /* Bitmask for bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_msk 0x0000001F
+#define HW_ATL_RPO_LRO_PKT_MIN_MSK 0x0000001F
 /* Inverted bitmask for bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_mskn 0xFFFFFFE0
+#define HW_ATL_RPO_LRO_PKT_MIN_MSKN 0xFFFFFFE0
 /* Lower bit position of bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_shift 0
+#define HW_ATL_RPO_LRO_PKT_MIN_SHIFT 0
 /* Width of bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_width 5
+#define HW_ATL_RPO_LRO_PKT_MIN_WIDTH 5
 /* Default value of bitfield lro_pkt_min[4:0] */
-#define rpo_lro_pkt_min_default 0x8
+#define HW_ATL_RPO_LRO_PKT_MIN_DEFAULT 0x8
 
 /* Width of bitfield lro{L}_des_max[1:0] */
-#define rpo_lro_ldes_max_width 2
+#define HW_ATL_RPO_LRO_LDES_MAX_WIDTH 2
 /* Default value of bitfield lro{L}_des_max[1:0] */
-#define rpo_lro_ldes_max_default 0x0
+#define HW_ATL_RPO_LRO_LDES_MAX_DEFAULT 0x0
 
 /* RX lro_tb_div[11:0] Bitfield Definitions
  * Preprocessor definitions for the bitfield "lro_tb_div[11:0]".
@@ -1651,17 +1659,17 @@
  */
 
 /* Register address for bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_adr 0x00005620
+#define HW_ATL_RPO_LRO_TB_DIV_ADR 0x00005620
 /* Bitmask for bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_msk 0xFFF00000
+#define HW_ATL_RPO_LRO_TB_DIV_MSK 0xFFF00000
 /* Inverted bitmask for bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_mskn 0x000FFFFF
+#define HW_ATL_RPO_LRO_TB_DIV_MSKN 0x000FFFFF
 /* Lower bit position of bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_shift 20
+#define HW_ATL_RPO_LRO_TB_DIV_SHIFT 20
 /* Width of bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_width 12
+#define HW_ATL_RPO_LRO_TB_DIV_WIDTH 12
 /* Default value of bitfield lro_tb_div[11:0] */
-#define rpo_lro_tb_div_default 0xC35
+#define HW_ATL_RPO_LRO_TB_DIV_DEFAULT 0xC35
 
 /* RX lro_ina_ival[9:0] Bitfield Definitions
  *   Preprocessor definitions for the bitfield "lro_ina_ival[9:0]".
@@ -1669,17 +1677,17 @@
  */
 
 /* Register address for bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_adr 0x00005620
+#define HW_ATL_RPO_LRO_INA_IVAL_ADR 0x00005620
 /* Bitmask for bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_msk 0x000FFC00
+#define HW_ATL_RPO_LRO_INA_IVAL_MSK 0x000FFC00
 /* Inverted bitmask for bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_mskn 0xFFF003FF
+#define HW_ATL_RPO_LRO_INA_IVAL_MSKN 0xFFF003FF
 /* Lower bit position of bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_shift 10
+#define HW_ATL_RPO_LRO_INA_IVAL_SHIFT 10
 /* Width of bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_width 10
+#define HW_ATL_RPO_LRO_INA_IVAL_WIDTH 10
 /* Default value of bitfield lro_ina_ival[9:0] */
-#define rpo_lro_ina_ival_default 0xA
+#define HW_ATL_RPO_LRO_INA_IVAL_DEFAULT 0xA
 
 /* RX lro_max_ival[9:0] Bitfield Definitions
  * Preprocessor definitions for the bitfield "lro_max_ival[9:0]".
@@ -1687,17 +1695,17 @@
  */
 
 /* Register address for bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_adr 0x00005620
+#define HW_ATL_RPO_LRO_MAX_IVAL_ADR 0x00005620
 /* Bitmask for bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_msk 0x000003FF
+#define HW_ATL_RPO_LRO_MAX_IVAL_MSK 0x000003FF
 /* Inverted bitmask for bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_mskn 0xFFFFFC00
+#define HW_ATL_RPO_LRO_MAX_IVAL_MSKN 0xFFFFFC00
 /* Lower bit position of bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_shift 0
+#define HW_ATL_RPO_LRO_MAX_IVAL_SHIFT 0
 /* Width of bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_width 10
+#define HW_ATL_RPO_LRO_MAX_IVAL_WIDTH 10
 /* Default value of bitfield lro_max_ival[9:0] */
-#define rpo_lro_max_ival_default 0x19
+#define HW_ATL_RPO_LRO_MAX_IVAL_DEFAULT 0x19
 
 /* TX dca{D}_cpuid[7:0] Bitfield Definitions
  * Preprocessor definitions for the bitfield "dca{D}_cpuid[7:0]".
@@ -1706,17 +1714,17 @@
  */
 
 /* Register address for bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_adr(dca) (0x00008400 + (dca) * 0x4)
+#define HW_ATL_TDM_DCA_DCPUID_ADR(dca) (0x00008400 + (dca) * 0x4)
 /* Bitmask for bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_msk 0x000000FF
+#define HW_ATL_TDM_DCA_DCPUID_MSK 0x000000FF
 /* Inverted bitmask for bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_mskn 0xFFFFFF00
+#define HW_ATL_TDM_DCA_DCPUID_MSKN 0xFFFFFF00
 /* Lower bit position of bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_shift 0
+#define HW_ATL_TDM_DCA_DCPUID_SHIFT 0
 /* Width of bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_width 8
+#define HW_ATL_TDM_DCA_DCPUID_WIDTH 8
 /* Default value of bitfield dca{D}_cpuid[7:0] */
-#define tdm_dca_dcpuid_default 0x0
+#define HW_ATL_TDM_DCA_DCPUID_DEFAULT 0x0
 
 /* TX dca{D}_desc_en Bitfield Definitions
  * Preprocessor definitions for the bitfield "dca{D}_desc_en".
@@ -1725,17 +1733,17 @@
  */
 
 /* Register address for bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_adr(dca) (0x00008400 + (dca) * 0x4)
+#define HW_ATL_TDM_DCA_DDESC_EN_ADR(dca) (0x00008400 + (dca) * 0x4)
 /* Bitmask for bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_msk 0x80000000
+#define HW_ATL_TDM_DCA_DDESC_EN_MSK 0x80000000
 /* Inverted bitmask for bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_mskn 0x7FFFFFFF
+#define HW_ATL_TDM_DCA_DDESC_EN_MSKN 0x7FFFFFFF
 /* Lower bit position of bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_shift 31
+#define HW_ATL_TDM_DCA_DDESC_EN_SHIFT 31
 /* Width of bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_width 1
+#define HW_ATL_TDM_DCA_DDESC_EN_WIDTH 1
 /* Default value of bitfield dca{D}_desc_en */
-#define tdm_dca_ddesc_en_default 0x0
+#define HW_ATL_TDM_DCA_DDESC_EN_DEFAULT 0x0
 
 /* TX desc{D}_en Bitfield Definitions
  * Preprocessor definitions for the bitfield "desc{D}_en".
@@ -1744,17 +1752,17 @@
  */
 
 /* Register address for bitfield desc{D}_en */
-#define tdm_desc_den_adr(descriptor) (0x00007C08 + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESC_DEN_ADR(descriptor) (0x00007C08 + (descriptor) * 0x40)
 /* Bitmask for bitfield desc{D}_en */
-#define tdm_desc_den_msk 0x80000000
+#define HW_ATL_TDM_DESC_DEN_MSK 0x80000000
 /* Inverted bitmask for bitfield desc{D}_en */
-#define tdm_desc_den_mskn 0x7FFFFFFF
+#define HW_ATL_TDM_DESC_DEN_MSKN 0x7FFFFFFF
 /* Lower bit position of bitfield desc{D}_en */
-#define tdm_desc_den_shift 31
+#define HW_ATL_TDM_DESC_DEN_SHIFT 31
 /* Width of bitfield desc{D}_en */
-#define tdm_desc_den_width 1
+#define HW_ATL_TDM_DESC_DEN_WIDTH 1
 /* Default value of bitfield desc{D}_en */
-#define tdm_desc_den_default 0x0
+#define HW_ATL_TDM_DESC_DEN_DEFAULT 0x0
 
 /* TX desc{D}_hd[C:0] Bitfield Definitions
  * Preprocessor definitions for the bitfield "desc{D}_hd[C:0]".
@@ -1763,15 +1771,15 @@
  */
 
 /* Register address for bitfield desc{D}_hd[C:0] */
-#define tdm_desc_dhd_adr(descriptor) (0x00007C0C + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESC_DHD_ADR(descriptor) (0x00007C0C + (descriptor) * 0x40)
 /* Bitmask for bitfield desc{D}_hd[C:0] */
-#define tdm_desc_dhd_msk 0x00001FFF
+#define HW_ATL_TDM_DESC_DHD_MSK 0x00001FFF
 /* Inverted bitmask for bitfield desc{D}_hd[C:0] */
-#define tdm_desc_dhd_mskn 0xFFFFE000
+#define HW_ATL_TDM_DESC_DHD_MSKN 0xFFFFE000
 /* Lower bit position of bitfield desc{D}_hd[C:0] */
-#define tdm_desc_dhd_shift 0
+#define HW_ATL_TDM_DESC_DHD_SHIFT 0
 /* Width of bitfield desc{D}_hd[C:0] */
-#define tdm_desc_dhd_width 13
+#define HW_ATL_TDM_DESC_DHD_WIDTH 13
 
 /* TX desc{D}_len[9:0] Bitfield Definitions
  * Preprocessor definitions for the bitfield "desc{D}_len[9:0]".
@@ -1780,17 +1788,17 @@
  */
 
 /* Register address for bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_adr(descriptor) (0x00007C08 + (descriptor) * 0x40)
+#define HW_ATL_TDM_DESC_DLEN_ADR(descriptor) (0x00007C08 + (descriptor) * 0x40)
 /* Bitmask for bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_msk 0x00001FF8
+#define HW_ATL_TDM_DESC_DLEN_MSK 0x00001FF8
 /* Inverted bitmask for bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_mskn 0xFFFFE007
+#define HW_ATL_TDM_DESC_DLEN_MSKN 0xFFFFE007
 /* Lower bit position of bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_shift 3
+#define HW_ATL_TDM_DESC_DLEN_SHIFT 3
 /* Width of bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_width 10
+#define HW_ATL_TDM_DESC_DLEN_WIDTH 10
 /* Default value of bitfield desc{D}_len[9:0] */
-#define tdm_desc_dlen_default 0x0
+#define HW_ATL_TDM_DESC_DLEN_DEFAULT 0x0
 
 /* TX desc{D}_wrb_thresh[6:0] Bitfield Definitions
  * Preprocessor definitions for the bitfield "desc{D}_wrb_thresh[6:0]".
@@ -1799,18 +1807,18 @@
  */
 
 /* Register address for bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_adr(descriptor) \
+#define HW_ATL_TDM_DESC_DWRB_THRESH_ADR(descriptor) \
 	(0x00007C18 + (descriptor) * 0x40)
 /* Bitmask for bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_msk 0x00007F00
+#define HW_ATL_TDM_DESC_DWRB_THRESH_MSK 0x00007F00
 /* Inverted bitmask for bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_mskn 0xFFFF80FF
+#define HW_ATL_TDM_DESC_DWRB_THRESH_MSKN 0xFFFF80FF
 /* Lower bit position of bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_shift 8
+#define HW_ATL_TDM_DESC_DWRB_THRESH_SHIFT 8
 /* Width of bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_width 7
+#define HW_ATL_TDM_DESC_DWRB_THRESH_WIDTH 7
 /* Default value of bitfield desc{D}_wrb_thresh[6:0] */
-#define tdm_desc_dwrb_thresh_default 0x0
+#define HW_ATL_TDM_DESC_DWRB_THRESH_DEFAULT 0x0
 
 /* TX tdm_int_mod_en Bitfield Definitions
  * Preprocessor definitions for the bitfield "tdm_int_mod_en".
@@ -1818,34 +1826,34 @@
  */
 
 /* Register address for bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_adr 0x00007B40
+#define HW_ATL_TDM_INT_MOD_EN_ADR 0x00007B40
 /* Bitmask for bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_msk 0x00000010
+#define HW_ATL_TDM_INT_MOD_EN_MSK 0x00000010
 /* Inverted bitmask for bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_mskn 0xFFFFFFEF
+#define HW_ATL_TDM_INT_MOD_EN_MSKN 0xFFFFFFEF
 /* Lower bit position of bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_shift 4
+#define HW_ATL_TDM_INT_MOD_EN_SHIFT 4
 /* Width of bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_width 1
+#define HW_ATL_TDM_INT_MOD_EN_WIDTH 1
 /* Default value of bitfield tdm_int_mod_en */
-#define tdm_int_mod_en_default 0x0
+#define HW_ATL_TDM_INT_MOD_EN_DEFAULT 0x0
 
 /* TX lso_tcp_flag_mid[B:0] Bitfield Definitions
  * Preprocessor definitions for the bitfield "lso_tcp_flag_mid[B:0]".
  * PORT="pif_thm_lso_tcp_flag_mid_i[11:0]"
  */
 /* register address for bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_adr 0x00007820
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_ADR 0x00007820
 /* bitmask for bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_msk 0x0fff0000
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_MSK 0x0fff0000
 /* inverted bitmask for bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_mskn 0xf000ffff
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_MSKN 0xf000ffff
 /* lower bit position of bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_shift 16
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_SHIFT 16
 /* width of bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_width 12
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_WIDTH 12
 /* default value of bitfield lso_tcp_flag_mid[b:0] */
-#define thm_lso_tcp_flag_mid_default 0x0
+#define HW_ATL_THM_LSO_TCP_FLAG_MID_DEFAULT 0x0
 
 /* tx tx_buf_en bitfield definitions
  * preprocessor definitions for the bitfield "tx_buf_en".
@@ -1853,17 +1861,17 @@
  */
 
 /* register address for bitfield tx_buf_en */
-#define tpb_tx_buf_en_adr 0x00007900
+#define HW_ATL_TPB_TX_BUF_EN_ADR 0x00007900
 /* bitmask for bitfield tx_buf_en */
-#define tpb_tx_buf_en_msk 0x00000001
+#define HW_ATL_TPB_TX_BUF_EN_MSK 0x00000001
 /* inverted bitmask for bitfield tx_buf_en */
-#define tpb_tx_buf_en_mskn 0xfffffffe
+#define HW_ATL_TPB_TX_BUF_EN_MSKN 0xfffffffe
 /* lower bit position of bitfield tx_buf_en */
-#define tpb_tx_buf_en_shift 0
+#define HW_ATL_TPB_TX_BUF_EN_SHIFT 0
 /* width of bitfield tx_buf_en */
-#define tpb_tx_buf_en_width 1
+#define HW_ATL_TPB_TX_BUF_EN_WIDTH 1
 /* default value of bitfield tx_buf_en */
-#define tpb_tx_buf_en_default 0x0
+#define HW_ATL_TPB_TX_BUF_EN_DEFAULT 0x0
 
 /* tx tx{b}_hi_thresh[c:0] bitfield definitions
  * preprocessor definitions for the bitfield "tx{b}_hi_thresh[c:0]".
@@ -1872,17 +1880,17 @@
  */
 
 /* register address for bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10)
+#define HW_ATL_TPB_TXBHI_THRESH_ADR(buffer) (0x00007914 + (buffer) * 0x10)
 /* bitmask for bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_msk 0x1fff0000
+#define HW_ATL_TPB_TXBHI_THRESH_MSK 0x1fff0000
 /* inverted bitmask for bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_mskn 0xe000ffff
+#define HW_ATL_TPB_TXBHI_THRESH_MSKN 0xe000ffff
 /* lower bit position of bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_shift 16
+#define HW_ATL_TPB_TXBHI_THRESH_SHIFT 16
 /* width of bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_width 13
+#define HW_ATL_TPB_TXBHI_THRESH_WIDTH 13
 /* default value of bitfield tx{b}_hi_thresh[c:0] */
-#define tpb_txbhi_thresh_default 0x0
+#define HW_ATL_TPB_TXBHI_THRESH_DEFAULT 0x0
 
 /* tx tx{b}_lo_thresh[c:0] bitfield definitions
  * preprocessor definitions for the bitfield "tx{b}_lo_thresh[c:0]".
@@ -1891,17 +1899,17 @@
  */
 
 /* register address for bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10)
+#define HW_ATL_TPB_TXBLO_THRESH_ADR(buffer) (0x00007914 + (buffer) * 0x10)
 /* bitmask for bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_msk 0x00001fff
+#define HW_ATL_TPB_TXBLO_THRESH_MSK 0x00001fff
 /* inverted bitmask for bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_mskn 0xffffe000
+#define HW_ATL_TPB_TXBLO_THRESH_MSKN 0xffffe000
 /* lower bit position of bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_shift 0
+#define HW_ATL_TPB_TXBLO_THRESH_SHIFT 0
 /* width of bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_width 13
+#define HW_ATL_TPB_TXBLO_THRESH_WIDTH 13
 /* default value of bitfield tx{b}_lo_thresh[c:0] */
-#define tpb_txblo_thresh_default 0x0
+#define HW_ATL_TPB_TXBLO_THRESH_DEFAULT 0x0
 
 /* tx dma_sys_loopback bitfield definitions
  * preprocessor definitions for the bitfield "dma_sys_loopback".
@@ -1909,17 +1917,17 @@
  */
 
 /* register address for bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_adr 0x00007000
+#define HW_ATL_TPB_DMA_SYS_LBK_ADR 0x00007000
 /* bitmask for bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_msk 0x00000040
+#define HW_ATL_TPB_DMA_SYS_LBK_MSK 0x00000040
 /* inverted bitmask for bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_mskn 0xffffffbf
+#define HW_ATL_TPB_DMA_SYS_LBK_MSKN 0xffffffbf
 /* lower bit position of bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_shift 6
+#define HW_ATL_TPB_DMA_SYS_LBK_SHIFT 6
 /* width of bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_width 1
+#define HW_ATL_TPB_DMA_SYS_LBK_WIDTH 1
 /* default value of bitfield dma_sys_loopback */
-#define tpb_dma_sys_lbk_default 0x0
+#define HW_ATL_TPB_DMA_SYS_LBK_DEFAULT 0x0
 
 /* tx tx{b}_buf_size[7:0] bitfield definitions
  * preprocessor definitions for the bitfield "tx{b}_buf_size[7:0]".
@@ -1928,17 +1936,17 @@
  */
 
 /* register address for bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_adr(buffer) (0x00007910 + (buffer) * 0x10)
+#define HW_ATL_TPB_TXBBUF_SIZE_ADR(buffer) (0x00007910 + (buffer) * 0x10)
 /* bitmask for bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_msk 0x000000ff
+#define HW_ATL_TPB_TXBBUF_SIZE_MSK 0x000000ff
 /* inverted bitmask for bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_mskn 0xffffff00
+#define HW_ATL_TPB_TXBBUF_SIZE_MSKN 0xffffff00
 /* lower bit position of bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_shift 0
+#define HW_ATL_TPB_TXBBUF_SIZE_SHIFT 0
 /* width of bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_width 8
+#define HW_ATL_TPB_TXBBUF_SIZE_WIDTH 8
 /* default value of bitfield tx{b}_buf_size[7:0] */
-#define tpb_txbbuf_size_default 0x0
+#define HW_ATL_TPB_TXBBUF_SIZE_DEFAULT 0x0
 
 /* tx tx_scp_ins_en bitfield definitions
  * preprocessor definitions for the bitfield "tx_scp_ins_en".
@@ -1946,17 +1954,17 @@
  */
 
 /* register address for bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_adr 0x00007900
+#define HW_ATL_TPB_TX_SCP_INS_EN_ADR 0x00007900
 /* bitmask for bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_msk 0x00000004
+#define HW_ATL_TPB_TX_SCP_INS_EN_MSK 0x00000004
 /* inverted bitmask for bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_mskn 0xfffffffb
+#define HW_ATL_TPB_TX_SCP_INS_EN_MSKN 0xfffffffb
 /* lower bit position of bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_shift 2
+#define HW_ATL_TPB_TX_SCP_INS_EN_SHIFT 2
 /* width of bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_width 1
+#define HW_ATL_TPB_TX_SCP_INS_EN_WIDTH 1
 /* default value of bitfield tx_scp_ins_en */
-#define tpb_tx_scp_ins_en_default 0x0
+#define HW_ATL_TPB_TX_SCP_INS_EN_DEFAULT 0x0
 
 /* tx ipv4_chk_en bitfield definitions
  * preprocessor definitions for the bitfield "ipv4_chk_en".
@@ -1964,17 +1972,17 @@
  */
 
 /* register address for bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_adr 0x00007800
+#define HW_ATL_TPO_IPV4CHK_EN_ADR 0x00007800
 /* bitmask for bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_msk 0x00000002
+#define HW_ATL_TPO_IPV4CHK_EN_MSK 0x00000002
 /* inverted bitmask for bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_mskn 0xfffffffd
+#define HW_ATL_TPO_IPV4CHK_EN_MSKN 0xfffffffd
 /* lower bit position of bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_shift 1
+#define HW_ATL_TPO_IPV4CHK_EN_SHIFT 1
 /* width of bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_width 1
+#define HW_ATL_TPO_IPV4CHK_EN_WIDTH 1
 /* default value of bitfield ipv4_chk_en */
-#define tpo_ipv4chk_en_default 0x0
+#define HW_ATL_TPO_IPV4CHK_EN_DEFAULT 0x0
 
 /* tx l4_chk_en bitfield definitions
  * preprocessor definitions for the bitfield "l4_chk_en".
@@ -1982,17 +1990,17 @@
  */
 
 /* register address for bitfield l4_chk_en */
-#define tpol4chk_en_adr 0x00007800
+#define HW_ATL_TPOL4CHK_EN_ADR 0x00007800
 /* bitmask for bitfield l4_chk_en */
-#define tpol4chk_en_msk 0x00000001
+#define HW_ATL_TPOL4CHK_EN_MSK 0x00000001
 /* inverted bitmask for bitfield l4_chk_en */
-#define tpol4chk_en_mskn 0xfffffffe
+#define HW_ATL_TPOL4CHK_EN_MSKN 0xfffffffe
 /* lower bit position of bitfield l4_chk_en */
-#define tpol4chk_en_shift 0
+#define HW_ATL_TPOL4CHK_EN_SHIFT 0
 /* width of bitfield l4_chk_en */
-#define tpol4chk_en_width 1
+#define HW_ATL_TPOL4CHK_EN_WIDTH 1
 /* default value of bitfield l4_chk_en */
-#define tpol4chk_en_default 0x0
+#define HW_ATL_TPOL4CHK_EN_DEFAULT 0x0
 
 /* tx pkt_sys_loopback bitfield definitions
  * preprocessor definitions for the bitfield "pkt_sys_loopback".
@@ -2000,17 +2008,17 @@
  */
 
 /* register address for bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_adr 0x00007000
+#define HW_ATL_TPO_PKT_SYS_LBK_ADR 0x00007000
 /* bitmask for bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_msk 0x00000080
+#define HW_ATL_TPO_PKT_SYS_LBK_MSK 0x00000080
 /* inverted bitmask for bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_mskn 0xffffff7f
+#define HW_ATL_TPO_PKT_SYS_LBK_MSKN 0xffffff7f
 /* lower bit position of bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_shift 7
+#define HW_ATL_TPO_PKT_SYS_LBK_SHIFT 7
 /* width of bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_width 1
+#define HW_ATL_TPO_PKT_SYS_LBK_WIDTH 1
 /* default value of bitfield pkt_sys_loopback */
-#define tpo_pkt_sys_lbk_default 0x0
+#define HW_ATL_TPO_PKT_SYS_LBK_DEFAULT 0x0
 
 /* tx data_tc_arb_mode bitfield definitions
  * preprocessor definitions for the bitfield "data_tc_arb_mode".
@@ -2018,17 +2026,17 @@
  */
 
 /* register address for bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_adr 0x00007100
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_ADR 0x00007100
 /* bitmask for bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_msk 0x00000001
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_MSK 0x00000001
 /* inverted bitmask for bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_mskn 0xfffffffe
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_MSKN 0xfffffffe
 /* lower bit position of bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_shift 0
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_SHIFT 0
 /* width of bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_width 1
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_WIDTH 1
 /* default value of bitfield data_tc_arb_mode */
-#define tps_data_tc_arb_mode_default 0x0
+#define HW_ATL_TPS_DATA_TC_ARB_MODE_DEFAULT 0x0
 
 /* tx desc_rate_ta_rst bitfield definitions
  * preprocessor definitions for the bitfield "desc_rate_ta_rst".
@@ -2036,17 +2044,17 @@
  */
 
 /* register address for bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_adr 0x00007310
+#define HW_ATL_TPS_DESC_RATE_TA_RST_ADR 0x00007310
 /* bitmask for bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_msk 0x80000000
+#define HW_ATL_TPS_DESC_RATE_TA_RST_MSK 0x80000000
 /* inverted bitmask for bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_mskn 0x7fffffff
+#define HW_ATL_TPS_DESC_RATE_TA_RST_MSKN 0x7fffffff
 /* lower bit position of bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_shift 31
+#define HW_ATL_TPS_DESC_RATE_TA_RST_SHIFT 31
 /* width of bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_width 1
+#define HW_ATL_TPS_DESC_RATE_TA_RST_WIDTH 1
 /* default value of bitfield desc_rate_ta_rst */
-#define tps_desc_rate_ta_rst_default 0x0
+#define HW_ATL_TPS_DESC_RATE_TA_RST_DEFAULT 0x0
 
 /* tx desc_rate_limit[a:0] bitfield definitions
  * preprocessor definitions for the bitfield "desc_rate_limit[a:0]".
@@ -2054,17 +2062,17 @@
  */
 
 /* register address for bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_adr 0x00007310
+#define HW_ATL_TPS_DESC_RATE_LIM_ADR 0x00007310
 /* bitmask for bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_msk 0x000007ff
+#define HW_ATL_TPS_DESC_RATE_LIM_MSK 0x000007ff
 /* inverted bitmask for bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_mskn 0xfffff800
+#define HW_ATL_TPS_DESC_RATE_LIM_MSKN 0xfffff800
 /* lower bit position of bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_shift 0
+#define HW_ATL_TPS_DESC_RATE_LIM_SHIFT 0
 /* width of bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_width 11
+#define HW_ATL_TPS_DESC_RATE_LIM_WIDTH 11
 /* default value of bitfield desc_rate_limit[a:0] */
-#define tps_desc_rate_lim_default 0x0
+#define HW_ATL_TPS_DESC_RATE_LIM_DEFAULT 0x0
 
 /* tx desc_tc_arb_mode[1:0] bitfield definitions
  * preprocessor definitions for the bitfield "desc_tc_arb_mode[1:0]".
@@ -2072,17 +2080,17 @@
  */
 
 /* register address for bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_adr 0x00007200
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_ADR 0x00007200
 /* bitmask for bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_msk 0x00000003
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_MSK 0x00000003
 /* inverted bitmask for bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_mskn 0xfffffffc
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_MSKN 0xfffffffc
 /* lower bit position of bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_shift 0
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_SHIFT 0
 /* width of bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_width 2
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_WIDTH 2
 /* default value of bitfield desc_tc_arb_mode[1:0] */
-#define tps_desc_tc_arb_mode_default 0x0
+#define HW_ATL_TPS_DESC_TC_ARB_MODE_DEFAULT 0x0
 
 /* tx desc_tc{t}_credit_max[b:0] bitfield definitions
  * preprocessor definitions for the bitfield "desc_tc{t}_credit_max[b:0]".
@@ -2091,17 +2099,17 @@
  */
 
 /* register address for bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_adr(tc) (0x00007210 + (tc) * 0x4)
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_ADR(tc) (0x00007210 + (tc) * 0x4)
 /* bitmask for bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_msk 0x0fff0000
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSK 0x0fff0000
 /* inverted bitmask for bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_mskn 0xf000ffff
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSKN 0xf000ffff
 /* lower bit position of bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_shift 16
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_SHIFT 16
 /* width of bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_width 12
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_WIDTH 12
 /* default value of bitfield desc_tc{t}_credit_max[b:0] */
-#define tps_desc_tctcredit_max_default 0x0
+#define HW_ATL_TPS_DESC_TCTCREDIT_MAX_DEFAULT 0x0
 
 /* tx desc_tc{t}_weight[8:0] bitfield definitions
  * preprocessor definitions for the bitfield "desc_tc{t}_weight[8:0]".
@@ -2110,17 +2118,17 @@
  */
 
 /* register address for bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_adr(tc) (0x00007210 + (tc) * 0x4)
+#define HW_ATL_TPS_DESC_TCTWEIGHT_ADR(tc) (0x00007210 + (tc) * 0x4)
 /* bitmask for bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_msk 0x000001ff
+#define HW_ATL_TPS_DESC_TCTWEIGHT_MSK 0x000001ff
 /* inverted bitmask for bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_mskn 0xfffffe00
+#define HW_ATL_TPS_DESC_TCTWEIGHT_MSKN 0xfffffe00
 /* lower bit position of bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_shift 0
+#define HW_ATL_TPS_DESC_TCTWEIGHT_SHIFT 0
 /* width of bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_width 9
+#define HW_ATL_TPS_DESC_TCTWEIGHT_WIDTH 9
 /* default value of bitfield desc_tc{t}_weight[8:0] */
-#define tps_desc_tctweight_default 0x0
+#define HW_ATL_TPS_DESC_TCTWEIGHT_DEFAULT 0x0
 
 /* tx desc_vm_arb_mode bitfield definitions
  * preprocessor definitions for the bitfield "desc_vm_arb_mode".
@@ -2128,17 +2136,17 @@
  */
 
 /* register address for bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_adr 0x00007300
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_ADR 0x00007300
 /* bitmask for bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_msk 0x00000001
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_MSK 0x00000001
 /* inverted bitmask for bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_mskn 0xfffffffe
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_MSKN 0xfffffffe
 /* lower bit position of bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_shift 0
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_SHIFT 0
 /* width of bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_width 1
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_WIDTH 1
 /* default value of bitfield desc_vm_arb_mode */
-#define tps_desc_vm_arb_mode_default 0x0
+#define HW_ATL_TPS_DESC_VM_ARB_MODE_DEFAULT 0x0
 
 /* tx data_tc{t}_credit_max[b:0] bitfield definitions
  * preprocessor definitions for the bitfield "data_tc{t}_credit_max[b:0]".
@@ -2147,17 +2155,17 @@
  */
 
 /* register address for bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_adr(tc) (0x00007110 + (tc) * 0x4)
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_ADR(tc) (0x00007110 + (tc) * 0x4)
 /* bitmask for bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_msk 0x0fff0000
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSK 0x0fff0000
 /* inverted bitmask for bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_mskn 0xf000ffff
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSKN 0xf000ffff
 /* lower bit position of bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_shift 16
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_SHIFT 16
 /* width of bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_width 12
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_WIDTH 12
 /* default value of bitfield data_tc{t}_credit_max[b:0] */
-#define tps_data_tctcredit_max_default 0x0
+#define HW_ATL_TPS_DATA_TCTCREDIT_MAX_DEFAULT 0x0
 
 /* tx data_tc{t}_weight[8:0] bitfield definitions
  * preprocessor definitions for the bitfield "data_tc{t}_weight[8:0]".
@@ -2166,17 +2174,17 @@
  */
 
 /* register address for bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_adr(tc) (0x00007110 + (tc) * 0x4)
+#define HW_ATL_TPS_DATA_TCTWEIGHT_ADR(tc) (0x00007110 + (tc) * 0x4)
 /* bitmask for bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_msk 0x000001ff
+#define HW_ATL_TPS_DATA_TCTWEIGHT_MSK 0x000001ff
 /* inverted bitmask for bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_mskn 0xfffffe00
+#define HW_ATL_TPS_DATA_TCTWEIGHT_MSKN 0xfffffe00
 /* lower bit position of bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_shift 0
+#define HW_ATL_TPS_DATA_TCTWEIGHT_SHIFT 0
 /* width of bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_width 9
+#define HW_ATL_TPS_DATA_TCTWEIGHT_WIDTH 9
 /* default value of bitfield data_tc{t}_weight[8:0] */
-#define tps_data_tctweight_default 0x0
+#define HW_ATL_TPS_DATA_TCTWEIGHT_DEFAULT 0x0
 
 /* tx reg_res_dsbl bitfield definitions
  * preprocessor definitions for the bitfield "reg_res_dsbl".
@@ -2184,17 +2192,17 @@
  */
 
 /* register address for bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_adr 0x00007000
+#define HW_ATL_TX_REG_RES_DSBL_ADR 0x00007000
 /* bitmask for bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_msk 0x20000000
+#define HW_ATL_TX_REG_RES_DSBL_MSK 0x20000000
 /* inverted bitmask for bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_mskn 0xdfffffff
+#define HW_ATL_TX_REG_RES_DSBL_MSKN 0xdfffffff
 /* lower bit position of bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_shift 29
+#define HW_ATL_TX_REG_RES_DSBL_SHIFT 29
 /* width of bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_width 1
+#define HW_ATL_TX_REG_RES_DSBL_WIDTH 1
 /* default value of bitfield reg_res_dsbl */
-#define tx_reg_res_dsbl_default 0x1
+#define HW_ATL_TX_REG_RES_DSBL_DEFAULT 0x1
 
 /* mac_phy register access busy bitfield definitions
  * preprocessor definitions for the bitfield "register access busy".
@@ -2202,15 +2210,15 @@
  */
 
 /* register address for bitfield register access busy */
-#define msm_reg_access_busy_adr 0x00004400
+#define HW_ATL_MSM_REG_ACCESS_BUSY_ADR 0x00004400
 /* bitmask for bitfield register access busy */
-#define msm_reg_access_busy_msk 0x00001000
+#define HW_ATL_MSM_REG_ACCESS_BUSY_MSK 0x00001000
 /* inverted bitmask for bitfield register access busy */
-#define msm_reg_access_busy_mskn 0xffffefff
+#define HW_ATL_MSM_REG_ACCESS_BUSY_MSKN 0xffffefff
 /* lower bit position of bitfield register access busy */
-#define msm_reg_access_busy_shift 12
+#define HW_ATL_MSM_REG_ACCESS_BUSY_SHIFT 12
 /* width of bitfield register access busy */
-#define msm_reg_access_busy_width 1
+#define HW_ATL_MSM_REG_ACCESS_BUSY_WIDTH 1
 
 /* mac_phy msm register address[7:0] bitfield definitions
  * preprocessor definitions for the bitfield "msm register address[7:0]".
@@ -2218,17 +2226,17 @@
  */
 
 /* register address for bitfield msm register address[7:0] */
-#define msm_reg_addr_adr 0x00004400
+#define HW_ATL_MSM_REG_ADDR_ADR 0x00004400
 /* bitmask for bitfield msm register address[7:0] */
-#define msm_reg_addr_msk 0x000000ff
+#define HW_ATL_MSM_REG_ADDR_MSK 0x000000ff
 /* inverted bitmask for bitfield msm register address[7:0] */
-#define msm_reg_addr_mskn 0xffffff00
+#define HW_ATL_MSM_REG_ADDR_MSKN 0xffffff00
 /* lower bit position of bitfield msm register address[7:0] */
-#define msm_reg_addr_shift 0
+#define HW_ATL_MSM_REG_ADDR_SHIFT 0
 /* width of bitfield msm register address[7:0] */
-#define msm_reg_addr_width 8
+#define HW_ATL_MSM_REG_ADDR_WIDTH 8
 /* default value of bitfield msm register address[7:0] */
-#define msm_reg_addr_default 0x0
+#define HW_ATL_MSM_REG_ADDR_DEFAULT 0x0
 
 /* mac_phy register read strobe bitfield definitions
  * preprocessor definitions for the bitfield "register read strobe".
@@ -2236,17 +2244,17 @@
  */
 
 /* register address for bitfield register read strobe */
-#define msm_reg_rd_strobe_adr 0x00004400
+#define HW_ATL_MSM_REG_RD_STROBE_ADR 0x00004400
 /* bitmask for bitfield register read strobe */
-#define msm_reg_rd_strobe_msk 0x00000200
+#define HW_ATL_MSM_REG_RD_STROBE_MSK 0x00000200
 /* inverted bitmask for bitfield register read strobe */
-#define msm_reg_rd_strobe_mskn 0xfffffdff
+#define HW_ATL_MSM_REG_RD_STROBE_MSKN 0xfffffdff
 /* lower bit position of bitfield register read strobe */
-#define msm_reg_rd_strobe_shift 9
+#define HW_ATL_MSM_REG_RD_STROBE_SHIFT 9
 /* width of bitfield register read strobe */
-#define msm_reg_rd_strobe_width 1
+#define HW_ATL_MSM_REG_RD_STROBE_WIDTH 1
 /* default value of bitfield register read strobe */
-#define msm_reg_rd_strobe_default 0x0
+#define HW_ATL_MSM_REG_RD_STROBE_DEFAULT 0x0
 
 /* mac_phy msm register read data[31:0] bitfield definitions
  * preprocessor definitions for the bitfield "msm register read data[31:0]".
@@ -2254,15 +2262,15 @@
  */
 
 /* register address for bitfield msm register read data[31:0] */
-#define msm_reg_rd_data_adr 0x00004408
+#define HW_ATL_MSM_REG_RD_DATA_ADR 0x00004408
 /* bitmask for bitfield msm register read data[31:0] */
-#define msm_reg_rd_data_msk 0xffffffff
+#define HW_ATL_MSM_REG_RD_DATA_MSK 0xffffffff
 /* inverted bitmask for bitfield msm register read data[31:0] */
-#define msm_reg_rd_data_mskn 0x00000000
+#define HW_ATL_MSM_REG_RD_DATA_MSKN 0x00000000
 /* lower bit position of bitfield msm register read data[31:0] */
-#define msm_reg_rd_data_shift 0
+#define HW_ATL_MSM_REG_RD_DATA_SHIFT 0
 /* width of bitfield msm register read data[31:0] */
-#define msm_reg_rd_data_width 32
+#define HW_ATL_MSM_REG_RD_DATA_WIDTH 32
 
 /* mac_phy msm register write data[31:0] bitfield definitions
  * preprocessor definitions for the bitfield "msm register write data[31:0]".
@@ -2270,17 +2278,17 @@
  */
 
 /* register address for bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_adr 0x00004404
+#define HW_ATL_MSM_REG_WR_DATA_ADR 0x00004404
 /* bitmask for bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_msk 0xffffffff
+#define HW_ATL_MSM_REG_WR_DATA_MSK 0xffffffff
 /* inverted bitmask for bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_mskn 0x00000000
+#define HW_ATL_MSM_REG_WR_DATA_MSKN 0x00000000
 /* lower bit position of bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_shift 0
+#define HW_ATL_MSM_REG_WR_DATA_SHIFT 0
 /* width of bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_width 32
+#define HW_ATL_MSM_REG_WR_DATA_WIDTH 32
 /* default value of bitfield msm register write data[31:0] */
-#define msm_reg_wr_data_default 0x0
+#define HW_ATL_MSM_REG_WR_DATA_DEFAULT 0x0
 
 /* mac_phy register write strobe bitfield definitions
  * preprocessor definitions for the bitfield "register write strobe".
@@ -2288,17 +2296,17 @@
  */
 
 /* register address for bitfield register write strobe */
-#define msm_reg_wr_strobe_adr 0x00004400
+#define HW_ATL_MSM_REG_WR_STROBE_ADR 0x00004400
 /* bitmask for bitfield register write strobe */
-#define msm_reg_wr_strobe_msk 0x00000100
+#define HW_ATL_MSM_REG_WR_STROBE_MSK 0x00000100
 /* inverted bitmask for bitfield register write strobe */
-#define msm_reg_wr_strobe_mskn 0xfffffeff
+#define HW_ATL_MSM_REG_WR_STROBE_MSKN 0xfffffeff
 /* lower bit position of bitfield register write strobe */
-#define msm_reg_wr_strobe_shift 8
+#define HW_ATL_MSM_REG_WR_STROBE_SHIFT 8
 /* width of bitfield register write strobe */
-#define msm_reg_wr_strobe_width 1
+#define HW_ATL_MSM_REG_WR_STROBE_WIDTH 1
 /* default value of bitfield register write strobe */
-#define msm_reg_wr_strobe_default 0x0
+#define HW_ATL_MSM_REG_WR_STROBE_DEFAULT 0x0
 
 /* mif soft reset bitfield definitions
  * preprocessor definitions for the bitfield "soft reset".
@@ -2306,17 +2314,17 @@
  */
 
 /* register address for bitfield soft reset */
-#define glb_soft_res_adr 0x00000000
+#define HW_ATL_GLB_SOFT_RES_ADR 0x00000000
 /* bitmask for bitfield soft reset */
-#define glb_soft_res_msk 0x00008000
+#define HW_ATL_GLB_SOFT_RES_MSK 0x00008000
 /* inverted bitmask for bitfield soft reset */
-#define glb_soft_res_mskn 0xffff7fff
+#define HW_ATL_GLB_SOFT_RES_MSKN 0xffff7fff
 /* lower bit position of bitfield soft reset */
-#define glb_soft_res_shift 15
+#define HW_ATL_GLB_SOFT_RES_SHIFT 15
 /* width of bitfield soft reset */
-#define glb_soft_res_width 1
+#define HW_ATL_GLB_SOFT_RES_WIDTH 1
 /* default value of bitfield soft reset */
-#define glb_soft_res_default 0x0
+#define HW_ATL_GLB_SOFT_RES_DEFAULT 0x0
 
 /* mif register reset disable bitfield definitions
  * preprocessor definitions for the bitfield "register reset disable".
@@ -2324,27 +2332,27 @@
  */
 
 /* register address for bitfield register reset disable */
-#define glb_reg_res_dis_adr 0x00000000
+#define HW_ATL_GLB_REG_RES_DIS_ADR 0x00000000
 /* bitmask for bitfield register reset disable */
-#define glb_reg_res_dis_msk 0x00004000
+#define HW_ATL_GLB_REG_RES_DIS_MSK 0x00004000
 /* inverted bitmask for bitfield register reset disable */
-#define glb_reg_res_dis_mskn 0xffffbfff
+#define HW_ATL_GLB_REG_RES_DIS_MSKN 0xffffbfff
 /* lower bit position of bitfield register reset disable */
-#define glb_reg_res_dis_shift 14
+#define HW_ATL_GLB_REG_RES_DIS_SHIFT 14
 /* width of bitfield register reset disable */
-#define glb_reg_res_dis_width 1
+#define HW_ATL_GLB_REG_RES_DIS_WIDTH 1
 /* default value of bitfield register reset disable */
-#define glb_reg_res_dis_default 0x1
+#define HW_ATL_GLB_REG_RES_DIS_DEFAULT 0x1
 
 /* tx dma debug control definitions */
-#define tx_dma_debug_ctl_adr 0x00008920u
+#define HW_ATL_TX_DMA_DEBUG_CTL_ADR 0x00008920u
 
 /* tx dma descriptor base address msw definitions */
-#define tx_dma_desc_base_addrmsw_adr(descriptor) \
+#define HW_ATL_TX_DMA_DESC_BASE_ADDRMSW_ADR(descriptor) \
 			(0x00007c04u + (descriptor) * 0x40)
 
 /* tx dma total request limit */
-#define tx_dma_total_req_limit_adr 0x00007b20u
+#define HW_ATL_TX_DMA_TOTAL_REQ_LIMIT_ADR 0x00007b20u
 
 /* tx interrupt moderation control register definitions
  * Preprocessor definitions for TX Interrupt Moderation Control Register
@@ -2352,7 +2360,7 @@
  * Parameter: queue {Q} | stride size 0x4 | range [0, 31]
  */
 
-#define tx_intr_moderation_ctl_adr(queue) (0x00008980u + (queue) * 0x4)
+#define HW_ATL_TX_INTR_MODERATION_CTL_ADR(queue) (0x00008980u + (queue) * 0x4)
 
 /* pcie reg_res_dsbl bitfield definitions
  * preprocessor definitions for the bitfield "reg_res_dsbl".
@@ -2360,22 +2368,23 @@
  */
 
 /* register address for bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_adr 0x00001000
+#define HW_ATL_PCI_REG_RES_DSBL_ADR 0x00001000
 /* bitmask for bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_msk 0x20000000
+#define HW_ATL_PCI_REG_RES_DSBL_MSK 0x20000000
 /* inverted bitmask for bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_mskn 0xdfffffff
+#define HW_ATL_PCI_REG_RES_DSBL_MSKN 0xdfffffff
 /* lower bit position of bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_shift 29
+#define HW_ATL_PCI_REG_RES_DSBL_SHIFT 29
 /* width of bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_width 1
+#define HW_ATL_PCI_REG_RES_DSBL_WIDTH 1
 /* default value of bitfield reg_res_dsbl */
-#define pci_reg_res_dsbl_default 0x1
+#define HW_ATL_PCI_REG_RES_DSBL_DEFAULT 0x1
 
 /* PCI core control register */
-#define pci_reg_control6_adr 0x1014u
+#define HW_ATL_PCI_REG_CONTROL6_ADR 0x1014u
 
 /* global microprocessor scratch pad definitions */
-#define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4)
+#define HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp) \
+	(0x00000300u + (scratch_scp) * 0x4)
 
 #endif /* HW_ATL_LLH_INTERNAL_H */
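
Each renamed bitfield keeps its ADR/MSK/SHIFT/WIDTH/DEFAULT tuple unchanged, so
callers only need the new macro names. As a minimal sketch of how such a tuple
is typically consumed — assuming the generic read-modify-write helpers from
aq_hw_utils.h (aq_hw_read_reg_bit/aq_hw_write_reg_bit) keep their usual
signatures; the function name below is illustrative, not part of this patch:

	/* Sketch: set the tx_buf_en bitfield via the renamed constants.
	 * Performs a read-modify-write of bit 0 in register 0x00007900.
	 */
	static void hw_atl_tpb_tx_buff_en_sketch(struct aq_hw_s *aq_hw,
						 u32 buff_en)
	{
		aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_BUF_EN_ADR,
				    HW_ATL_TPB_TX_BUF_EN_MSK,
				    HW_ATL_TPB_TX_BUF_EN_SHIFT, buff_en);
	}

The same pattern applies to the parameterized macros: for a per-descriptor
field, the caller first evaluates e.g. HW_ATL_TDM_DESCDEN_ADR(descriptor) to
obtain the register address and then passes the matching MSK/SHIFT pair.
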
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index f2ce12e..9c7e916 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -11,11 +11,9 @@
  * abstraction layer.
  */
 
-#include "../aq_hw.h"
+#include "../aq_nic.h"
 #include "../aq_hw_utils.h"
 #include "../aq_pci_func.h"
-#include "../aq_ring.h"
-#include "../aq_vec.h"
 #include "hw_atl_utils.h"
 #include "hw_atl_llh.h"
 
@@ -37,15 +35,15 @@ static int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
 {
 	int err = 0;
 
-	AQ_HW_WAIT_FOR(reg_glb_cpu_sem_get(self,
-					   HW_ATL_FW_SM_RAM) == 1U,
-					   1U, 10000U);
+	AQ_HW_WAIT_FOR(hw_atl_reg_glb_cpu_sem_get(self,
+						  HW_ATL_FW_SM_RAM) == 1U,
+						  1U, 10000U);
 
 	if (err < 0) {
 		bool is_locked;
 
-		reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
-		is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
+		hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+		is_locked = hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
 		if (!is_locked) {
 			err = -ETIME;
 			goto err_exit;
@@ -66,7 +64,7 @@ static int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
 		*(p++) = aq_hw_read_reg(self, 0x0000020CU);
 	}
 
-	reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+	hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
 
 err_exit:
 	return err;
@@ -78,7 +76,7 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
 	int err = 0;
 	bool is_locked;
 
-	is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
+	is_locked = hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
 	if (!is_locked) {
 		err = -ETIME;
 		goto err_exit;
@@ -97,7 +95,7 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
 		}
 	}
 
-	reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+	hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
 
 err_exit:
 	return err;
@@ -119,7 +117,7 @@ static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual)
 }
 
 static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
-				 struct aq_hw_caps_s *aq_hw_caps)
+				 const struct aq_hw_caps_s *aq_hw_caps)
 {
 	int err = 0;
 
@@ -133,10 +131,10 @@ static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
 		aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
 	}
 
-	reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U);
+	hw_atl_reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U);
 
 	/* check 10 times by 1ms */
-	AQ_HW_WAIT_FOR(0U != (PHAL_ATLANTIC_A0->mbox_addr =
+	AQ_HW_WAIT_FOR(0U != (self->mbox_addr =
 			aq_hw_read_reg(self, 0x360U)), 1000U, 10U);
 
 	err = hw_atl_utils_ver_match(aq_hw_caps->fw_ver_expected,
@@ -174,14 +172,14 @@ static int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size)
 		err = -1;
 		goto err_exit;
 	}
-	err = hw_atl_utils_fw_upload_dwords(self, PHAL_ATLANTIC->rpc_addr,
-					    (u32 *)(void *)&PHAL_ATLANTIC->rpc,
+	err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr,
+					    (u32 *)(void *)&self->rpc,
 					    (rpc_size + sizeof(u32) -
 					    sizeof(u8)) / sizeof(u32));
 	if (err < 0)
 		goto err_exit;
 
-	sw.tid = 0xFFFFU & (++PHAL_ATLANTIC->rpc_tid);
+	sw.tid = 0xFFFFU & (++self->rpc_tid);
 	sw.len = (u16)rpc_size;
 	aq_hw_write_reg(self, HW_ATL_RPC_CONTROL_ADR, sw.val);
 
@@ -199,7 +197,7 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
 	do {
 		sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR);
 
-		PHAL_ATLANTIC->rpc_tid = sw.tid;
+		self->rpc_tid = sw.tid;
 
 		AQ_HW_WAIT_FOR(sw.tid ==
 				(fw.val =
@@ -221,9 +219,9 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
 		if (fw.len) {
 			err =
 			hw_atl_utils_fw_downld_dwords(self,
-						      PHAL_ATLANTIC->rpc_addr,
+						      self->rpc_addr,
 						      (u32 *)(void *)
-						      &PHAL_ATLANTIC->rpc,
+						      &self->rpc,
 						      (fw.len + sizeof(u32) -
 						      sizeof(u8)) /
 						      sizeof(u32));
@@ -231,19 +229,18 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
 				goto err_exit;
 		}
 
-		*rpc = &PHAL_ATLANTIC->rpc;
+		*rpc = &self->rpc;
 	}
 
 err_exit:
 	return err;
 }
 
-static int hw_atl_utils_mpi_create(struct aq_hw_s *self,
-				   struct aq_hw_caps_s *aq_hw_caps)
+static int hw_atl_utils_mpi_create(struct aq_hw_s *self)
 {
 	int err = 0;
 
-	err = hw_atl_utils_init_ucp(self, aq_hw_caps);
+	err = hw_atl_utils_init_ucp(self, self->aq_nic_cfg->aq_hw_caps);
 	if (err < 0)
 		goto err_exit;
 
@@ -259,7 +256,7 @@ int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
 			       struct hw_aq_atl_utils_mbox_header *pmbox)
 {
 	return hw_atl_utils_fw_downld_dwords(self,
-				      PHAL_ATLANTIC->mbox_addr,
+				      self->mbox_addr,
 				      (u32 *)(void *)pmbox,
 				      sizeof(*pmbox) / sizeof(u32));
 }
@@ -270,7 +267,7 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
 	int err = 0;
 
 	err = hw_atl_utils_fw_downld_dwords(self,
-					    PHAL_ATLANTIC->mbox_addr,
+					    self->mbox_addr,
 					    (u32 *)(void *)pmbox,
 					    sizeof(*pmbox) / sizeof(u32));
 	if (err < 0)
@@ -281,9 +278,9 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
 					self->aq_nic_cfg->mtu : 1514U;
 		pmbox->stats.ubrc = pmbox->stats.uprc * mtu;
 		pmbox->stats.ubtc = pmbox->stats.uptc * mtu;
-		pmbox->stats.dpc = atomic_read(&PHAL_ATLANTIC_A0->dpc);
+		pmbox->stats.dpc = atomic_read(&self->dpc);
 	} else {
-		pmbox->stats.dpc = reg_rx_dma_stat_counter7get(self);
+		pmbox->stats.dpc = hw_atl_reg_rx_dma_stat_counter7get(self);
 	}
 
 err_exit:;
@@ -365,7 +362,6 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
 }
 
 int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
-				   struct aq_hw_caps_s *aq_hw_caps,
 				   u8 *mac)
 {
 	int err = 0;
@@ -376,9 +372,9 @@ int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
 	self->mmio = aq_pci_func_get_mmio(self->aq_pci_func);
 
 	hw_atl_utils_hw_chip_features_init(self,
-					   &PHAL_ATLANTIC_A0->chip_features);
+					   &self->chip_features);
 
-	err = hw_atl_utils_mpi_create(self, aq_hw_caps);
+	err = hw_atl_utils_mpi_create(self);
 	if (err < 0)
 		goto err_exit;
 
@@ -396,7 +392,7 @@ int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
 					    aq_hw_read_reg(self, 0x00000374U) +
 					    (40U * 4U),
 					    mac_addr,
-					    AQ_DIMOF(mac_addr));
+					    ARRAY_SIZE(mac_addr));
 	if (err < 0) {
 		mac_addr[0] = 0U;
 		mac_addr[1] = 0U;
@@ -465,7 +461,7 @@ unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps)
 void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
 {
 	u32 chip_features = 0U;
-	u32 val = reg_glb_mif_id_get(self);
+	u32 val = hw_atl_reg_glb_mif_id_get(self);
 	u32 mif_rev = val & 0xFFU;
 
 	if ((3U & mif_rev) == 1U) {
@@ -500,13 +496,13 @@ int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
 
 int hw_atl_utils_update_stats(struct aq_hw_s *self)
 {
-	struct hw_atl_s *hw_self = PHAL_ATLANTIC;
 	struct hw_aq_atl_utils_mbox mbox;
 
 	hw_atl_utils_mpi_read_stats(self, &mbox);
 
-#define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \
-			mbox.stats._N_ - hw_self->last_stats._N_)
+#define AQ_SDELTA(_N_) (self->curr_stats._N_ += \
+			mbox.stats._N_ - self->last_stats._N_)
+
 	if (self->aq_link_status.mbps) {
 		AQ_SDELTA(uprc);
 		AQ_SDELTA(mprc);
@@ -527,19 +523,19 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self)
 		AQ_SDELTA(dpc);
 	}
 #undef AQ_SDELTA
-	hw_self->curr_stats.dma_pkt_rc = stats_rx_dma_good_pkt_counterlsw_get(self);
-	hw_self->curr_stats.dma_pkt_tc = stats_tx_dma_good_pkt_counterlsw_get(self);
-	hw_self->curr_stats.dma_oct_rc = stats_rx_dma_good_octet_counterlsw_get(self);
-	hw_self->curr_stats.dma_oct_tc = stats_tx_dma_good_octet_counterlsw_get(self);
+	self->curr_stats.dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counterlsw_get(self);
+	self->curr_stats.dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counterlsw_get(self);
+	self->curr_stats.dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counterlsw_get(self);
+	self->curr_stats.dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counterlsw_get(self);
 
-	memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats));
+	memcpy(&self->last_stats, &mbox.stats, sizeof(mbox.stats));
 
 	return 0;
 }
 
 struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self)
 {
-	return &PHAL_ATLANTIC->curr_stats;
+	return &self->curr_stats;
 }
 
 static const u32 hw_atl_utils_hw_mac_regs[] = {
@@ -568,7 +564,7 @@ static const u32 hw_atl_utils_hw_mac_regs[] = {
 };
 
 int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
-			     struct aq_hw_caps_s *aq_hw_caps,
+			     const struct aq_hw_caps_s *aq_hw_caps,
 			     u32 *regs_buff)
 {
 	unsigned int i = 0U;
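
[The hw_atl_utils.c hunks above all serve one refactor: the private hw_atl_s state that used to be reached through the PHAL_ATLANTIC cast macros now lives directly in struct aq_hw_s. A minimal standalone sketch of the before/after access patterns, using simplified stand-in types rather than the real driver structures:]

#include <stdio.h>

/* Old style: private state hangs off a "derived" struct, and a macro
 * casts the base handle to reach it, relying on struct layout. */
struct base_hw { void *mmio; };
struct derived_hw {
	struct base_hw base;		/* must be the first member */
	unsigned int rpc_tid;
};
#define PRIV(self) ((struct derived_hw *)(void *)(self))

static void bump_old(struct base_hw *self)
{
	PRIV(self)->rpc_tid++;		/* hidden cast, opaque to readers */
}

/* New style: the state is a plain member of the handle itself. */
struct hw { void *mmio; unsigned int rpc_tid; };

static void bump_new(struct hw *self)
{
	self->rpc_tid++;		/* no cast, type-checked access */
}

int main(void)
{
	struct derived_hw d = { { NULL }, 0 };
	struct hw h = { NULL, 0 };

	bump_old(&d.base);
	bump_new(&h);
	printf("%u %u\n", d.rpc_tid, h.rpc_tid);	/* prints "1 1" */
	return 0;
}
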
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index 21aeca6..40e2319 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -14,10 +14,39 @@
 #ifndef HW_ATL_UTILS_H
 #define HW_ATL_UTILS_H
 
-#include "../aq_common.h"
-
 #define HW_ATL_FLUSH() { (void)aq_hw_read_reg(self, 0x10); }
 
+/* Hardware tx descriptor */
+struct __packed hw_atl_txd_s {
+	u64 buf_addr;
+	u32 ctl;
+	u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */
+};
+
+/* Hardware tx context descriptor */
+struct __packed hw_atl_txc_s {
+	u32 rsvd;
+	u32 len;
+	u32 ctl;
+	u32 len2;
+};
+
+/* Hardware rx descriptor */
+struct __packed hw_atl_rxd_s {
+	u64 buf_addr;
+	u64 hdr_addr;
+};
+
+/* Hardware rx descriptor writeback */
+struct __packed hw_atl_rxd_wb_s {
+	u32 type;
+	u32 rss_hash;
+	u16 status;
+	u16 pkt_len;
+	u16 next_desc_ptr;
+	u16 vlan;
+};
+
 struct __packed hw_atl_stats_s {
 	u32 uprc;
 	u32 mprc;
@@ -126,26 +155,6 @@ struct __packed hw_aq_atl_utils_mbox {
 	struct hw_atl_stats_s stats;
 };
 
-struct __packed hw_atl_s {
-	struct aq_hw_s base;
-	struct hw_atl_stats_s last_stats;
-	struct aq_stats_s curr_stats;
-	u64 speed;
-	unsigned int chip_features;
-	u32 fw_ver_actual;
-	atomic_t dpc;
-	u32 mbox_addr;
-	u32 rpc_addr;
-	u32 rpc_tid;
-	struct hw_aq_atl_utils_fw_rpc rpc;
-};
-
-#define SELF ((struct hw_atl_s *)self)
-
-#define PHAL_ATLANTIC ((struct hw_atl_s *)((void *)(self)))
-#define PHAL_ATLANTIC_A0 ((struct hw_atl_s *)((void *)(self)))
-#define PHAL_ATLANTIC_B0 ((struct hw_atl_s *)((void *)(self)))
-
 #define HAL_ATLANTIC_UTILS_CHIP_MIPS         0x00000001U
 #define HAL_ATLANTIC_UTILS_CHIP_TPO2         0x00000002U
 #define HAL_ATLANTIC_UTILS_CHIP_RPF2         0x00000004U
@@ -154,7 +163,7 @@ struct __packed hw_atl_s {
 #define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0  0x02000000U
 
 #define IS_CHIP_FEATURE(_F_) (HAL_ATLANTIC_UTILS_CHIP_##_F_ & \
-				PHAL_ATLANTIC->chip_features)
+				self->chip_features)
 
 enum hal_atl_utils_fw_state_e {
 	MPI_DEINIT = 0,
@@ -171,6 +180,10 @@ enum hal_atl_utils_fw_state_e {
 #define HAL_ATLANTIC_RATE_100M       BIT(5)
 #define HAL_ATLANTIC_RATE_INVALID    BIT(6)
 
+struct aq_hw_s;
+struct aq_hw_caps_s;
+struct aq_hw_link_status_s;
+
 void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p);
 
 int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
@@ -189,13 +202,12 @@ int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed,
 int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self);
 
 int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
-				   struct aq_hw_caps_s *aq_hw_caps,
 				   u8 *mac);
 
 unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps);
 
 int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
-			     struct aq_hw_caps_s *aq_hw_caps,
+			     const struct aq_hw_caps_s *aq_hw_caps,
 			     u32 *regs_buff);
 
 int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
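
[The header hunk above also swaps the aq_common.h include for bare forward declarations (struct aq_hw_s and friends), which is enough because hw_atl_utils.h only passes these types around by pointer. A hedged illustration of why an incomplete type suffices at prototype scope -- the names below are invented, not the driver's:]

#include <stdio.h>
#include <string.h>

/* What a header can declare with only an incomplete type: */
struct nic_hw;					/* forward declaration */
static int get_mac(struct nic_hw *self, unsigned char *mac);

/* The full definition is needed only where members are dereferenced. */
struct nic_hw { unsigned char perm_mac[6]; };

static int get_mac(struct nic_hw *self, unsigned char *mac)
{
	memcpy(mac, self->perm_mac, sizeof(self->perm_mac));
	return 0;
}

int main(void)
{
	struct nic_hw hw = { { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } };
	unsigned char mac[6];

	get_mac(&hw, mac);
	printf("%02x:...:%02x\n", mac[0], mac[5]);	/* 02:...:01 */
	return 0;
}
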
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index cf6ebf1..6b7e996 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1,7 +1,7 @@
 /* Broadcom NetXtreme-C/E network driver.
  *
  * Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016-2017 Broadcom Limited
+ * Copyright (c) 2016-2018 Broadcom Limited
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -107,6 +107,7 @@ enum board_idx {
 	BCM57416_NPAR,
 	BCM57452,
 	BCM57454,
+	BCM5745x_NPAR,
 	BCM58802,
 	BCM58804,
 	BCM58808,
@@ -147,6 +148,7 @@ static const struct {
 	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
 	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
 	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
+	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
 	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
 	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
 	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
@@ -156,6 +158,8 @@ static const struct {
 };
 
 static const struct pci_device_id bnxt_pci_tbl[] = {
+	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
+	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
 	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
 	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
 	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
@@ -209,6 +213,7 @@ MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
 
 static const u16 bnxt_vf_req_snif[] = {
 	HWRM_FUNC_CFG,
+	HWRM_FUNC_VF_CFG,
 	HWRM_PORT_PHY_QCFG,
 	HWRM_CFA_L2_FILTER_ALLOC,
 };
@@ -1510,7 +1515,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
 			       (struct rx_tpa_start_cmp_ext *)rxcmp1);
 
 		*event |= BNXT_RX_EVENT;
-		goto next_rx_no_prod;
+		goto next_rx_no_prod_no_len;
 
 	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
 		skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
@@ -1526,7 +1531,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
 			rc = 1;
 		}
 		*event |= BNXT_RX_EVENT;
-		goto next_rx_no_prod;
+		goto next_rx_no_prod_no_len;
 	}
 
 	cons = rxcmp->rx_cmp_opaque;
@@ -1644,9 +1649,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
 	rxr->rx_prod = NEXT_RX(prod);
 	rxr->rx_next_cons = NEXT_RX(cons);
 
-next_rx_no_prod:
 	cpr->rx_packets += 1;
 	cpr->rx_bytes += len;
+
+next_rx_no_prod_no_len:
 	*raw_cons = tmp_raw_cons;
 
 	return rc;
@@ -4497,6 +4503,42 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
 	}
 }
 
+static int bnxt_hwrm_get_rings(struct bnxt *bp)
+{
+	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+	struct hwrm_func_qcfg_input req = {0};
+	int rc;
+
+	if (bp->hwrm_spec_code < 0x10601)
+		return 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
+	req.fid = cpu_to_le16(0xffff);
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc) {
+		mutex_unlock(&bp->hwrm_cmd_lock);
+		return -EIO;
+	}
+
+	hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
+	if (bp->flags & BNXT_FLAG_NEW_RM) {
+		u16 cp, stats;
+
+		hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
+		hw_resc->resv_hw_ring_grps =
+			le32_to_cpu(resp->alloc_hw_ring_grps);
+		hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
+		cp = le16_to_cpu(resp->alloc_cmpl_rings);
+		stats = le16_to_cpu(resp->alloc_stat_ctx);
+		cp = min_t(u16, cp, stats);
+		hw_resc->resv_cp_rings = cp;
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return 0;
+}
+
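[bnxt_hwrm_get_rings() clamps the reserved completion rings to the reserved statistics contexts, since each completion ring the driver brings up also consumes a stat context. The clamp in isolation, as a sketch outside the kernel:]

#include <stdio.h>

/* A completion ring is only usable if a statistics context backs it,
 * so the effective reservation is the smaller of the two counts
 * (mirrors the min_t(u16, cp, stats) clamp above). */
static unsigned short usable_cp_rings(unsigned short cp, unsigned short stats)
{
	return cp < stats ? cp : stats;
}

int main(void)
{
	printf("%u\n", usable_cp_rings(16, 12));	/* prints 12 */
	return 0;
}
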
 /* Caller must hold bp->hwrm_cmd_lock */
 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
 {
@@ -4516,55 +4558,283 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
 	return rc;
 }
 
-static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings)
+static int
+bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+			   int ring_grps, int cp_rings, int vnics)
 {
 	struct hwrm_func_cfg_input req = {0};
+	u32 enables = 0;
 	int rc;
 
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+	req.fid = cpu_to_le16(0xffff);
+	enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+	req.num_tx_rings = cpu_to_le16(tx_rings);
+	if (bp->flags & BNXT_FLAG_NEW_RM) {
+		enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+		enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+				      FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+		enables |= ring_grps ?
+			   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+		enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+
+		req.num_rx_rings = cpu_to_le16(rx_rings);
+		req.num_hw_ring_grps = cpu_to_le16(ring_grps);
+		req.num_cmpl_rings = cpu_to_le16(cp_rings);
+		req.num_stat_ctxs = req.num_cmpl_rings;
+		req.num_vnics = cpu_to_le16(vnics);
+	}
+	if (!enables)
+		return 0;
+
+	req.enables = cpu_to_le32(enables);
+	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc)
+		return -ENOMEM;
+
+	if (bp->hwrm_spec_code < 0x10601)
+		bp->hw_resc.resv_tx_rings = tx_rings;
+
+	rc = bnxt_hwrm_get_rings(bp);
+	return rc;
+}
+
+static int
+bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+			   int ring_grps, int cp_rings, int vnics)
+{
+	struct hwrm_func_vf_cfg_input req = {0};
+	u32 enables = 0;
+	int rc;
+
+	if (!(bp->flags & BNXT_FLAG_NEW_RM)) {
+		bp->hw_resc.resv_tx_rings = tx_rings;
+		return 0;
+	}
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+	enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+	enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+	enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+			      FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+	enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+	enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+
+	req.num_tx_rings = cpu_to_le16(tx_rings);
+	req.num_rx_rings = cpu_to_le16(rx_rings);
+	req.num_hw_ring_grps = cpu_to_le16(ring_grps);
+	req.num_cmpl_rings = cpu_to_le16(cp_rings);
+	req.num_stat_ctxs = req.num_cmpl_rings;
+	req.num_vnics = cpu_to_le16(vnics);
+
+	req.enables = cpu_to_le32(enables);
+	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc)
+		return -ENOMEM;
+
+	rc = bnxt_hwrm_get_rings(bp);
+	return rc;
+}
+
+static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
+				   int cp, int vnic)
+{
+	if (BNXT_PF(bp))
+		return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, vnic);
+	else
+		return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic);
+}
+
+static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
+			   bool shared);
+
+static int __bnxt_reserve_rings(struct bnxt *bp)
+{
+	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+	int tx = bp->tx_nr_rings;
+	int rx = bp->rx_nr_rings;
+	int cp = bp->cp_nr_rings;
+	int grp, rx_rings, rc;
+	bool sh = false;
+	int vnic = 1;
+
 	if (bp->hwrm_spec_code < 0x10601)
 		return 0;
 
-	if (BNXT_VF(bp))
+	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+		sh = true;
+	if (bp->flags & BNXT_FLAG_RFS)
+		vnic = rx + 1;
+	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+		rx <<= 1;
+
+	grp = bp->rx_nr_rings;
+	if (tx == hw_resc->resv_tx_rings &&
+	    (!(bp->flags & BNXT_FLAG_NEW_RM) ||
+	      (rx == hw_resc->resv_rx_rings &&
+	       grp == hw_resc->resv_hw_ring_grps &&
+	       cp == hw_resc->resv_cp_rings && vnic == hw_resc->resv_vnics)))
 		return 0;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
-	req.fid = cpu_to_le16(0xffff);
-	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
-	req.num_tx_rings = cpu_to_le16(*tx_rings);
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, vnic);
 	if (rc)
 		return rc;
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings);
-	mutex_unlock(&bp->hwrm_cmd_lock);
-	if (!rc)
-		bp->tx_reserved_rings = *tx_rings;
+	tx = hw_resc->resv_tx_rings;
+	if (bp->flags & BNXT_FLAG_NEW_RM) {
+		rx = hw_resc->resv_rx_rings;
+		cp = hw_resc->resv_cp_rings;
+		grp = hw_resc->resv_hw_ring_grps;
+		vnic = hw_resc->resv_vnics;
+	}
+
+	rx_rings = rx;
+	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+		if (rx >= 2) {
+			rx_rings = rx >> 1;
+		} else {
+			if (netif_running(bp->dev))
+				return -ENOMEM;
+
+			bp->flags &= ~BNXT_FLAG_AGG_RINGS;
+			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
+			bp->dev->hw_features &= ~NETIF_F_LRO;
+			bp->dev->features &= ~NETIF_F_LRO;
+			bnxt_set_ring_params(bp);
+		}
+	}
+	rx_rings = min_t(int, rx_rings, grp);
+	rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
+	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+		rx = rx_rings << 1;
+	cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
+	bp->tx_nr_rings = tx;
+	bp->rx_nr_rings = rx_rings;
+	bp->cp_nr_rings = cp;
+
+	if (!tx || !rx || !cp || !grp || !vnic)
+		return -ENOMEM;
+
 	return rc;
 }
 
-static int bnxt_hwrm_check_tx_rings(struct bnxt *bp, int tx_rings)
+static bool bnxt_need_reserve_rings(struct bnxt *bp)
 {
-	struct hwrm_func_cfg_input req = {0};
+	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+	int rx = bp->rx_nr_rings;
+	int vnic = 1;
+
+	if (bp->hwrm_spec_code < 0x10601)
+		return false;
+
+	if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
+		return true;
+
+	if (bp->flags & BNXT_FLAG_RFS)
+		vnic = rx + 1;
+	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+		rx <<= 1;
+	if ((bp->flags & BNXT_FLAG_NEW_RM) &&
+	    (hw_resc->resv_rx_rings != rx ||
+	     hw_resc->resv_cp_rings != bp->cp_nr_rings ||
+	     hw_resc->resv_vnics != vnic))
+		return true;
+	return false;
+}
+
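[Several of the helpers above derive hardware-side demand from the logical ring counts: with aggregation rings enabled every RX ring needs a companion, so the hardware RX count doubles, and with RFS the driver wants one VNIC per RX ring plus the default one. The same arithmetic as a standalone sketch, with stand-in flag values (the real ones live in bp->flags):]

#include <stdio.h>

#define FLAG_AGG_RINGS	0x1u	/* illustrative stand-ins */
#define FLAG_RFS	0x2u

/* VNIC demand is computed from the logical RX count before the
 * aggregation doubling, matching the order in the driver. */
static void hw_demand(unsigned int flags, int rx, int *hw_rx, int *vnics)
{
	*vnics = (flags & FLAG_RFS) ? rx + 1 : 1;
	*hw_rx = (flags & FLAG_AGG_RINGS) ? rx << 1 : rx;
}

int main(void)
{
	int hw_rx, vnics;

	hw_demand(FLAG_AGG_RINGS | FLAG_RFS, 8, &hw_rx, &vnics);
	printf("hw_rx=%d vnics=%d\n", hw_rx, vnics);	/* 16 and 9 */
	return 0;
}
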
+static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+				    int ring_grps, int cp_rings)
+{
+	struct hwrm_func_vf_cfg_input req = {0};
+	u32 flags, enables;
 	int rc;
 
-	if (bp->hwrm_spec_code < 0x10801)
+	if (!(bp->flags & BNXT_FLAG_NEW_RM))
 		return 0;
 
-	if (BNXT_VF(bp))
-		return 0;
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+	flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
+		FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
+		FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
+		FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
+		FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
+		FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
+	enables = FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS |
+		  FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
+		  FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+		  FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
+		  FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS |
+		  FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
-	req.fid = cpu_to_le16(0xffff);
-	req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST);
-	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
+	req.flags = cpu_to_le32(flags);
+	req.enables = cpu_to_le32(enables);
 	req.num_tx_rings = cpu_to_le16(tx_rings);
+	req.num_rx_rings = cpu_to_le16(rx_rings);
+	req.num_cmpl_rings = cpu_to_le16(cp_rings);
+	req.num_hw_ring_grps = cpu_to_le16(ring_grps);
+	req.num_stat_ctxs = cpu_to_le16(cp_rings);
+	req.num_vnics = cpu_to_le16(1);
+	if (bp->flags & BNXT_FLAG_RFS)
+		req.num_vnics = cpu_to_le16(rx_rings + 1);
 	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (rc)
 		return -ENOMEM;
 	return 0;
 }
 
+static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+				    int ring_grps, int cp_rings)
+{
+	struct hwrm_func_cfg_input req = {0};
+	u32 flags, enables;
+	int rc;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+	req.fid = cpu_to_le16(0xffff);
+	flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
+	enables = FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS;
+	req.num_tx_rings = cpu_to_le16(tx_rings);
+	if (bp->flags & BNXT_FLAG_NEW_RM) {
+		flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
+			 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
+			 FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
+			 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
+			 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
+		enables |= FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
+			   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+			   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
+			   FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
+			   FUNC_CFG_REQ_ENABLES_NUM_VNICS;
+		req.num_rx_rings = cpu_to_le16(rx_rings);
+		req.num_cmpl_rings = cpu_to_le16(cp_rings);
+		req.num_hw_ring_grps = cpu_to_le16(ring_grps);
+		req.num_stat_ctxs = cpu_to_le16(cp_rings);
+		req.num_vnics = cpu_to_le16(1);
+		if (bp->flags & BNXT_FLAG_RFS)
+			req.num_vnics = cpu_to_le16(rx_rings + 1);
+	}
+	req.flags = cpu_to_le32(flags);
+	req.enables = cpu_to_le32(enables);
+	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc)
+		return -ENOMEM;
+	return 0;
+}
+
+static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+				 int ring_grps, int cp_rings)
+{
+	if (bp->hwrm_spec_code < 0x10801)
+		return 0;
+
+	if (BNXT_PF(bp))
+		return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
+						ring_grps, cp_rings);
+
+	return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
+					cp_rings);
+}
+
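[Throughout the new reservation and check helpers, each optional request field is paired with an enable bit so firmware only consumes fields the caller actually set. A minimal sketch of that request-building pattern; the struct and constants here are invented for illustration, not HWRM definitions:]

#include <stdint.h>
#include <stdio.h>

#define REQ_ENABLES_NUM_TX	0x1u	/* illustrative enable bits */
#define REQ_ENABLES_NUM_RX	0x2u

struct fake_req {			/* stand-in for an HWRM request */
	uint32_t enables;
	uint16_t num_tx;
	uint16_t num_rx;
};

/* Only fields whose enable bit is set are meaningful to the receiver;
 * zero-valued resources simply leave their bit clear. */
static void build_req(struct fake_req *req, int tx, int rx)
{
	uint32_t enables = 0;

	enables |= tx ? REQ_ENABLES_NUM_TX : 0;
	enables |= rx ? REQ_ENABLES_NUM_RX : 0;
	req->num_tx = (uint16_t)tx;
	req->num_rx = (uint16_t)rx;
	req->enables = enables;
}

int main(void)
{
	struct fake_req req = { 0, 0, 0 };

	build_req(&req, 4, 0);
	printf("enables=%#x tx=%u rx=%u\n", (unsigned int)req.enables,
	       (unsigned int)req.num_tx, (unsigned int)req.num_rx);
	return 0;
}
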
 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
 {
@@ -4790,11 +5060,60 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
 	return rc;
 }
 
-static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+static int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
+{
+	struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_resource_qcaps_input req = {0};
+	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+	int rc;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
+	req.fid = cpu_to_le16(0xffff);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc) {
+		rc = -EIO;
+		goto hwrm_func_resc_qcaps_exit;
+	}
+
+	hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
+	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+	hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
+	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+	hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
+	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+	hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
+	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+	hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
+	hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
+	hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
+	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+	hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
+	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
+	hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
+	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+
+	if (BNXT_PF(bp)) {
+		struct bnxt_pf_info *pf = &bp->pf;
+
+		pf->vf_resv_strategy =
+			le16_to_cpu(resp->vf_reservation_strategy);
+		if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL)
+			pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
+	}
+hwrm_func_resc_qcaps_exit:
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 {
 	int rc = 0;
 	struct hwrm_func_qcaps_input req = {0};
 	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+	u32 flags;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
 	req.fid = cpu_to_le16(0xffff);
@@ -4804,16 +5123,27 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	if (rc)
 		goto hwrm_func_qcaps_exit;
 
-	if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED))
+	flags = le32_to_cpu(resp->flags);
+	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
 		bp->flags |= BNXT_FLAG_ROCEV1_CAP;
-	if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED))
+	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
 		bp->flags |= BNXT_FLAG_ROCEV2_CAP;
 
 	bp->tx_push_thresh = 0;
-	if (resp->flags &
-	    cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
+	if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
 		bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
 
+	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+	hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
+	if (!hw_resc->max_hw_ring_grps)
+		hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
+	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
+	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+
 	if (BNXT_PF(bp)) {
 		struct bnxt_pf_info *pf = &bp->pf;
 
@@ -4821,16 +5151,6 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 		pf->port_id = le16_to_cpu(resp->port_id);
 		bp->dev->dev_port = pf->port_id;
 		memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
-		pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
-		pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
-		pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
-		pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
-		pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
-		if (!pf->max_hw_ring_grps)
-			pf->max_hw_ring_grps = pf->max_tx_rings;
-		pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
-		pf->max_vnics = le16_to_cpu(resp->max_vnics);
-		pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
 		pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
 		pf->max_vfs = le16_to_cpu(resp->max_vfs);
 		pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
@@ -4839,26 +5159,13 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 		pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
 		pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
 		pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
-		if (resp->flags &
-		    cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED))
+		if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
 			bp->flags |= BNXT_FLAG_WOL_CAP;
 	} else {
 #ifdef CONFIG_BNXT_SRIOV
 		struct bnxt_vf_info *vf = &bp->vf;
 
 		vf->fw_fid = le16_to_cpu(resp->fid);
-
-		vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
-		vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
-		vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
-		vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
-		vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
-		if (!vf->max_hw_ring_grps)
-			vf->max_hw_ring_grps = vf->max_tx_rings;
-		vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
-		vf->max_vnics = le16_to_cpu(resp->max_vnics);
-		vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
-
 		memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
 #endif
 	}
@@ -4868,6 +5175,21 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	return rc;
 }
 
+static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+{
+	int rc;
+
+	rc = __bnxt_hwrm_func_qcaps(bp);
+	if (rc)
+		return rc;
+	if (bp->hwrm_spec_code >= 0x10803) {
+		rc = bnxt_hwrm_func_resc_qcaps(bp);
+		if (!rc)
+			bp->flags |= BNXT_FLAG_NEW_RM;
+	}
+	return 0;
+}
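
[The wrapper above gates the HWRM_FUNC_RESOURCE_QCAPS probe on bp->hwrm_spec_code >= 0x10803, i.e. HWRM spec 1.8.3, using the maj/min/upd packing established in bnxt_hwrm_ver_get(). The packing and threshold check as a standalone sketch:]

#include <stdio.h>

/* Pack major/minor/update into one comparable integer, the same
 * shape as bp->hwrm_spec_code (maj << 16 | min << 8 | upd). */
static unsigned int spec_code(unsigned int maj, unsigned int min,
			      unsigned int upd)
{
	return maj << 16 | min << 8 | upd;
}

int main(void)
{
	unsigned int fw = spec_code(1, 8, 3);	/* firmware reports 1.8.3 */

	if (fw >= 0x10803)
		printf("resource-manager capable firmware\n");
	else
		printf("legacy firmware\n");
	return 0;
}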
+
 static int bnxt_hwrm_func_reset(struct bnxt *bp)
 {
 	struct hwrm_func_reset_input req = {0};
@@ -4937,23 +5259,24 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
 
 	memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
 
-	bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 |
-			     resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd;
-	if (resp->hwrm_intf_maj < 1) {
+	bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
+			     resp->hwrm_intf_min_8b << 8 |
+			     resp->hwrm_intf_upd_8b;
+	if (resp->hwrm_intf_maj_8b < 1) {
 		netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
-			    resp->hwrm_intf_maj, resp->hwrm_intf_min,
-			    resp->hwrm_intf_upd);
+			    resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
+			    resp->hwrm_intf_upd_8b);
 		netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
 	}
 	snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
-		 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
-		 resp->hwrm_fw_rsvd);
+		 resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
+		 resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
 
 	bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
 	if (!bp->hwrm_cmd_timeout)
 		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
 
-	if (resp->hwrm_intf_maj >= 1)
+	if (resp->hwrm_intf_maj_8b >= 1)
 		bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
 
 	bp->chip_num = le16_to_cpu(resp->chip_num);
@@ -5089,6 +5412,28 @@ static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
 	return rc;
 }
 
+static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
+{
+	struct hwrm_func_cfg_input req = {0};
+	int rc;
+
+	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
+		return 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+	req.fid = cpu_to_le16(0xffff);
+	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
+	req.cache_linesize = FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_64;
+	if (size == 128)
+		req.cache_linesize =
+			FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128;
+
+	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc)
+		rc = -EIO;
+	return rc;
+}
+
 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
 {
 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
@@ -5224,15 +5569,6 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
 				   rc);
 			goto err_out;
 		}
-		if (bp->tx_reserved_rings != bp->tx_nr_rings) {
-			int tx = bp->tx_nr_rings;
-
-			if (bnxt_hwrm_reserve_tx_rings(bp, &tx) ||
-			    tx < bp->tx_nr_rings) {
-				rc = -ENOMEM;
-				goto err_out;
-			}
-		}
 	}
 
 	rc = bnxt_hwrm_ring_alloc(bp);
@@ -5452,79 +5788,45 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
 #ifdef CONFIG_RFS_ACCEL
 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
 {
-#if defined(CONFIG_BNXT_SRIOV)
-	if (BNXT_VF(bp))
-		return bp->vf.max_rsscos_ctxs;
-#endif
-	return bp->pf.max_rsscos_ctxs;
+	return bp->hw_resc.max_rsscos_ctxs;
 }
 
 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
 {
-#if defined(CONFIG_BNXT_SRIOV)
-	if (BNXT_VF(bp))
-		return bp->vf.max_vnics;
-#endif
-	return bp->pf.max_vnics;
+	return bp->hw_resc.max_vnics;
 }
 #endif
 
 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
 {
-#if defined(CONFIG_BNXT_SRIOV)
-	if (BNXT_VF(bp))
-		return bp->vf.max_stat_ctxs;
-#endif
-	return bp->pf.max_stat_ctxs;
+	return bp->hw_resc.max_stat_ctxs;
 }
 
 void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max)
 {
-#if defined(CONFIG_BNXT_SRIOV)
-	if (BNXT_VF(bp))
-		bp->vf.max_stat_ctxs = max;
-	else
-#endif
-		bp->pf.max_stat_ctxs = max;
+	bp->hw_resc.max_stat_ctxs = max;
 }
 
 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
 {
-#if defined(CONFIG_BNXT_SRIOV)
-	if (BNXT_VF(bp))
-		return bp->vf.max_cp_rings;
-#endif
-	return bp->pf.max_cp_rings;
+	return bp->hw_resc.max_cp_rings;
 }
 
 void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
 {
-#if defined(CONFIG_BNXT_SRIOV)
-	if (BNXT_VF(bp))
-		bp->vf.max_cp_rings = max;
-	else
-#endif
-		bp->pf.max_cp_rings = max;
+	bp->hw_resc.max_cp_rings = max;
 }
 
 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
 {
-#if defined(CONFIG_BNXT_SRIOV)
-	if (BNXT_VF(bp))
-		return min_t(unsigned int, bp->vf.max_irqs,
-			     bp->vf.max_cp_rings);
-#endif
-	return min_t(unsigned int, bp->pf.max_irqs, bp->pf.max_cp_rings);
+	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+
+	return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
 }
 
 void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
 {
-#if defined(CONFIG_BNXT_SRIOV)
-	if (BNXT_VF(bp))
-		bp->vf.max_irqs = max_irqs;
-	else
-#endif
-		bp->pf.max_irqs = max_irqs;
+	bp->hw_resc.max_irqs = max_irqs;
 }
 
 static int bnxt_init_msix(struct bnxt *bp)
@@ -5625,6 +5927,36 @@ static void bnxt_clear_int_mode(struct bnxt *bp)
 	bp->flags &= ~BNXT_FLAG_USING_MSIX;
 }
 
+static int bnxt_reserve_rings(struct bnxt *bp)
+{
+	int orig_cp = bp->hw_resc.resv_cp_rings;
+	int tcs = netdev_get_num_tc(bp->dev);
+	int rc;
+
+	if (!bnxt_need_reserve_rings(bp))
+		return 0;
+
+	rc = __bnxt_reserve_rings(bp);
+	if (rc) {
+		netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc);
+		return rc;
+	}
+	if ((bp->flags & BNXT_FLAG_NEW_RM) && bp->cp_nr_rings > orig_cp) {
+		bnxt_clear_int_mode(bp);
+		rc = bnxt_init_int_mode(bp);
+		if (rc)
+			return rc;
+	}
+	if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
+		netdev_err(bp->dev, "tx ring reservation failure\n");
+		netdev_reset_tc(bp->dev);
+		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+		return -ENOMEM;
+	}
+	bp->num_stat_ctxs = bp->cp_nr_rings;
+	return 0;
+}
+
 static void bnxt_free_irq(struct bnxt *bp)
 {
 	struct bnxt_irq *irq;
@@ -6375,6 +6707,10 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 	bnxt_preset_reg_win(bp);
 	netif_carrier_off(bp->dev);
 	if (irq_re_init) {
+		rc = bnxt_reserve_rings(bp);
+		if (rc)
+			return rc;
+
 		rc = bnxt_setup_int_mode(bp);
 		if (rc) {
 			netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
@@ -6510,23 +6846,13 @@ static bool bnxt_drv_busy(struct bnxt *bp)
 		test_bit(BNXT_STATE_READ_STATS, &bp->state));
 }
 
-int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
+			     bool link_re_init)
 {
-	int rc = 0;
-
-#ifdef CONFIG_BNXT_SRIOV
-	if (bp->sriov_cfg) {
-		rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
-						      !bp->sriov_cfg,
-						      BNXT_SRIOV_CFG_WAIT_TMO);
-		if (rc)
-			netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
-	}
-
 	/* Close the VF-reps before closing PF */
 	if (BNXT_PF(bp))
 		bnxt_vf_reps_close(bp);
-#endif
+
 	/* Change device state to avoid TX queue wake up's */
 	bnxt_tx_disable(bp);
 
@@ -6549,6 +6875,22 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 		bnxt_del_napi(bp);
 	}
 	bnxt_free_mem(bp, irq_re_init);
+}
+
+int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+{
+	int rc = 0;
+
+#ifdef CONFIG_BNXT_SRIOV
+	if (bp->sriov_cfg) {
+		rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
+						      !bp->sriov_cfg,
+						      BNXT_SRIOV_CFG_WAIT_TMO);
+		if (rc)
+			netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
+	}
+#endif
+	__bnxt_close_nic(bp, irq_re_init, link_re_init);
 	return rc;
 }
 
@@ -6828,13 +7170,26 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
 	if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
 		max_rss_ctxs = max_vnics;
 	if (vnics > max_vnics || vnics > max_rss_ctxs) {
-		netdev_warn(bp->dev,
-			    "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
-			    min(max_rss_ctxs - 1, max_vnics - 1));
+		if (bp->rx_nr_rings > 1)
+			netdev_warn(bp->dev,
+				    "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
+				    min(max_rss_ctxs - 1, max_vnics - 1));
 		return false;
 	}
 
-	return true;
+	if (!(bp->flags & BNXT_FLAG_NEW_RM))
+		return true;
+
+	if (vnics == bp->hw_resc.resv_vnics)
+		return true;
+
+	bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, vnics);
+	if (vnics <= bp->hw_resc.resv_vnics)
+		return true;
+
+	netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
+	bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 1);
+	return false;
 #else
 	return false;
 #endif
@@ -7169,7 +7524,8 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
 {
 	int max_rx, max_tx, tx_sets = 1;
 	int tx_rings_needed;
-	int rc;
+	int rx_rings = rx;
+	int cp, rc;
 
 	if (tcs)
 		tx_sets = tcs;
@@ -7185,7 +7541,10 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
 	if (max_tx < tx_rings_needed)
 		return -ENOMEM;
 
-	return bnxt_hwrm_check_tx_rings(bp, tx_rings_needed);
+	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+		rx_rings <<= 1;
+	cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
+	return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp);
 }
 
 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
@@ -7790,12 +8149,8 @@ int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr)
 
 	switch (attr->id) {
 	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
-		/* In SRIOV each PF-pool (PF + child VFs) serves as a
-		 * switching domain, the PF's perm mac-addr can be used
-		 * as the unique parent-id
-		 */
-		attr->u.ppid.id_len = ETH_ALEN;
-		ether_addr_copy(attr->u.ppid.id, bp->pf.mac_addr);
+		attr->u.ppid.id_len = sizeof(bp->switch_id);
+		memcpy(attr->u.ppid.id, bp->switch_id, attr->u.ppid.id_len);
 		break;
 	default:
 		return -EOPNOTSUPP;
@@ -7940,24 +8295,14 @@ static int bnxt_get_max_irq(struct pci_dev *pdev)
 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
 				int *max_cp)
 {
+	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 	int max_ring_grps = 0;
 
-#ifdef CONFIG_BNXT_SRIOV
-	if (!BNXT_PF(bp)) {
-		*max_tx = bp->vf.max_tx_rings;
-		*max_rx = bp->vf.max_rx_rings;
-		*max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
-		*max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs);
-		max_ring_grps = bp->vf.max_hw_ring_grps;
-	} else
-#endif
-	{
-		*max_tx = bp->pf.max_tx_rings;
-		*max_rx = bp->pf.max_rx_rings;
-		*max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
-		*max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs);
-		max_ring_grps = bp->pf.max_hw_ring_grps;
-	}
+	*max_tx = hw_resc->max_tx_rings;
+	*max_rx = hw_resc->max_rx_rings;
+	*max_cp = min_t(int, hw_resc->max_irqs, hw_resc->max_cp_rings);
+	*max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
+	max_ring_grps = hw_resc->max_hw_ring_grps;
 	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
 		*max_cp -= 1;
 		*max_rx -= 2;
@@ -8022,6 +8367,17 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
 	return rc;
 }
 
+/* In the initial default shared ring setting, each shared ring must have
+ * an RX/TX ring pair.
+ */
+static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
+{
+	bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
+	bp->rx_nr_rings = bp->cp_nr_rings;
+	bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
+	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+}
+
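[bnxt_trim_dflt_sh_rings() enforces that invariant by clamping all three counts to the smaller of the RX and per-TC TX defaults. The same arithmetic as a standalone sketch:]

#include <stdio.h>

/* In shared mode a completion ring serves one RX/TX pair, so the
 * default counts collapse to the smaller of the two. */
static void trim_shared(int *tx, int *rx, int *cp)
{
	*cp = *tx < *rx ? *tx : *rx;
	*rx = *cp;
	*tx = *cp;
}

int main(void)
{
	int tx = 8, rx = 4, cp = 0;

	trim_shared(&tx, &rx, &cp);
	printf("tx=%d rx=%d cp=%d\n", tx, rx, cp);	/* tx=4 rx=4 cp=4 */
	return 0;
}
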
 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
 {
 	int dflt_rings, max_rx_rings, max_tx_rings, rc;
@@ -8037,14 +8393,26 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
 		return rc;
 	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
 	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
+	if (sh)
+		bnxt_trim_dflt_sh_rings(bp);
+	else
+		bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
+	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
 
-	rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc);
+	rc = __bnxt_reserve_rings(bp);
 	if (rc)
 		netdev_warn(bp->dev, "Unable to reserve tx rings\n");
+	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+	if (sh)
+		bnxt_trim_dflt_sh_rings(bp);
 
-	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
-	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
-			       bp->tx_nr_rings + bp->rx_nr_rings;
+	/* Rings may have been trimmed, re-reserve the trimmed rings. */
+	if (bnxt_need_reserve_rings(bp)) {
+		rc = __bnxt_reserve_rings(bp);
+		if (rc)
+			netdev_warn(bp->dev, "2nd rings reservation failed.\n");
+		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+	}
 	bp->num_stat_ctxs = bp->cp_nr_rings;
 	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
 		bp->rx_nr_rings++;
@@ -8053,11 +8421,23 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
 	return rc;
 }
 
-void bnxt_restore_pf_fw_resources(struct bnxt *bp)
+int bnxt_restore_pf_fw_resources(struct bnxt *bp)
 {
+	int rc;
+
 	ASSERT_RTNL();
+	if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
+		return 0;
+
 	bnxt_hwrm_func_qcaps(bp);
-	bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
+	__bnxt_close_nic(bp, true, false);
+	bnxt_clear_int_mode(bp);
+	rc = bnxt_init_int_mode(bp);
+	if (rc)
+		dev_close(bp->dev);
+	else
+		rc = bnxt_open_nic(bp, true, false);
+	return rc;
 }
 
 static int bnxt_init_mac_addr(struct bnxt *bp)
@@ -8071,7 +8451,7 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
 		struct bnxt_vf_info *vf = &bp->vf;
 
 		if (is_valid_ether_addr(vf->mac_addr)) {
-			/* overwrite netdev dev_adr with admin VF MAC */
+			/* overwrite netdev dev_addr with admin VF MAC */
 			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
 		} else {
 			eth_hw_addr_random(bp->dev);
@@ -8283,6 +8663,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	else
 		device_set_wakeup_capable(&pdev->dev, false);
 
+	bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
+
 	if (BNXT_PF(bp)) {
 		if (!bnxt_pf_wq) {
 			bnxt_pf_wq =
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 89887a8..1989c47 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1,7 +1,7 @@
 /* Broadcom NetXtreme-C/E network driver.
  *
  * Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016-2017 Broadcom Limited
+ * Copyright (c) 2016-2018 Broadcom Limited
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -12,10 +12,10 @@
 #define BNXT_H
 
 #define DRV_MODULE_NAME		"bnxt_en"
-#define DRV_MODULE_VERSION	"1.8.0"
+#define DRV_MODULE_VERSION	"1.9.0"
 
 #define DRV_VER_MAJ	1
-#define DRV_VER_MIN	8
+#define DRV_VER_MIN	9
 #define DRV_VER_UPD	0
 
 #include <linux/interrupt.h>
@@ -776,19 +776,38 @@ struct bnxt_vnic_info {
 #define BNXT_VNIC_RFS_NEW_RSS_FLAG	0x10
 };
 
+struct bnxt_hw_resc {
+	u16	min_rsscos_ctxs;
+	u16	max_rsscos_ctxs;
+	u16	min_cp_rings;
+	u16	max_cp_rings;
+	u16	resv_cp_rings;
+	u16	min_tx_rings;
+	u16	max_tx_rings;
+	u16	resv_tx_rings;
+	u16	min_rx_rings;
+	u16	max_rx_rings;
+	u16	resv_rx_rings;
+	u16	min_hw_ring_grps;
+	u16	max_hw_ring_grps;
+	u16	resv_hw_ring_grps;
+	u16	min_l2_ctxs;
+	u16	max_l2_ctxs;
+	u16	min_vnics;
+	u16	max_vnics;
+	u16	resv_vnics;
+	u16	min_stat_ctxs;
+	u16	max_stat_ctxs;
+	u16	max_irqs;
+};
+
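[Each resource in bnxt_hw_resc carries a min/max pair reported by firmware plus a resv field recording what was actually granted. A toy model of how such a triple might be used -- an illustrative helper, not driver code:]

#include <stdio.h>

struct resc { unsigned short min, max, resv; };

/* A request is only valid inside [min, max]; resv remembers the
 * amount that was actually reserved. */
static int reserve(struct resc *r, unsigned short want)
{
	if (want < r->min || want > r->max)
		return -1;
	r->resv = want;
	return 0;
}

int main(void)
{
	struct resc tx = { 1, 16, 0 };

	if (!reserve(&tx, 8))
		printf("reserved %u tx rings\n", (unsigned int)tx.resv);
	return 0;
}
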
 #if defined(CONFIG_BNXT_SRIOV)
 struct bnxt_vf_info {
 	u16	fw_fid;
-	u8	mac_addr[ETH_ALEN];
-	u16	max_rsscos_ctxs;
-	u16	max_cp_rings;
-	u16	max_tx_rings;
-	u16	max_rx_rings;
-	u16	max_hw_ring_grps;
-	u16	max_l2_ctxs;
-	u16	max_irqs;
-	u16	max_vnics;
-	u16	max_stat_ctxs;
+	u8	mac_addr[ETH_ALEN];	/* PF assigned MAC Address */
+	u8	vf_mac_addr[ETH_ALEN];	/* VF assigned MAC address, only
+					 * stored by PF.
+					 */
 	u16	vlan;
 	u32	flags;
 #define BNXT_VF_QOS		0x1
@@ -809,15 +828,6 @@ struct bnxt_pf_info {
 	u16	fw_fid;
 	u16	port_id;
 	u8	mac_addr[ETH_ALEN];
-	u16	max_rsscos_ctxs;
-	u16	max_cp_rings;
-	u16	max_tx_rings; /* HW assigned max tx rings for this PF */
-	u16	max_rx_rings; /* HW assigned max rx rings for this PF */
-	u16	max_hw_ring_grps;
-	u16	max_irqs;
-	u16	max_l2_ctxs;
-	u16	max_vnics;
-	u16	max_stat_ctxs;
 	u32	first_vf_id;
 	u16	active_vfs;
 	u16	max_vfs;
@@ -829,6 +839,9 @@ struct bnxt_pf_info {
 	u32	max_rx_wm_flows;
 	unsigned long	*vf_event_bmap;
 	u16	hwrm_cmd_req_pages;
+	u8	vf_resv_strategy;
+#define BNXT_VF_RESV_STRATEGY_MAXIMAL	0
+#define BNXT_VF_RESV_STRATEGY_MINIMAL	1
 	void			*hwrm_cmd_req_addr[4];
 	dma_addr_t		hwrm_cmd_req_dma_addr[4];
 	struct bnxt_vf_info	*vf;
@@ -1137,6 +1150,7 @@ struct bnxt {
 	#define BNXT_FLAG_FW_DCBX_AGENT	0x800000
 	#define BNXT_FLAG_CHIP_NITRO_A0	0x1000000
 	#define BNXT_FLAG_DIM		0x2000000
+	#define BNXT_FLAG_NEW_RM	0x8000000
 
 	#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA |		\
 					    BNXT_FLAG_RFS |		\
@@ -1196,7 +1210,6 @@ struct bnxt {
 	int			tx_nr_rings;
 	int			tx_nr_rings_per_tc;
 	int			tx_nr_rings_xdp;
-	int			tx_reserved_rings;
 
 	int			tx_wake_thresh;
 	int			tx_push_thresh;
@@ -1308,6 +1321,7 @@ struct bnxt {
 #define BNXT_LINK_SPEED_CHNG_SP_EVENT	14
 #define BNXT_FLOW_STATS_SP_EVENT	15
 
+	struct bnxt_hw_resc	hw_resc;
 	struct bnxt_pf_info	pf;
 #ifdef CONFIG_BNXT_SRIOV
 	int			nr_vfs;
@@ -1357,6 +1371,7 @@ struct bnxt {
 	enum devlink_eswitch_mode eswitch_mode;
 	struct bnxt_vf_rep	**vf_reps; /* array of vf-rep ptrs */
 	u16			*cfa_code_map; /* cfa_code -> vf_idx map */
+	u8			switch_id[8];
 	struct bnxt_tc_info	*tc_info;
 };
 
@@ -1432,7 +1447,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
 		     int tx_xdp);
 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
 int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
-void bnxt_restore_pf_fw_resources(struct bnxt *bp);
+int bnxt_restore_pf_fw_resources(struct bnxt *bp);
 int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr);
 void bnxt_dim_work(struct work_struct *work);
 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index c99f4d0..82d17f8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -1,2437 +1,2700 @@
 /* Broadcom NetXtreme-C/E network driver.
  *
  * Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016-2017 Broadcom Limited
+ * Copyright (c) 2016-2018 Broadcom Limited
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
+ *
+ * DO NOT MODIFY!!! This file is automatically generated.
  */
 
-#ifndef BNXT_HSI_H
-#define BNXT_HSI_H
+#ifndef _BNXT_HSI_H_
+#define _BNXT_HSI_H_
 
-/* HSI and HWRM Specification 1.8.3 */
-#define HWRM_VERSION_MAJOR	1
-#define HWRM_VERSION_MINOR	8
-#define HWRM_VERSION_UPDATE	3
+/* hwrm_cmd_hdr (size:128b/16B) */
+struct hwrm_cmd_hdr {
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+};
 
-#define HWRM_VERSION_RSVD	1 /* non-zero means beta version */
+/* hwrm_resp_hdr (size:64b/8B) */
+struct hwrm_resp_hdr {
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+};
 
-#define HWRM_VERSION_STR	"1.8.3.1"
-/*
- * Following is the signature for HWRM message field that indicates not
- * applicable (All F's). Need to cast it the size of the field if needed.
- */
-#define HWRM_NA_SIGNATURE	((__le32)(-1))
-#define HWRM_MAX_REQ_LEN    (128)  /* hwrm_func_buf_rgtr */
-#define HWRM_MAX_RESP_LEN    (280)  /* hwrm_selftest_qlist */
-#define HW_HASH_INDEX_SIZE      0x80    /* 7 bit indirection table index. */
-#define HW_HASH_KEY_SIZE	40
-#define HWRM_RESP_VALID_KEY      1 /* valid key for HWRM response */
+#define CMD_DISCR_TLV_ENCAP 0x8000UL
+#define CMD_DISCR_LAST     CMD_DISCR_TLV_ENCAP
 
-/* Statistics Ejection Buffer Completion Record (16 bytes) */
+
+#define TLV_TYPE_HWRM_REQUEST                    0x1UL
+#define TLV_TYPE_HWRM_RESPONSE                   0x2UL
+#define TLV_TYPE_ROCE_SP_COMMAND                 0x3UL
+#define TLV_TYPE_ENGINE_CKV_DEVICE_SERIAL_NUMBER 0x8001UL
+#define TLV_TYPE_ENGINE_CKV_NONCE                0x8002UL
+#define TLV_TYPE_ENGINE_CKV_IV                   0x8003UL
+#define TLV_TYPE_ENGINE_CKV_AUTH_TAG             0x8004UL
+#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT           0x8005UL
+#define TLV_TYPE_ENGINE_CKV_ALGORITHMS           0x8006UL
+#define TLV_TYPE_ENGINE_CKV_ECC_PUBLIC_KEY       0x8007UL
+#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE      0x8008UL
+#define TLV_TYPE_LAST                           TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE
+
+
+/* tlv (size:64b/8B) */
+struct tlv {
+	__le16	cmd_discr;
+	u8	reserved_8b;
+	u8	flags;
+	#define TLV_FLAGS_MORE         0x1UL
+	#define TLV_FLAGS_MORE_LAST      0x0UL
+	#define TLV_FLAGS_MORE_NOT_LAST  0x1UL
+	#define TLV_FLAGS_REQUIRED     0x2UL
+	#define TLV_FLAGS_REQUIRED_NO    (0x0UL << 1)
+	#define TLV_FLAGS_REQUIRED_YES   (0x1UL << 1)
+	#define TLV_FLAGS_REQUIRED_LAST TLV_FLAGS_REQUIRED_YES
+	__le16	tlv_type;
+	__le16	length;
+};
+
+/* input (size:128b/16B) */
+struct input {
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+};
+
+/* output (size:64b/8B) */
+struct output {
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+};
+
+/* hwrm_short_input (size:128b/16B) */
+struct hwrm_short_input {
+	__le16	req_type;
+	__le16	signature;
+	#define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL
+	#define SHORT_REQ_SIGNATURE_LAST     SHORT_REQ_SIGNATURE_SHORT_CMD
+	__le16	unused_0;
+	__le16	size;
+	__le64	req_addr;
+};
+
+/* cmd_nums (size:64b/8B) */
+struct cmd_nums {
+	__le16	req_type;
+	#define HWRM_VER_GET                              0x0UL
+	#define HWRM_FUNC_BUF_UNRGTR                      0xeUL
+	#define HWRM_FUNC_VF_CFG                          0xfUL
+	#define HWRM_RESERVED1                            0x10UL
+	#define HWRM_FUNC_RESET                           0x11UL
+	#define HWRM_FUNC_GETFID                          0x12UL
+	#define HWRM_FUNC_VF_ALLOC                        0x13UL
+	#define HWRM_FUNC_VF_FREE                         0x14UL
+	#define HWRM_FUNC_QCAPS                           0x15UL
+	#define HWRM_FUNC_QCFG                            0x16UL
+	#define HWRM_FUNC_CFG                             0x17UL
+	#define HWRM_FUNC_QSTATS                          0x18UL
+	#define HWRM_FUNC_CLR_STATS                       0x19UL
+	#define HWRM_FUNC_DRV_UNRGTR                      0x1aUL
+	#define HWRM_FUNC_VF_RESC_FREE                    0x1bUL
+	#define HWRM_FUNC_VF_VNIC_IDS_QUERY               0x1cUL
+	#define HWRM_FUNC_DRV_RGTR                        0x1dUL
+	#define HWRM_FUNC_DRV_QVER                        0x1eUL
+	#define HWRM_FUNC_BUF_RGTR                        0x1fUL
+	#define HWRM_PORT_PHY_CFG                         0x20UL
+	#define HWRM_PORT_MAC_CFG                         0x21UL
+	#define HWRM_PORT_TS_QUERY                        0x22UL
+	#define HWRM_PORT_QSTATS                          0x23UL
+	#define HWRM_PORT_LPBK_QSTATS                     0x24UL
+	#define HWRM_PORT_CLR_STATS                       0x25UL
+	#define HWRM_PORT_LPBK_CLR_STATS                  0x26UL
+	#define HWRM_PORT_PHY_QCFG                        0x27UL
+	#define HWRM_PORT_MAC_QCFG                        0x28UL
+	#define HWRM_PORT_MAC_PTP_QCFG                    0x29UL
+	#define HWRM_PORT_PHY_QCAPS                       0x2aUL
+	#define HWRM_PORT_PHY_I2C_WRITE                   0x2bUL
+	#define HWRM_PORT_PHY_I2C_READ                    0x2cUL
+	#define HWRM_PORT_LED_CFG                         0x2dUL
+	#define HWRM_PORT_LED_QCFG                        0x2eUL
+	#define HWRM_PORT_LED_QCAPS                       0x2fUL
+	#define HWRM_QUEUE_QPORTCFG                       0x30UL
+	#define HWRM_QUEUE_QCFG                           0x31UL
+	#define HWRM_QUEUE_CFG                            0x32UL
+	#define HWRM_FUNC_VLAN_CFG                        0x33UL
+	#define HWRM_FUNC_VLAN_QCFG                       0x34UL
+	#define HWRM_QUEUE_PFCENABLE_QCFG                 0x35UL
+	#define HWRM_QUEUE_PFCENABLE_CFG                  0x36UL
+	#define HWRM_QUEUE_PRI2COS_QCFG                   0x37UL
+	#define HWRM_QUEUE_PRI2COS_CFG                    0x38UL
+	#define HWRM_QUEUE_COS2BW_QCFG                    0x39UL
+	#define HWRM_QUEUE_COS2BW_CFG                     0x3aUL
+	#define HWRM_QUEUE_DSCP_QCAPS                     0x3bUL
+	#define HWRM_QUEUE_DSCP2PRI_QCFG                  0x3cUL
+	#define HWRM_QUEUE_DSCP2PRI_CFG                   0x3dUL
+	#define HWRM_VNIC_ALLOC                           0x40UL
+	#define HWRM_VNIC_FREE                            0x41UL
+	#define HWRM_VNIC_CFG                             0x42UL
+	#define HWRM_VNIC_QCFG                            0x43UL
+	#define HWRM_VNIC_TPA_CFG                         0x44UL
+	#define HWRM_VNIC_TPA_QCFG                        0x45UL
+	#define HWRM_VNIC_RSS_CFG                         0x46UL
+	#define HWRM_VNIC_RSS_QCFG                        0x47UL
+	#define HWRM_VNIC_PLCMODES_CFG                    0x48UL
+	#define HWRM_VNIC_PLCMODES_QCFG                   0x49UL
+	#define HWRM_VNIC_QCAPS                           0x4aUL
+	#define HWRM_RING_ALLOC                           0x50UL
+	#define HWRM_RING_FREE                            0x51UL
+	#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS        0x52UL
+	#define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS     0x53UL
+	#define HWRM_RING_RESET                           0x5eUL
+	#define HWRM_RING_GRP_ALLOC                       0x60UL
+	#define HWRM_RING_GRP_FREE                        0x61UL
+	#define HWRM_RESERVED5                            0x64UL
+	#define HWRM_RESERVED6                            0x65UL
+	#define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC            0x70UL
+	#define HWRM_VNIC_RSS_COS_LB_CTX_FREE             0x71UL
+	#define HWRM_CFA_L2_FILTER_ALLOC                  0x90UL
+	#define HWRM_CFA_L2_FILTER_FREE                   0x91UL
+	#define HWRM_CFA_L2_FILTER_CFG                    0x92UL
+	#define HWRM_CFA_L2_SET_RX_MASK                   0x93UL
+	#define HWRM_CFA_VLAN_ANTISPOOF_CFG               0x94UL
+	#define HWRM_CFA_TUNNEL_FILTER_ALLOC              0x95UL
+	#define HWRM_CFA_TUNNEL_FILTER_FREE               0x96UL
+	#define HWRM_CFA_ENCAP_RECORD_ALLOC               0x97UL
+	#define HWRM_CFA_ENCAP_RECORD_FREE                0x98UL
+	#define HWRM_CFA_NTUPLE_FILTER_ALLOC              0x99UL
+	#define HWRM_CFA_NTUPLE_FILTER_FREE               0x9aUL
+	#define HWRM_CFA_NTUPLE_FILTER_CFG                0x9bUL
+	#define HWRM_CFA_EM_FLOW_ALLOC                    0x9cUL
+	#define HWRM_CFA_EM_FLOW_FREE                     0x9dUL
+	#define HWRM_CFA_EM_FLOW_CFG                      0x9eUL
+	#define HWRM_TUNNEL_DST_PORT_QUERY                0xa0UL
+	#define HWRM_TUNNEL_DST_PORT_ALLOC                0xa1UL
+	#define HWRM_TUNNEL_DST_PORT_FREE                 0xa2UL
+	#define HWRM_STAT_CTX_ALLOC                       0xb0UL
+	#define HWRM_STAT_CTX_FREE                        0xb1UL
+	#define HWRM_STAT_CTX_QUERY                       0xb2UL
+	#define HWRM_STAT_CTX_CLR_STATS                   0xb3UL
+	#define HWRM_FW_RESET                             0xc0UL
+	#define HWRM_FW_QSTATUS                           0xc1UL
+	#define HWRM_FW_SET_TIME                          0xc8UL
+	#define HWRM_FW_GET_TIME                          0xc9UL
+	#define HWRM_FW_SET_STRUCTURED_DATA               0xcaUL
+	#define HWRM_FW_GET_STRUCTURED_DATA               0xcbUL
+	#define HWRM_FW_IPC_MAILBOX                       0xccUL
+	#define HWRM_EXEC_FWD_RESP                        0xd0UL
+	#define HWRM_REJECT_FWD_RESP                      0xd1UL
+	#define HWRM_FWD_RESP                             0xd2UL
+	#define HWRM_FWD_ASYNC_EVENT_CMPL                 0xd3UL
+	#define HWRM_TEMP_MONITOR_QUERY                   0xe0UL
+	#define HWRM_WOL_FILTER_ALLOC                     0xf0UL
+	#define HWRM_WOL_FILTER_FREE                      0xf1UL
+	#define HWRM_WOL_FILTER_QCFG                      0xf2UL
+	#define HWRM_WOL_REASON_QCFG                      0xf3UL
+	#define HWRM_CFA_METER_PROFILE_ALLOC              0xf5UL
+	#define HWRM_CFA_METER_PROFILE_FREE               0xf6UL
+	#define HWRM_CFA_METER_PROFILE_CFG                0xf7UL
+	#define HWRM_CFA_METER_INSTANCE_ALLOC             0xf8UL
+	#define HWRM_CFA_METER_INSTANCE_FREE              0xf9UL
+	#define HWRM_CFA_VFR_ALLOC                        0xfdUL
+	#define HWRM_CFA_VFR_FREE                         0xfeUL
+	#define HWRM_CFA_VF_PAIR_ALLOC                    0x100UL
+	#define HWRM_CFA_VF_PAIR_FREE                     0x101UL
+	#define HWRM_CFA_VF_PAIR_INFO                     0x102UL
+	#define HWRM_CFA_FLOW_ALLOC                       0x103UL
+	#define HWRM_CFA_FLOW_FREE                        0x104UL
+	#define HWRM_CFA_FLOW_FLUSH                       0x105UL
+	#define HWRM_CFA_FLOW_STATS                       0x106UL
+	#define HWRM_CFA_FLOW_INFO                        0x107UL
+	#define HWRM_CFA_DECAP_FILTER_ALLOC               0x108UL
+	#define HWRM_CFA_DECAP_FILTER_FREE                0x109UL
+	#define HWRM_CFA_VLAN_ANTISPOOF_QCFG              0x10aUL
+	#define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC       0x10bUL
+	#define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE        0x10cUL
+	#define HWRM_CFA_PAIR_ALLOC                       0x10dUL
+	#define HWRM_CFA_PAIR_FREE                        0x10eUL
+	#define HWRM_CFA_PAIR_INFO                        0x10fUL
+	#define HWRM_FW_IPC_MSG                           0x110UL
+	#define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO        0x111UL
+	#define HWRM_ENGINE_CKV_HELLO                     0x12dUL
+	#define HWRM_ENGINE_CKV_STATUS                    0x12eUL
+	#define HWRM_ENGINE_CKV_CKEK_ADD                  0x12fUL
+	#define HWRM_ENGINE_CKV_CKEK_DELETE               0x130UL
+	#define HWRM_ENGINE_CKV_KEY_ADD                   0x131UL
+	#define HWRM_ENGINE_CKV_KEY_DELETE                0x132UL
+	#define HWRM_ENGINE_CKV_FLUSH                     0x133UL
+	#define HWRM_ENGINE_CKV_RNG_GET                   0x134UL
+	#define HWRM_ENGINE_CKV_KEY_GEN                   0x135UL
+	#define HWRM_ENGINE_QG_CONFIG_QUERY               0x13cUL
+	#define HWRM_ENGINE_QG_QUERY                      0x13dUL
+	#define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY 0x13eUL
+	#define HWRM_ENGINE_QG_METER_PROFILE_QUERY        0x13fUL
+	#define HWRM_ENGINE_QG_METER_PROFILE_ALLOC        0x140UL
+	#define HWRM_ENGINE_QG_METER_PROFILE_FREE         0x141UL
+	#define HWRM_ENGINE_QG_METER_QUERY                0x142UL
+	#define HWRM_ENGINE_QG_METER_BIND                 0x143UL
+	#define HWRM_ENGINE_QG_METER_UNBIND               0x144UL
+	#define HWRM_ENGINE_QG_FUNC_BIND                  0x145UL
+	#define HWRM_ENGINE_SG_CONFIG_QUERY               0x146UL
+	#define HWRM_ENGINE_SG_QUERY                      0x147UL
+	#define HWRM_ENGINE_SG_METER_QUERY                0x148UL
+	#define HWRM_ENGINE_SG_METER_CONFIG               0x149UL
+	#define HWRM_ENGINE_SG_QG_BIND                    0x14aUL
+	#define HWRM_ENGINE_QG_SG_UNBIND                  0x14bUL
+	#define HWRM_ENGINE_CONFIG_QUERY                  0x154UL
+	#define HWRM_ENGINE_STATS_CONFIG                  0x155UL
+	#define HWRM_ENGINE_STATS_CLEAR                   0x156UL
+	#define HWRM_ENGINE_STATS_QUERY                   0x157UL
+	#define HWRM_ENGINE_RQ_ALLOC                      0x15eUL
+	#define HWRM_ENGINE_RQ_FREE                       0x15fUL
+	#define HWRM_ENGINE_CQ_ALLOC                      0x160UL
+	#define HWRM_ENGINE_CQ_FREE                       0x161UL
+	#define HWRM_ENGINE_NQ_ALLOC                      0x162UL
+	#define HWRM_ENGINE_NQ_FREE                       0x163UL
+	#define HWRM_ENGINE_ON_DIE_RQE_CREDITS            0x164UL
+	#define HWRM_FUNC_RESOURCE_QCAPS                  0x190UL
+	#define HWRM_FUNC_VF_RESOURCE_CFG                 0x191UL
+	#define HWRM_SELFTEST_QLIST                       0x200UL
+	#define HWRM_SELFTEST_EXEC                        0x201UL
+	#define HWRM_SELFTEST_IRQ                         0x202UL
+	#define HWRM_SELFTEST_RETRIEVE_SERDES_DATA        0x203UL
+	#define HWRM_DBG_READ_DIRECT                      0xff10UL
+	#define HWRM_DBG_READ_INDIRECT                    0xff11UL
+	#define HWRM_DBG_WRITE_DIRECT                     0xff12UL
+	#define HWRM_DBG_WRITE_INDIRECT                   0xff13UL
+	#define HWRM_DBG_DUMP                             0xff14UL
+	#define HWRM_DBG_ERASE_NVM                        0xff15UL
+	#define HWRM_DBG_CFG                              0xff16UL
+	#define HWRM_DBG_COREDUMP_LIST                    0xff17UL
+	#define HWRM_DBG_COREDUMP_INITIATE                0xff18UL
+	#define HWRM_DBG_COREDUMP_RETRIEVE                0xff19UL
+	#define HWRM_NVM_FACTORY_DEFAULTS                 0xffeeUL
+	#define HWRM_NVM_VALIDATE_OPTION                  0xffefUL
+	#define HWRM_NVM_FLUSH                            0xfff0UL
+	#define HWRM_NVM_GET_VARIABLE                     0xfff1UL
+	#define HWRM_NVM_SET_VARIABLE                     0xfff2UL
+	#define HWRM_NVM_INSTALL_UPDATE                   0xfff3UL
+	#define HWRM_NVM_MODIFY                           0xfff4UL
+	#define HWRM_NVM_VERIFY_UPDATE                    0xfff5UL
+	#define HWRM_NVM_GET_DEV_INFO                     0xfff6UL
+	#define HWRM_NVM_ERASE_DIR_ENTRY                  0xfff7UL
+	#define HWRM_NVM_MOD_DIR_ENTRY                    0xfff8UL
+	#define HWRM_NVM_FIND_DIR_ENTRY                   0xfff9UL
+	#define HWRM_NVM_GET_DIR_ENTRIES                  0xfffaUL
+	#define HWRM_NVM_GET_DIR_INFO                     0xfffbUL
+	#define HWRM_NVM_RAW_DUMP                         0xfffcUL
+	#define HWRM_NVM_READ                             0xfffdUL
+	#define HWRM_NVM_WRITE                            0xfffeUL
+	#define HWRM_NVM_RAW_WRITE_BLK                    0xffffUL
+	#define HWRM_LAST                                HWRM_NVM_RAW_WRITE_BLK
+	__le16	unused_0[3];
+};
+
+/* ret_codes (size:64b/8B) */
+struct ret_codes {
+	__le16	error_code;
+	#define HWRM_ERR_CODE_SUCCESS                0x0UL
+	#define HWRM_ERR_CODE_FAIL                   0x1UL
+	#define HWRM_ERR_CODE_INVALID_PARAMS         0x2UL
+	#define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL
+	#define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR   0x4UL
+	#define HWRM_ERR_CODE_INVALID_FLAGS          0x5UL
+	#define HWRM_ERR_CODE_INVALID_ENABLES        0x6UL
+	#define HWRM_ERR_CODE_UNSUPPORTED_TLV        0x7UL
+	#define HWRM_ERR_CODE_NO_BUFFER              0x8UL
+	#define HWRM_ERR_CODE_HWRM_ERROR             0xfUL
+	#define HWRM_ERR_CODE_UNKNOWN_ERR            0xfffeUL
+	#define HWRM_ERR_CODE_CMD_NOT_SUPPORTED      0xffffUL
+	#define HWRM_ERR_CODE_LAST                  HWRM_ERR_CODE_CMD_NOT_SUPPORTED
+	__le16	unused_0[3];
+};
+
+/* hwrm_err_output (size:128b/16B) */
+struct hwrm_err_output {
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le32	opaque_0;
+	__le16	opaque_1;
+	u8	cmd_err;
+	u8	valid;
+};
+#define HWRM_NA_SIGNATURE ((__le32)(-1))
+#define HWRM_MAX_REQ_LEN 128
+#define HWRM_MAX_RESP_LEN 280
+#define HW_HASH_INDEX_SIZE 0x80
+#define HW_HASH_KEY_SIZE 40
+#define HWRM_RESP_VALID_KEY 1
+#define HWRM_VERSION_MAJOR 1
+#define HWRM_VERSION_MINOR 9
+#define HWRM_VERSION_UPDATE 0
+#define HWRM_VERSION_RSVD 0
+#define HWRM_VERSION_STR "1.9.0.0"
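
Every HWRM response, including the generic hwrm_err_output above, ends in a
one-byte valid field. Firmware DMAs the response into host memory and writes
that byte last, so a driver can poll it to know the whole response has landed.
A minimal sketch of that convention (read_valid_byte() is a hypothetical
stand-in for the driver's read of the last response byte, and the poll loop
and delay policy are illustrative, not taken from this header):

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical accessor: returns the last byte of the response buffer. */
    extern uint8_t read_valid_byte(const void *resp, uint16_t resp_len);

    /* Poll until firmware marks the response complete (sketch only). */
    static bool hwrm_resp_ready(const void *resp, uint16_t resp_len,
                                unsigned int max_polls)
    {
            while (max_polls--) {
                    if (read_valid_byte(resp, resp_len) == HWRM_RESP_VALID_KEY)
                            return true;
                    /* a real driver would udelay()/usleep_range() here */
            }
            return false;
    }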
+
+/* hwrm_ver_get_input (size:192b/24B) */
+struct hwrm_ver_get_input {
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	u8	hwrm_intf_maj;
+	u8	hwrm_intf_min;
+	u8	hwrm_intf_upd;
+	u8	unused_0[5];
+};
+
+/* hwrm_ver_get_output (size:1408b/176B) */
+struct hwrm_ver_get_output {
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	hwrm_intf_maj_8b;
+	u8	hwrm_intf_min_8b;
+	u8	hwrm_intf_upd_8b;
+	u8	hwrm_intf_rsvd_8b;
+	u8	hwrm_fw_maj_8b;
+	u8	hwrm_fw_min_8b;
+	u8	hwrm_fw_bld_8b;
+	u8	hwrm_fw_rsvd_8b;
+	u8	mgmt_fw_maj_8b;
+	u8	mgmt_fw_min_8b;
+	u8	mgmt_fw_bld_8b;
+	u8	mgmt_fw_rsvd_8b;
+	u8	netctrl_fw_maj_8b;
+	u8	netctrl_fw_min_8b;
+	u8	netctrl_fw_bld_8b;
+	u8	netctrl_fw_rsvd_8b;
+	__le32	dev_caps_cfg;
+	#define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED     0x1UL
+	#define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED     0x2UL
+	#define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED         0x4UL
+	#define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED          0x8UL
+	u8	roce_fw_maj_8b;
+	u8	roce_fw_min_8b;
+	u8	roce_fw_bld_8b;
+	u8	roce_fw_rsvd_8b;
+	char	hwrm_fw_name[16];
+	char	mgmt_fw_name[16];
+	char	netctrl_fw_name[16];
+	u8	reserved2[16];
+	char	roce_fw_name[16];
+	__le16	chip_num;
+	u8	chip_rev;
+	u8	chip_metal;
+	u8	chip_bond_id;
+	u8	chip_platform_type;
+	#define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC      0x0UL
+	#define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA      0x1UL
+	#define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL
+	#define VER_GET_RESP_CHIP_PLATFORM_TYPE_LAST     VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM
+	__le16	max_req_win_len;
+	__le16	max_resp_len;
+	__le16	def_req_timeout;
+	u8	flags;
+	#define VER_GET_RESP_FLAGS_DEV_NOT_RDY       0x1UL
+	#define VER_GET_RESP_FLAGS_EXT_VER_AVAIL     0x2UL
+	u8	unused_0[2];
+	u8	always_1;
+	__le16	hwrm_intf_major;
+	__le16	hwrm_intf_minor;
+	__le16	hwrm_intf_build;
+	__le16	hwrm_intf_patch;
+	__le16	hwrm_fw_major;
+	__le16	hwrm_fw_minor;
+	__le16	hwrm_fw_build;
+	__le16	hwrm_fw_patch;
+	__le16	mgmt_fw_major;
+	__le16	mgmt_fw_minor;
+	__le16	mgmt_fw_build;
+	__le16	mgmt_fw_patch;
+	__le16	netctrl_fw_major;
+	__le16	netctrl_fw_minor;
+	__le16	netctrl_fw_build;
+	__le16	netctrl_fw_patch;
+	__le16	roce_fw_major;
+	__le16	roce_fw_minor;
+	__le16	roce_fw_build;
+	__le16	roce_fw_patch;
+	__le16	max_ext_req_len;
+	u8	unused_1[5];
+	u8	valid;
+};
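
HWRM_VER_GET is typically the first command a driver issues; its response
carries both the legacy 8-bit *_8b version fields and the newer 16-bit fields
at the end of the structure. A sketch of filling the request with the
interface version this header was generated against (HWRM_VER_GET itself is
defined in the command list earlier in this file; cpu_to_le16() assumes the
kernel byte-order helpers, and the actual send path is driver-specific and
omitted):

    static void build_ver_get_req(struct hwrm_ver_get_input *req)
    {
            memset(req, 0, sizeof(*req));
            req->req_type = cpu_to_le16(HWRM_VER_GET);
            req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
            req->hwrm_intf_min = HWRM_VERSION_MINOR;
            req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
    }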
+
+/* eject_cmpl (size:128b/16B) */
 struct eject_cmpl {
-	__le16 type;
-	#define EJECT_CMPL_TYPE_MASK				    0x3fUL
-	#define EJECT_CMPL_TYPE_SFT				    0
-	#define EJECT_CMPL_TYPE_STAT_EJECT			   0x1aUL
-	__le16 len;
-	__le32 opaque;
-	__le32 v;
-	#define EJECT_CMPL_V					    0x1UL
-	__le32 unused_2;
+	__le16	type;
+	#define EJECT_CMPL_TYPE_MASK      0x3fUL
+	#define EJECT_CMPL_TYPE_SFT       0
+	#define EJECT_CMPL_TYPE_STAT_EJECT  0x1aUL
+	#define EJECT_CMPL_TYPE_LAST       EJECT_CMPL_TYPE_STAT_EJECT
+	__le16	len;
+	__le32	opaque;
+	__le32	v;
+	#define EJECT_CMPL_V     0x1UL
+	__le32	unused_2;
 };
 
-/* HWRM Completion Record (16 bytes) */
+/* hwrm_cmpl (size:128b/16B) */
 struct hwrm_cmpl {
-	__le16 type;
-	#define CMPL_TYPE_MASK					    0x3fUL
-	#define CMPL_TYPE_SFT					    0
-	#define CMPL_TYPE_HWRM_DONE				   0x20UL
-	__le16 sequence_id;
-	__le32 unused_1;
-	__le32 v;
-	#define CMPL_V						    0x1UL
-	__le32 unused_3;
+	__le16	type;
+	#define CMPL_TYPE_MASK     0x3fUL
+	#define CMPL_TYPE_SFT      0
+	#define CMPL_TYPE_HWRM_DONE  0x20UL
+	#define CMPL_TYPE_LAST      CMPL_TYPE_HWRM_DONE
+	__le16	sequence_id;
+	__le32	unused_1;
+	__le32	v;
+	#define CMPL_V     0x1UL
+	__le32	unused_3;
 };
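
Throughout this header a *_MASK / *_SFT pair describes a bit-field inside a
little-endian word: mask first, then shift right by the SFT value. A small
host-order sketch of decoding the completion type with the defines above
(uint16_t stands in for __le16 after a le16_to_cpu() conversion):

    #include <stdint.h>

    /* Extract the 6-bit completion type from the 'type' word. */
    static unsigned int cmpl_type(uint16_t type)
    {
            return (type & CMPL_TYPE_MASK) >> CMPL_TYPE_SFT;
    }

    /* Usage: cmpl_type(t) == CMPL_TYPE_HWRM_DONE identifies a command
     * completion.
     */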
 
-/* HWRM Forwarded Request (16 bytes) */
+/* hwrm_fwd_req_cmpl (size:128b/16B) */
 struct hwrm_fwd_req_cmpl {
-	__le16 req_len_type;
-	#define FWD_REQ_CMPL_TYPE_MASK				    0x3fUL
-	#define FWD_REQ_CMPL_TYPE_SFT				    0
-	#define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ			   0x22UL
-	#define FWD_REQ_CMPL_REQ_LEN_MASK			    0xffc0UL
-	#define FWD_REQ_CMPL_REQ_LEN_SFT			    6
-	__le16 source_id;
-	__le32 unused_0;
-	__le32 req_buf_addr_v[2];
-	#define FWD_REQ_CMPL_V					    0x1UL
-	#define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK			    0xfffffffeUL
-	#define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT			    1
+	__le16	req_len_type;
+	#define FWD_REQ_CMPL_TYPE_MASK        0x3fUL
+	#define FWD_REQ_CMPL_TYPE_SFT         0
+	#define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ  0x22UL
+	#define FWD_REQ_CMPL_TYPE_LAST         FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ
+	#define FWD_REQ_CMPL_REQ_LEN_MASK     0xffc0UL
+	#define FWD_REQ_CMPL_REQ_LEN_SFT      6
+	__le16	source_id;
+	__le32	unused0;
+	__le32	req_buf_addr_v[2];
+	#define FWD_REQ_CMPL_V                0x1UL
+	#define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL
+	#define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
 };
 
-/* HWRM Forwarded Response (16 bytes) */
+/* hwrm_fwd_resp_cmpl (size:128b/16B) */
 struct hwrm_fwd_resp_cmpl {
-	__le16 type;
-	#define FWD_RESP_CMPL_TYPE_MASK			    0x3fUL
-	#define FWD_RESP_CMPL_TYPE_SFT				    0
-	#define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP		   0x24UL
-	__le16 source_id;
-	__le16 resp_len;
-	__le16 unused_1;
-	__le32 resp_buf_addr_v[2];
-	#define FWD_RESP_CMPL_V				    0x1UL
-	#define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK		    0xfffffffeUL
-	#define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT		    1
+	__le16	type;
+	#define FWD_RESP_CMPL_TYPE_MASK         0x3fUL
+	#define FWD_RESP_CMPL_TYPE_SFT          0
+	#define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP  0x24UL
+	#define FWD_RESP_CMPL_TYPE_LAST          FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP
+	__le16	source_id;
+	__le16	resp_len;
+	__le16	unused_1;
+	__le32	resp_buf_addr_v[2];
+	#define FWD_RESP_CMPL_V                 0x1UL
+	#define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL
+	#define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
 };
 
-/* HWRM Asynchronous Event Completion Record (16 bytes) */
+/* hwrm_async_event_cmpl (size:128b/16B) */
 struct hwrm_async_event_cmpl {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_TYPE_MASK			    0x3fUL
-	#define ASYNC_EVENT_CMPL_TYPE_SFT			    0
-	#define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT		   0x2eUL
-	__le16 event_id;
-	#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE      0x0UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE	   0x1UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE       0x2UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE       0x3UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED   0x4UL
+	__le16	type;
+	#define ASYNC_EVENT_CMPL_TYPE_MASK            0x3fUL
+	#define ASYNC_EVENT_CMPL_TYPE_SFT             0
+	#define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT  0x2eUL
+	#define ASYNC_EVENT_CMPL_TYPE_LAST             ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT
+	__le16	event_id;
+	#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE         0x0UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE            0x1UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE          0x2UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE          0x3UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED      0x4UL
 	#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE   0x6UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE     0x7UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD	   0x10UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD	   0x11UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT     0x12UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD	   0x20UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD		   0x21UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR		   0x30UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE      0x31UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE	   0x33UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE	   0x34UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR		   0xffUL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_V				    0x1UL
-	#define ASYNC_EVENT_CMPL_OPAQUE_MASK			    0xfeUL
-	#define ASYNC_EVENT_CMPL_OPAQUE_SFT			    1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
+	#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE      0x6UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE        0x7UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD           0x10UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD             0x11UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT        0x12UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD             0x20UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD               0x21UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR                     0x30UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE         0x31UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE   0x32UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE              0x33UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE            0x34UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR                 0xffUL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_LAST                      ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
+	__le32	event_data2;
+	u8	opaque_v;
+	#define ASYNC_EVENT_CMPL_V          0x1UL
+	#define ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
+	#define ASYNC_EVENT_CMPL_OPAQUE_SFT 1
+	u8	timestamp_lo;
+	__le16	timestamp_hi;
+	__le32	event_data1;
 };
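
A driver demultiplexes asynchronous completions on event_id and then
reinterprets the record as the matching event-specific structure below. A
sketch of that dispatch, assuming event_id has already been converted to host
order:

    static const char *async_event_name(uint16_t event_id)
    {
            switch (event_id) {
            case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
                    return "link status change";
            case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
                    return "PF driver unload";
            case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
                    return "VF config change";
            case ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR:
                    return "HWRM error";
            default:
                    return "unhandled";
            }
    }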
 
-/* HWRM Asynchronous Event Completion Record for link status change (16 bytes) */
+/* hwrm_async_event_cmpl_link_status_change (size:128b/16B) */
 struct hwrm_async_event_cmpl_link_status_change {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK      0x3fUL
-	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT       0
-	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
+	__le16	type;
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK            0x3fUL
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT             0
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT  0x2eUL
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_LAST             ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT
+	__le16	event_id;
 	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V		    0x1UL
-	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK    0xfeUL
-	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT     1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
-	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN (0x0UL << 0)
-	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP (0x1UL << 0)
-	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST    ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
-	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
-	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
-	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
-	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LAST              ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE
+	__le32	event_data2;
+	u8	opaque_v;
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V          0x1UL
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
+	u8	timestamp_lo;
+	__le16	timestamp_hi;
+	__le32	event_data1;
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE     0x1UL
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN  0x0UL
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP    0x1UL
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK       0xeUL
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT        1
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK    0xffff0UL
+	#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT     4
 };
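
For the link status change event, bit 0 of event_data1 carries the new link
state and bits 19:4 carry the port ID, per the masks above. A host-order
decoding sketch:

    #include <stdbool.h>
    #include <stdint.h>

    static void decode_link_status(uint32_t event_data1, bool *link_up,
                                   unsigned int *port_id)
    {
            *link_up = (event_data1 &
                ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE) ==
                ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP;
            *port_id = (event_data1 &
                ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK) >>
                ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT;
    }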
 
-/* HWRM Asynchronous Event Completion Record for link MTU change (16 bytes) */
-struct hwrm_async_event_cmpl_link_mtu_change {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK	    0x3fUL
-	#define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT	    0
-	#define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
-	#define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE 0x1UL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V		    0x1UL
-	#define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK       0xfeUL
-	#define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT	    1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK 0xffffUL
-	#define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for link speed change (16 bytes) */
-struct hwrm_async_event_cmpl_link_speed_change {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK       0x3fUL
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT	    0
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V		    0x1UL
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK     0xfeUL
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT      1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE 0x1UL
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK 0xfffeUL
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT 1
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB (0x1UL << 1)
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB (0xaUL << 1)
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB (0x14UL << 1)
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB (0x19UL << 1)
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB (0x64UL << 1)
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB (0xc8UL << 1)
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1)
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1)
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1)
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1)
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST    ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16
-};
-
-/* HWRM Asynchronous Event Completion Record for DCB Config change (16 bytes) */
-struct hwrm_async_event_cmpl_dcb_config_change {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK       0x3fUL
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT	    0
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
-	__le32 event_data2;
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_ETS 0x1UL
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_PFC 0x2UL
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_APP 0x4UL
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V		    0x1UL
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK     0xfeUL
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT      1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_MASK 0xff0000UL
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_SFT 16
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE (0xffUL << 16)
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_LAST    ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_MASK 0xff000000UL
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_SFT 24
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE (0xffUL << 24)
-	#define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_LAST    ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE
-};
-
-/* HWRM Asynchronous Event Completion Record for port connection not allowed (16 bytes) */
+/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */
 struct hwrm_async_event_cmpl_port_conn_not_allowed {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK   0x3fUL
-	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT    0
-	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
+	__le16	type;
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK            0x3fUL
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT             0
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT  0x2eUL
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_LAST             ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT
+	__le16	event_id;
 	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V	    0x1UL
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_LAST                 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED
+	__le32	event_data2;
+	u8	opaque_v;
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V          0x1UL
 	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
-	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT  1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
-	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
-	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
-	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
-	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
-	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
-	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
-	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
-	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST    ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
+	u8	timestamp_lo;
+	__le16	timestamp_hi;
+	__le32	event_data1;
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK                 0xffffUL
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT                  0
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK      0xff0000UL
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT       16
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE        (0x0UL << 16)
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX   (0x1UL << 16)
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG  (0x2UL << 16)
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN     (0x3UL << 16)
+	#define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST       ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
 };
 
-/* HWRM Asynchronous Event Completion Record for link speed config not allowed (16 bytes) */
-struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK 0x3fUL
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT 0
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V      0x1UL
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_SFT 1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for link speed configuration change (16 bytes) */
+/* hwrm_async_event_cmpl_link_speed_cfg_change (size:128b/16B) */
 struct hwrm_async_event_cmpl_link_speed_cfg_change {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK   0x3fUL
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT    0
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
+	__le16	type;
+	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK            0x3fUL
+	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT             0
+	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT  0x2eUL
+	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_LAST             ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+	__le16	event_id;
 	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V	    0x1UL
+	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LAST                 ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE
+	__le32	event_data2;
+	u8	opaque_v;
+	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V          0x1UL
 	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT  1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
-	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
+	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
+	u8	timestamp_lo;
+	__le16	timestamp_hi;
+	__le32	event_data1;
+	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK                     0xffffUL
+	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT                      0
+	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE     0x10000UL
+	#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG           0x20000UL
 };
 
-/* HWRM Asynchronous Event Completion Record for Function Driver Unload (16 bytes) */
-struct hwrm_async_event_cmpl_func_drvr_unload {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK	    0x3fUL
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT	    0
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V		    0x1UL
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK      0xfeUL
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT       1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for Function Driver load (16 bytes) */
-struct hwrm_async_event_cmpl_func_drvr_load {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK	    0x3fUL
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT	    0
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V		    0x1UL
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK	    0xfeUL
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT	    1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
-	#define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record to indicate completion of FLR related processing (16 bytes) */
-struct hwrm_async_event_cmpl_func_flr_proc_cmplt {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_MASK     0x3fUL
-	#define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_SFT      0
-	#define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
-	#define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_V		    0x1UL
-	#define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_MASK   0xfeUL
-	#define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_SFT    1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
-	#define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for PF Driver Unload (16 bytes) */
+/* hwrm_async_event_cmpl_pf_drvr_unload (size:128b/16B) */
 struct hwrm_async_event_cmpl_pf_drvr_unload {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK	    0x3fUL
-	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT	    0
-	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
+	__le16	type;
+	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK            0x3fUL
+	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT             0
+	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT  0x2eUL
+	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_LAST             ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT
+	__le16	event_id;
 	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V		    0x1UL
-	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK	    0xfeUL
-	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT	    1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
+	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_LAST          ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD
+	__le32	event_data2;
+	u8	opaque_v;
+	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V          0x1UL
+	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
+	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1
+	u8	timestamp_lo;
+	__le16	timestamp_hi;
+	__le32	event_data1;
 	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
 	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
-	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL
-	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16
+	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK   0x70000UL
+	#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT    16
 };
 
-/* HWRM Asynchronous Event Completion Record for PF Driver load (16 bytes) */
-struct hwrm_async_event_cmpl_pf_drvr_load {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK	    0x3fUL
-	#define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT		    0
-	#define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
-	#define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD 0x21UL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V		    0x1UL
-	#define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK	    0xfeUL
-	#define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT	    1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
-	#define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
-	#define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_MASK 0x70000UL
-	#define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_SFT 16
-};
-
-/* HWRM Asynchronous Event Completion Record for VF FLR (16 bytes) */
-struct hwrm_async_event_cmpl_vf_flr {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK		    0x3fUL
-	#define ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT		    0
-	#define ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT     0x2eUL
-	__le16 event_id;
-	#define ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR	   0x30UL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_VF_FLR_V			    0x1UL
-	#define ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK		    0xfeUL
-	#define ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT		    1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK     0xffffUL
-	#define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT      0
-};
-
-/* HWRM Asynchronous Event Completion Record for VF MAC Addr change (16 bytes) */
-struct hwrm_async_event_cmpl_vf_mac_addr_change {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK      0x3fUL
-	#define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT       0
-	#define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
-	#define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V		    0x1UL
-	#define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK    0xfeUL
-	#define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT     1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK 0xffffUL
-	#define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0
-};
-
-/* HWRM Asynchronous Event Completion Record for PF-VF communication status change (16 bytes) */
-struct hwrm_async_event_cmpl_pf_vf_comm_status_change {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK 0x3fUL
-	#define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT 0
-	#define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
-	#define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V	    0x1UL
-	#define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
-	#define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_SFT 1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED 0x1UL
-};
-
-/* HWRM Asynchronous Event Completion Record for VF configuration change (16 bytes) */
+/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
 struct hwrm_async_event_cmpl_vf_cfg_change {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK	    0x3fUL
-	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT	    0
-	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
+	__le16	type;
+	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK            0x3fUL
+	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT             0
+	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT  0x2eUL
+	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_LAST             ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+	__le16	event_id;
 	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL
-	__le32 event_data2;
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V		    0x1UL
-	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK	    0xfeUL
-	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT	    1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
-	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
-	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
-	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
+	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST         ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE
+	__le32	event_data2;
+	u8	opaque_v;
+	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V          0x1UL
+	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
+	u8	timestamp_lo;
+	__le16	timestamp_hi;
+	__le32	event_data1;
+	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE               0x1UL
+	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE               0x2UL
+	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE     0x4UL
+	#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE         0x8UL
 };
 
-/* HWRM Asynchronous Event Completion Record for HWRM Error (16 bytes) */
-struct hwrm_async_event_cmpl_hwrm_error {
-	__le16 type;
-	#define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK		    0x3fUL
-	#define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT		    0
-	#define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
-	__le16 event_id;
-	#define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR   0xffUL
-	__le32 event_data2;
-	#define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
-	#define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
-	#define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL
-	#define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL
-	#define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL
-	#define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST    ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
-	u8 opaque_v;
-	#define ASYNC_EVENT_CMPL_HWRM_ERROR_V			    0x1UL
-	#define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK	    0xfeUL
-	#define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT		    1
-	u8 timestamp_lo;
-	__le16 timestamp_hi;
-	__le32 event_data1;
-	#define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP  0x1UL
-};
-
-/* hwrm_ver_get */
-/* Input (24 bytes) */
-struct hwrm_ver_get_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	u8 hwrm_intf_maj;
-	u8 hwrm_intf_min;
-	u8 hwrm_intf_upd;
-	u8 unused_0[5];
-};
-
-/* Output (128 bytes) */
-struct hwrm_ver_get_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 hwrm_intf_maj;
-	u8 hwrm_intf_min;
-	u8 hwrm_intf_upd;
-	u8 hwrm_intf_rsvd;
-	u8 hwrm_fw_maj;
-	u8 hwrm_fw_min;
-	u8 hwrm_fw_bld;
-	u8 hwrm_fw_rsvd;
-	u8 mgmt_fw_maj;
-	u8 mgmt_fw_min;
-	u8 mgmt_fw_bld;
-	u8 mgmt_fw_rsvd;
-	u8 netctrl_fw_maj;
-	u8 netctrl_fw_min;
-	u8 netctrl_fw_bld;
-	u8 netctrl_fw_rsvd;
-	__le32 dev_caps_cfg;
-	#define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED  0x1UL
-	#define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED  0x2UL
-	#define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED      0x4UL
-	#define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED       0x8UL
-	u8 roce_fw_maj;
-	u8 roce_fw_min;
-	u8 roce_fw_bld;
-	u8 roce_fw_rsvd;
-	char hwrm_fw_name[16];
-	char mgmt_fw_name[16];
-	char netctrl_fw_name[16];
-	__le32 reserved2[4];
-	char roce_fw_name[16];
-	__le16 chip_num;
-	u8 chip_rev;
-	u8 chip_metal;
-	u8 chip_bond_id;
-	u8 chip_platform_type;
-	#define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC		   0x0UL
-	#define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA		   0x1UL
-	#define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM	   0x2UL
-	__le16 max_req_win_len;
-	__le16 max_resp_len;
-	__le16 def_req_timeout;
-	u8 init_pending;
-	#define VER_GET_RESP_INIT_PENDING_DEV_NOT_RDY		    0x1UL
-	u8 unused_0;
-	u8 unused_1;
-	u8 valid;
-};
-
-/* hwrm_func_reset */
-/* Input (24 bytes) */
+/* hwrm_func_reset_input (size:192b/24B) */
 struct hwrm_func_reset_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 enables;
-	#define FUNC_RESET_REQ_ENABLES_VF_ID_VALID		    0x1UL
-	__le16 vf_id;
-	u8 func_reset_level;
-	#define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL	   0x0UL
-	#define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME	   0x1UL
-	#define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN     0x2UL
-	#define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF	   0x3UL
-	u8 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	enables;
+	#define FUNC_RESET_REQ_ENABLES_VF_ID_VALID     0x1UL
+	__le16	vf_id;
+	u8	func_reset_level;
+	#define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL      0x0UL
+	#define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME       0x1UL
+	#define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL
+	#define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF       0x3UL
+	#define FUNC_RESET_REQ_FUNC_RESET_LEVEL_LAST         FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF
+	u8	unused_0;
 };
 
-/* Output (16 bytes) */
+/* hwrm_func_reset_output (size:128b/16B) */
 struct hwrm_func_reset_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_func_getfid */
-/* Input (24 bytes) */
+/* hwrm_func_getfid_input (size:192b/24B) */
 struct hwrm_func_getfid_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 enables;
-	#define FUNC_GETFID_REQ_ENABLES_PCI_ID			    0x1UL
-	__le16 pci_id;
-	__le16 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	enables;
+	#define FUNC_GETFID_REQ_ENABLES_PCI_ID     0x1UL
+	__le16	pci_id;
+	u8	unused_0[2];
 };
 
-/* Output (16 bytes) */
+/* hwrm_func_getfid_output (size:128b/16B) */
 struct hwrm_func_getfid_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 fid;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	fid;
+	u8	unused_0[5];
+	u8	valid;
 };
 
-/* hwrm_func_vf_alloc */
-/* Input (24 bytes) */
+/* hwrm_func_vf_alloc_input (size:192b/24B) */
 struct hwrm_func_vf_alloc_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 enables;
-	#define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID		    0x1UL
-	__le16 first_vf_id;
-	__le16 num_vfs;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	enables;
+	#define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID     0x1UL
+	__le16	first_vf_id;
+	__le16	num_vfs;
 };
 
-/* Output (16 bytes) */
+/* hwrm_func_vf_alloc_output (size:128b/16B) */
 struct hwrm_func_vf_alloc_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 first_vf_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	first_vf_id;
+	u8	unused_0[5];
+	u8	valid;
 };
 
-/* hwrm_func_vf_free */
-/* Input (24 bytes) */
+/* hwrm_func_vf_free_input (size:192b/24B) */
 struct hwrm_func_vf_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 enables;
-	#define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID		    0x1UL
-	__le16 first_vf_id;
-	__le16 num_vfs;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	enables;
+	#define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID     0x1UL
+	__le16	first_vf_id;
+	__le16	num_vfs;
 };
 
-/* Output (16 bytes) */
+/* hwrm_func_vf_free_output (size:128b/16B) */
 struct hwrm_func_vf_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_func_vf_cfg */
-/* Input (32 bytes) */
+/* hwrm_func_vf_cfg_input (size:448b/56B) */
 struct hwrm_func_vf_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 enables;
-	#define FUNC_VF_CFG_REQ_ENABLES_MTU			    0x1UL
-	#define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN		    0x2UL
-	#define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR		    0x4UL
-	#define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR		    0x8UL
-	__le16 mtu;
-	__le16 guest_vlan;
-	__le16 async_event_cr;
-	u8 dflt_mac_addr[6];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	enables;
+	#define FUNC_VF_CFG_REQ_ENABLES_MTU                  0x1UL
+	#define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN           0x2UL
+	#define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR       0x4UL
+	#define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR        0x8UL
+	#define FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS      0x10UL
+	#define FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS       0x20UL
+	#define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS         0x40UL
+	#define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS         0x80UL
+	#define FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS          0x100UL
+	#define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS            0x200UL
+	#define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS        0x400UL
+	#define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS     0x800UL
+	__le16	mtu;
+	__le16	guest_vlan;
+	__le16	async_event_cr;
+	u8	dflt_mac_addr[6];
+	__le32	flags;
+	#define FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST             0x1UL
+	#define FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST             0x2UL
+	#define FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST           0x4UL
+	#define FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST     0x8UL
+	#define FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST       0x10UL
+	#define FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST       0x20UL
+	#define FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST           0x40UL
+	#define FUNC_VF_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST         0x80UL
+	__le16	num_rsscos_ctxs;
+	__le16	num_cmpl_rings;
+	__le16	num_tx_rings;
+	__le16	num_rx_rings;
+	__le16	num_l2_ctxs;
+	__le16	num_vnics;
+	__le16	num_stat_ctxs;
+	__le16	num_hw_ring_grps;
+	u8	unused_0[4];
 };
 
-/* Output (16 bytes) */
+/* hwrm_func_vf_cfg_output (size:128b/16B) */
 struct hwrm_func_vf_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_func_qcaps */
-/* Input (24 bytes) */
+/* hwrm_func_qcaps_input (size:192b/24B) */
 struct hwrm_func_qcaps_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 fid;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	fid;
+	u8	unused_0[6];
 };
 
-/* Output (80 bytes) */
+/* hwrm_func_qcaps_output (size:640b/80B) */
 struct hwrm_func_qcaps_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 fid;
-	__le16 port_id;
-	__le32 flags;
-	#define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED	    0x1UL
-	#define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING      0x2UL
-	#define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED		    0x4UL
-	#define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED	    0x8UL
-	#define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED	    0x10UL
-	#define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED       0x20UL
-	#define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED	    0x40UL
-	#define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED	    0x80UL
-	#define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED	    0x100UL
-	#define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED      0x200UL
-	#define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED	    0x400UL
-	#define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED   0x800UL
-	u8 mac_address[6];
-	__le16 max_rsscos_ctx;
-	__le16 max_cmpl_rings;
-	__le16 max_tx_rings;
-	__le16 max_rx_rings;
-	__le16 max_l2_ctxs;
-	__le16 max_vnics;
-	__le16 first_vf_id;
-	__le16 max_vfs;
-	__le16 max_stat_ctx;
-	__le32 max_encap_records;
-	__le32 max_decap_records;
-	__le32 max_tx_em_flows;
-	__le32 max_tx_wm_flows;
-	__le32 max_rx_em_flows;
-	__le32 max_rx_wm_flows;
-	__le32 max_mcast_filters;
-	__le32 max_flow_id;
-	__le32 max_hw_ring_grps;
-	__le16 max_sp_tx_rings;
-	u8 unused_0;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	fid;
+	__le16	port_id;
+	__le32	flags;
+	#define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED            0x1UL
+	#define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING        0x2UL
+	#define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED                  0x4UL
+	#define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED              0x8UL
+	#define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED              0x10UL
+	#define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED         0x20UL
+	#define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED              0x40UL
+	#define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED           0x80UL
+	#define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED            0x100UL
+	#define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED        0x200UL
+	#define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED            0x400UL
+	#define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED     0x800UL
+	#define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED     0x1000UL
+	#define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED      0x2000UL
+	#define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED        0x4000UL
+	#define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED       0x8000UL
+	u8	mac_address[6];
+	__le16	max_rsscos_ctx;
+	__le16	max_cmpl_rings;
+	__le16	max_tx_rings;
+	__le16	max_rx_rings;
+	__le16	max_l2_ctxs;
+	__le16	max_vnics;
+	__le16	first_vf_id;
+	__le16	max_vfs;
+	__le16	max_stat_ctx;
+	__le32	max_encap_records;
+	__le32	max_decap_records;
+	__le32	max_tx_em_flows;
+	__le32	max_tx_wm_flows;
+	__le32	max_rx_em_flows;
+	__le32	max_rx_wm_flows;
+	__le32	max_mcast_filters;
+	__le32	max_flow_id;
+	__le32	max_hw_ring_grps;
+	__le16	max_sp_tx_rings;
+	u8	unused_0;
+	u8	valid;
 };
 
-/* hwrm_func_qcfg */
-/* Input (24 bytes) */
+/* hwrm_func_qcfg_input (size:192b/24B) */
 struct hwrm_func_qcfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 fid;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	fid;
+	u8	unused_0[6];
 };
 
-/* Output (72 bytes) */
+/* hwrm_func_qcfg_output (size:640b/80B) */
 struct hwrm_func_qcfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 fid;
-	__le16 port_id;
-	__le16 vlan;
-	__le16 flags;
-	#define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED      0x1UL
-	#define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED	    0x2UL
-	#define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED	    0x4UL
-	#define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED      0x8UL
-	#define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED	    0x10UL
-	#define FUNC_QCFG_RESP_FLAGS_MULTI_HOST		    0x20UL
-	u8 mac_address[6];
-	__le16 pci_id;
-	__le16 alloc_rsscos_ctx;
-	__le16 alloc_cmpl_rings;
-	__le16 alloc_tx_rings;
-	__le16 alloc_rx_rings;
-	__le16 alloc_l2_ctx;
-	__le16 alloc_vnics;
-	__le16 mtu;
-	__le16 mru;
-	__le16 stat_ctx_id;
-	u8 port_partition_type;
-	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF		   0x0UL
-	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS	   0x1UL
-	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0	   0x2UL
-	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5	   0x3UL
-	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0	   0x4UL
-	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN	   0xffUL
-	u8 port_pf_cnt;
-	#define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL		   0x0UL
-	__le16 dflt_vnic_id;
-	__le16 max_mtu_configured;
-	__le32 min_bw;
-	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK		    0xfffffffUL
-	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT		    0
-	#define FUNC_QCFG_RESP_MIN_BW_SCALE			    0x10000000UL
-	#define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS		   (0x0UL << 28)
-	#define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES		   (0x1UL << 28)
-	#define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST    FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES
-	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK	    0xe0000000UL
-	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT	    29
-	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA	   (0x0UL << 29)
-	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO	   (0x2UL << 29)
-	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE	   (0x4UL << 29)
-	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA	   (0x6UL << 29)
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	fid;
+	__le16	port_id;
+	__le16	vlan;
+	__le16	flags;
+	#define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED     0x1UL
+	#define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED          0x2UL
+	#define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED        0x4UL
+	#define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED     0x8UL
+	#define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED        0x10UL
+	#define FUNC_QCFG_RESP_FLAGS_MULTI_HOST                   0x20UL
+	u8	mac_address[6];
+	__le16	pci_id;
+	__le16	alloc_rsscos_ctx;
+	__le16	alloc_cmpl_rings;
+	__le16	alloc_tx_rings;
+	__le16	alloc_rx_rings;
+	__le16	alloc_l2_ctx;
+	__le16	alloc_vnics;
+	__le16	mtu;
+	__le16	mru;
+	__le16	stat_ctx_id;
+	u8	port_partition_type;
+	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF     0x0UL
+	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS    0x1UL
+	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
+	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
+	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
+	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
+	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_LAST   FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN
+	u8	port_pf_cnt;
+	#define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
+	#define FUNC_QCFG_RESP_PORT_PF_CNT_LAST   FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL
+	__le16	dflt_vnic_id;
+	__le16	max_mtu_configured;
+	__le32	min_bw;
+	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT              0
+	#define FUNC_QCFG_RESP_MIN_BW_SCALE                     0x10000000UL
+	#define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST                 FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES
+	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
 	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
 	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
-	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST    FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 max_bw;
-	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK		    0xfffffffUL
-	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT		    0
-	#define FUNC_QCFG_RESP_MAX_BW_SCALE			    0x10000000UL
-	#define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS		   (0x0UL << 28)
-	#define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES		   (0x1UL << 28)
-	#define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST    FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES
-	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK	    0xe0000000UL
-	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT	    29
-	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA	   (0x0UL << 29)
-	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO	   (0x2UL << 29)
-	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE	   (0x4UL << 29)
-	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA	   (0x6UL << 29)
+	#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST         FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	max_bw;
+	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT              0
+	#define FUNC_QCFG_RESP_MAX_BW_SCALE                     0x10000000UL
+	#define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST                 FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES
+	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
 	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
 	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
-	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST    FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 evb_mode;
-	#define FUNC_QCFG_RESP_EVB_MODE_NO_EVB			   0x0UL
-	#define FUNC_QCFG_RESP_EVB_MODE_VEB			   0x1UL
-	#define FUNC_QCFG_RESP_EVB_MODE_VEPA			   0x2UL
-	u8 unused_0;
-	__le16 alloc_vfs;
-	__le32 alloc_mcast_filters;
-	__le32 alloc_hw_ring_grps;
-	__le16 alloc_sp_tx_rings;
-	u8 unused_1;
-	u8 valid;
+	#define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST         FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	evb_mode;
+	#define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
+	#define FUNC_QCFG_RESP_EVB_MODE_VEB    0x1UL
+	#define FUNC_QCFG_RESP_EVB_MODE_VEPA   0x2UL
+	#define FUNC_QCFG_RESP_EVB_MODE_LAST  FUNC_QCFG_RESP_EVB_MODE_VEPA
+	u8	cache_linesize;
+	#define FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_64  0x0UL
+	#define FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128 0x1UL
+	#define FUNC_QCFG_RESP_CACHE_LINESIZE_LAST              FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128
+	__le16	alloc_vfs;
+	__le32	alloc_mcast_filters;
+	__le32	alloc_hw_ring_grps;
+	__le16	alloc_sp_tx_rings;
+	__le16	alloc_stat_ctx;
+	u8	unused_2[7];
+	u8	valid;
 };
 
-/* hwrm_func_vlan_cfg */
-/* Input (48 bytes) */
+/* hwrm_func_vlan_cfg_input (size:384b/48B) */
 struct hwrm_func_vlan_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 fid;
-	u8 unused_0;
-	u8 unused_1;
-	__le32 enables;
-	#define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID		    0x1UL
-	#define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID		    0x2UL
-	#define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP		    0x4UL
-	#define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP		    0x8UL
-	#define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID		    0x10UL
-	#define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID		    0x20UL
-	__le16 stag_vid;
-	u8 stag_pcp;
-	u8 unused_2;
-	__be16 stag_tpid;
-	__le16 ctag_vid;
-	u8 ctag_pcp;
-	u8 unused_3;
-	__be16 ctag_tpid;
-	__le32 rsvd1;
-	__le32 rsvd2;
-	__le32 unused_4;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	fid;
+	u8	unused_0[2];
+	__le32	enables;
+	#define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID      0x1UL
+	#define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID      0x2UL
+	#define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP      0x4UL
+	#define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP      0x8UL
+	#define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID     0x10UL
+	#define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID     0x20UL
+	__le16	stag_vid;
+	u8	stag_pcp;
+	u8	unused_1;
+	__be16	stag_tpid;
+	__le16	ctag_vid;
+	u8	ctag_pcp;
+	u8	unused_2;
+	__be16	ctag_tpid;
+	__le32	rsvd1;
+	__le32	rsvd2;
+	u8	unused_3[4];
 };
 
-/* Output (16 bytes) */
+/* hwrm_func_vlan_cfg_output (size:128b/16B) */
 struct hwrm_func_vlan_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
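
[Aside, not part of the patch: every hwrm_*_output structure above ends in a one-byte "valid" field. Firmware writes that byte last, after the rest of the response has been DMA'd into host memory, so a driver can poll it to detect a completed response. A minimal sketch under that assumption; the helper name and timeout handling are illustrative, not taken from the bnxt driver:]

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	/* Poll the trailing "valid" byte of an HWRM response buffer.
	 * The volatile qualifier forces a fresh read each iteration.
	 */
	static int hwrm_poll_valid(const volatile u8 *valid, int timeout_us)
	{
		while (timeout_us--) {
			if (*valid)	/* firmware writes this byte last */
				return 0;
			udelay(1);
		}
		return -ETIMEDOUT;
	}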
 
-/* hwrm_func_cfg */
-/* Input (88 bytes) */
+/* hwrm_func_cfg_input (size:704b/88B) */
 struct hwrm_func_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 fid;
-	u8 unused_0;
-	u8 unused_1;
-	__le32 flags;
-	#define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE      0x1UL
-	#define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE       0x2UL
-	#define FUNC_CFG_REQ_FLAGS_RSVD_MASK			    0x1fcUL
-	#define FUNC_CFG_REQ_FLAGS_RSVD_SFT			    2
-	#define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE	    0x200UL
-	#define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE	    0x400UL
-	#define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST		    0x800UL
-	#define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC	    0x1000UL
-	#define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST		    0x2000UL
-	__le32 enables;
-	#define FUNC_CFG_REQ_ENABLES_MTU			    0x1UL
-	#define FUNC_CFG_REQ_ENABLES_MRU			    0x2UL
-	#define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS		    0x4UL
-	#define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS		    0x8UL
-	#define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS		    0x10UL
-	#define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS		    0x20UL
-	#define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS		    0x40UL
-	#define FUNC_CFG_REQ_ENABLES_NUM_VNICS			    0x80UL
-	#define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS		    0x100UL
-	#define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR		    0x200UL
-	#define FUNC_CFG_REQ_ENABLES_DFLT_VLAN			    0x400UL
-	#define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR		    0x800UL
-	#define FUNC_CFG_REQ_ENABLES_MIN_BW			    0x1000UL
-	#define FUNC_CFG_REQ_ENABLES_MAX_BW			    0x2000UL
-	#define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR		    0x4000UL
-	#define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE	    0x8000UL
-	#define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS		    0x10000UL
-	#define FUNC_CFG_REQ_ENABLES_EVB_MODE			    0x20000UL
-	#define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS		    0x40000UL
-	#define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS		    0x80000UL
-	__le16 mtu;
-	__le16 mru;
-	__le16 num_rsscos_ctxs;
-	__le16 num_cmpl_rings;
-	__le16 num_tx_rings;
-	__le16 num_rx_rings;
-	__le16 num_l2_ctxs;
-	__le16 num_vnics;
-	__le16 num_stat_ctxs;
-	__le16 num_hw_ring_grps;
-	u8 dflt_mac_addr[6];
-	__le16 dflt_vlan;
-	__be32 dflt_ip_addr[4];
-	__le32 min_bw;
-	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK		    0xfffffffUL
-	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT		    0
-	#define FUNC_CFG_REQ_MIN_BW_SCALE			    0x10000000UL
-	#define FUNC_CFG_REQ_MIN_BW_SCALE_BITS			   (0x0UL << 28)
-	#define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES		   (0x1UL << 28)
-	#define FUNC_CFG_REQ_MIN_BW_SCALE_LAST    FUNC_CFG_REQ_MIN_BW_SCALE_BYTES
-	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK		    0xe0000000UL
-	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT		    29
-	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA		   (0x0UL << 29)
-	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO		   (0x2UL << 29)
-	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE		   (0x4UL << 29)
-	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA		   (0x6UL << 29)
-	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100    (0x1UL << 29)
-	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID	   (0x7UL << 29)
-	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST    FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 max_bw;
-	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK		    0xfffffffUL
-	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT		    0
-	#define FUNC_CFG_REQ_MAX_BW_SCALE			    0x10000000UL
-	#define FUNC_CFG_REQ_MAX_BW_SCALE_BITS			   (0x0UL << 28)
-	#define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES		   (0x1UL << 28)
-	#define FUNC_CFG_REQ_MAX_BW_SCALE_LAST    FUNC_CFG_REQ_MAX_BW_SCALE_BYTES
-	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK		    0xe0000000UL
-	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT		    29
-	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA		   (0x0UL << 29)
-	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO		   (0x2UL << 29)
-	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE		   (0x4UL << 29)
-	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA		   (0x6UL << 29)
-	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100    (0x1UL << 29)
-	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID	   (0x7UL << 29)
-	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST    FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
-	__le16 async_event_cr;
-	u8 vlan_antispoof_mode;
-	#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK	   0x0UL
-	#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN    0x1UL
-	#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	fid;
+	u8	unused_0[2];
+	__le32	flags;
+	#define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE     0x1UL
+	#define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE      0x2UL
+	#define FUNC_CFG_REQ_FLAGS_RSVD_MASK                      0x1fcUL
+	#define FUNC_CFG_REQ_FLAGS_RSVD_SFT                       2
+	#define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE        0x200UL
+	#define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE       0x400UL
+	#define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST               0x800UL
+	#define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC         0x1000UL
+	#define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST                 0x2000UL
+	#define FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST                 0x4000UL
+	#define FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST               0x8000UL
+	#define FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST         0x10000UL
+	#define FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST           0x20000UL
+	#define FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST           0x40000UL
+	#define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST               0x80000UL
+	#define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST             0x100000UL
+	__le32	enables;
+	#define FUNC_CFG_REQ_ENABLES_MTU                     0x1UL
+	#define FUNC_CFG_REQ_ENABLES_MRU                     0x2UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS         0x4UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS          0x8UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS            0x10UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS            0x20UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS             0x40UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_VNICS               0x80UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS           0x100UL
+	#define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR           0x200UL
+	#define FUNC_CFG_REQ_ENABLES_DFLT_VLAN               0x400UL
+	#define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR            0x800UL
+	#define FUNC_CFG_REQ_ENABLES_MIN_BW                  0x1000UL
+	#define FUNC_CFG_REQ_ENABLES_MAX_BW                  0x2000UL
+	#define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR          0x4000UL
+	#define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE     0x8000UL
+	#define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS       0x10000UL
+	#define FUNC_CFG_REQ_ENABLES_EVB_MODE                0x20000UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS       0x40000UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS        0x80000UL
+	#define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE          0x100000UL
+	__le16	mtu;
+	__le16	mru;
+	__le16	num_rsscos_ctxs;
+	__le16	num_cmpl_rings;
+	__le16	num_tx_rings;
+	__le16	num_rx_rings;
+	__le16	num_l2_ctxs;
+	__le16	num_vnics;
+	__le16	num_stat_ctxs;
+	__le16	num_hw_ring_grps;
+	u8	dflt_mac_addr[6];
+	__le16	dflt_vlan;
+	__be32	dflt_ip_addr[4];
+	__le32	min_bw;
+	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT              0
+	#define FUNC_CFG_REQ_MIN_BW_SCALE                     0x10000000UL
+	#define FUNC_CFG_REQ_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define FUNC_CFG_REQ_MIN_BW_SCALE_LAST                 FUNC_CFG_REQ_MIN_BW_SCALE_BYTES
+	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST         FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	max_bw;
+	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT              0
+	#define FUNC_CFG_REQ_MAX_BW_SCALE                     0x10000000UL
+	#define FUNC_CFG_REQ_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define FUNC_CFG_REQ_MAX_BW_SCALE_LAST                 FUNC_CFG_REQ_MAX_BW_SCALE_BYTES
+	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST         FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
+	__le16	async_event_cr;
+	u8	vlan_antispoof_mode;
+	#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK                 0x0UL
+	#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN           0x1UL
+	#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE       0x2UL
 	#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN 0x3UL
-	u8 allowed_vlan_pris;
-	u8 evb_mode;
-	#define FUNC_CFG_REQ_EVB_MODE_NO_EVB			   0x0UL
-	#define FUNC_CFG_REQ_EVB_MODE_VEB			   0x1UL
-	#define FUNC_CFG_REQ_EVB_MODE_VEPA			   0x2UL
-	u8 unused_2;
-	__le16 num_mcast_filters;
+	#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_LAST                   FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN
+	u8	allowed_vlan_pris;
+	u8	evb_mode;
+	#define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL
+	#define FUNC_CFG_REQ_EVB_MODE_VEB    0x1UL
+	#define FUNC_CFG_REQ_EVB_MODE_VEPA   0x2UL
+	#define FUNC_CFG_REQ_EVB_MODE_LAST  FUNC_CFG_REQ_EVB_MODE_VEPA
+	u8	cache_linesize;
+	#define FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_64  0x0UL
+	#define FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_128 0x1UL
+	#define FUNC_CFG_REQ_CACHE_LINESIZE_LAST              FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_128
+	__le16	num_mcast_filters;
 };
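
[Aside, not part of the patch: min_bw and max_bw pack three fields into one __le32 (a 28-bit value in bits 0-27, a scale flag in bit 28 selecting bits vs. bytes, and a 3-bit unit in bits 29-31), per the _BW_VALUE_MASK/_SFT, _SCALE and _BW_VALUE_UNIT_* definitions above. A hypothetical decoder built only from those masks; the bnxt_* helper names are made up:]

	#include <linux/kernel.h>	/* le32_to_cpu() */
	/* plus the FUNC_CFG_REQ_* definitions from this header */

	/* 28-bit bandwidth value, bits 0-27 */
	static u32 bnxt_bw_value(__le32 bw)
	{
		return (le32_to_cpu(bw) & FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK) >>
		       FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT;
	}

	/* scale flag, bit 28: set means the value counts bytes, not bits */
	static bool bnxt_bw_scale_is_bytes(__le32 bw)
	{
		return (le32_to_cpu(bw) & FUNC_CFG_REQ_MAX_BW_SCALE) ==
		       FUNC_CFG_REQ_MAX_BW_SCALE_BYTES;
	}

	/* unit field, bits 29-31; compare the result against
	 * FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA and friends
	 */
	static u32 bnxt_bw_unit(__le32 bw)
	{
		return le32_to_cpu(bw) & FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK;
	}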
 
-/* Output (16 bytes) */
+/* hwrm_func_cfg_output (size:128b/16B) */
 struct hwrm_func_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_func_qstats */
-/* Input (24 bytes) */
+/* hwrm_func_qstats_input (size:192b/24B) */
 struct hwrm_func_qstats_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 fid;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	fid;
+	u8	unused_0[6];
 };
 
-/* Output (176 bytes) */
+/* hwrm_func_qstats_output (size:1408b/176B) */
 struct hwrm_func_qstats_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le64 tx_ucast_pkts;
-	__le64 tx_mcast_pkts;
-	__le64 tx_bcast_pkts;
-	__le64 tx_discard_pkts;
-	__le64 tx_drop_pkts;
-	__le64 tx_ucast_bytes;
-	__le64 tx_mcast_bytes;
-	__le64 tx_bcast_bytes;
-	__le64 rx_ucast_pkts;
-	__le64 rx_mcast_pkts;
-	__le64 rx_bcast_pkts;
-	__le64 rx_discard_pkts;
-	__le64 rx_drop_pkts;
-	__le64 rx_ucast_bytes;
-	__le64 rx_mcast_bytes;
-	__le64 rx_bcast_bytes;
-	__le64 rx_agg_pkts;
-	__le64 rx_agg_bytes;
-	__le64 rx_agg_events;
-	__le64 rx_agg_aborts;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le64	tx_ucast_pkts;
+	__le64	tx_mcast_pkts;
+	__le64	tx_bcast_pkts;
+	__le64	tx_discard_pkts;
+	__le64	tx_drop_pkts;
+	__le64	tx_ucast_bytes;
+	__le64	tx_mcast_bytes;
+	__le64	tx_bcast_bytes;
+	__le64	rx_ucast_pkts;
+	__le64	rx_mcast_pkts;
+	__le64	rx_bcast_pkts;
+	__le64	rx_discard_pkts;
+	__le64	rx_drop_pkts;
+	__le64	rx_ucast_bytes;
+	__le64	rx_mcast_bytes;
+	__le64	rx_bcast_bytes;
+	__le64	rx_agg_pkts;
+	__le64	rx_agg_bytes;
+	__le64	rx_agg_events;
+	__le64	rx_agg_aborts;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_func_clr_stats */
-/* Input (24 bytes) */
+/* hwrm_func_clr_stats_input (size:192b/24B) */
 struct hwrm_func_clr_stats_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 fid;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	fid;
+	u8	unused_0[6];
 };
 
-/* Output (16 bytes) */
+/* hwrm_func_clr_stats_output (size:128b/16B) */
 struct hwrm_func_clr_stats_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_func_vf_resc_free */
-/* Input (24 bytes) */
+/* hwrm_func_vf_resc_free_input (size:192b/24B) */
 struct hwrm_func_vf_resc_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 vf_id;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	vf_id;
+	u8	unused_0[6];
 };
 
-/* Output (16 bytes) */
+/* hwrm_func_vf_resc_free_output (size:128b/16B) */
 struct hwrm_func_vf_resc_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_func_vf_vnic_ids_query */
-/* Input (32 bytes) */
+/* hwrm_func_vf_vnic_ids_query_input (size:256b/32B) */
 struct hwrm_func_vf_vnic_ids_query_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 vf_id;
-	u8 unused_0;
-	u8 unused_1;
-	__le32 max_vnic_id_cnt;
-	__le64 vnic_id_tbl_addr;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	vf_id;
+	u8	unused_0[2];
+	__le32	max_vnic_id_cnt;
+	__le64	vnic_id_tbl_addr;
 };
 
-/* Output (16 bytes) */
+/* hwrm_func_vf_vnic_ids_query_output (size:128b/16B) */
 struct hwrm_func_vf_vnic_ids_query_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 vnic_id_cnt;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le32	vnic_id_cnt;
+	u8	unused_0[3];
+	u8	valid;
 };
 
-/* hwrm_func_drv_rgtr */
-/* Input (80 bytes) */
+/* hwrm_func_drv_rgtr_input (size:832b/104B) */
 struct hwrm_func_drv_rgtr_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE		    0x1UL
-	#define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE		    0x2UL
-	__le32 enables;
-	#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE		    0x1UL
-	#define FUNC_DRV_RGTR_REQ_ENABLES_VER			    0x2UL
-	#define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP		    0x4UL
-	#define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD		    0x8UL
-	#define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD	    0x10UL
-	__le16 os_type;
-	#define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN		   0x0UL
-	#define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER		   0x1UL
-	#define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS		   0xeUL
-	#define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS		   0x12UL
-	#define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS		   0x1dUL
-	#define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX		   0x24UL
-	#define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD		   0x2aUL
-	#define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI			   0x68UL
-	#define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864		   0x73UL
-	#define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2		   0x74UL
-	#define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI			   0x8000UL
-	u8 ver_maj;
-	u8 ver_min;
-	u8 ver_upd;
-	u8 unused_0;
-	__le16 unused_1;
-	__le32 timestamp;
-	__le32 unused_2;
-	__le32 vf_req_fwd[8];
-	__le32 async_event_fwd[8];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE      0x1UL
+	#define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE     0x2UL
+	__le32	enables;
+	#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE             0x1UL
+	#define FUNC_DRV_RGTR_REQ_ENABLES_VER                 0x2UL
+	#define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP           0x4UL
+	#define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD          0x8UL
+	#define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD     0x10UL
+	__le16	os_type;
+	#define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN   0x0UL
+	#define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER     0x1UL
+	#define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS     0xeUL
+	#define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS   0x12UL
+	#define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS   0x1dUL
+	#define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX     0x24UL
+	#define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD   0x2aUL
+	#define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI      0x68UL
+	#define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864    0x73UL
+	#define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
+	#define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI      0x8000UL
+	#define FUNC_DRV_RGTR_REQ_OS_TYPE_LAST     FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI
+	u8	ver_maj;
+	u8	ver_min;
+	u8	ver_upd;
+	u8	unused_0[3];
+	__le32	timestamp;
+	u8	unused_1[4];
+	__le32	vf_req_fwd[8];
+	__le32	async_event_fwd[8];
 };
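
[Aside, not part of the patch: vf_req_fwd and async_event_fwd are 256-bit bitmaps laid out as eight little-endian 32-bit words; setting bit N asks firmware to forward VF request type N, or async event ID N, to the registering driver. A sketch of a bit-set helper, name hypothetical:]

	#include <linux/kernel.h>	/* cpu_to_le32() */
	#include <linux/types.h>

	/* Set bit "id" in an 8 x __le32 forwarding bitmap (256 IDs). */
	static void bnxt_fwd_bmap_set(__le32 *bmap, unsigned int id)
	{
		bmap[id / 32] |= cpu_to_le32(1U << (id % 32));
	}

[Usage would look like bnxt_fwd_bmap_set(req->async_event_fwd, event_id) before issuing the hwrm_func_drv_rgtr request with the ASYNC_EVENT_FWD enables bit set.]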
 
-/* Output (16 bytes) */
+/* hwrm_func_drv_rgtr_output (size:128b/16B) */
 struct hwrm_func_drv_rgtr_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_func_drv_unrgtr */
-/* Input (24 bytes) */
+/* hwrm_func_drv_unrgtr_input (size:192b/24B) */
 struct hwrm_func_drv_unrgtr_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
 	#define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN     0x1UL
-	__le32 unused_0;
+	u8	unused_0[4];
 };
 
-/* Output (16 bytes) */
+/* hwrm_func_drv_unrgtr_output (size:128b/16B) */
 struct hwrm_func_drv_unrgtr_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_func_buf_rgtr */
-/* Input (128 bytes) */
+/* hwrm_func_buf_rgtr_input (size:1024b/128B) */
 struct hwrm_func_buf_rgtr_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 enables;
-	#define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID		    0x1UL
-	#define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR		    0x2UL
-	__le16 vf_id;
-	__le16 req_buf_num_pages;
-	__le16 req_buf_page_size;
-	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B	   0x4UL
-	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K		   0xcUL
-	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K		   0xdUL
-	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K	   0x10UL
-	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M		   0x15UL
-	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M		   0x16UL
-	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G		   0x1eUL
-	__le16 req_buf_len;
-	__le16 resp_buf_len;
-	u8 unused_0;
-	u8 unused_1;
-	__le64 req_buf_page_addr0;
-	__le64 req_buf_page_addr1;
-	__le64 req_buf_page_addr2;
-	__le64 req_buf_page_addr3;
-	__le64 req_buf_page_addr4;
-	__le64 req_buf_page_addr5;
-	__le64 req_buf_page_addr6;
-	__le64 req_buf_page_addr7;
-	__le64 req_buf_page_addr8;
-	__le64 req_buf_page_addr9;
-	__le64 error_buf_addr;
-	__le64 resp_buf_addr;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	enables;
+	#define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID            0x1UL
+	#define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR     0x2UL
+	__le16	vf_id;
+	__le16	req_buf_num_pages;
+	__le16	req_buf_page_size;
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K  0xcUL
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K  0xdUL
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M  0x15UL
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M  0x16UL
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G  0x1eUL
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_LAST FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G
+	__le16	req_buf_len;
+	__le16	resp_buf_len;
+	u8	unused_0[2];
+	__le64	req_buf_page_addr0;
+	__le64	req_buf_page_addr1;
+	__le64	req_buf_page_addr2;
+	__le64	req_buf_page_addr3;
+	__le64	req_buf_page_addr4;
+	__le64	req_buf_page_addr5;
+	__le64	req_buf_page_addr6;
+	__le64	req_buf_page_addr7;
+	__le64	req_buf_page_addr8;
+	__le64	req_buf_page_addr9;
+	__le64	error_buf_addr;
+	__le64	resp_buf_addr;
 };
 
-/* Output (16 bytes) */
+/* hwrm_func_buf_rgtr_output (size:128b/16B) */
 struct hwrm_func_buf_rgtr_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_func_drv_qver */
-/* Input (24 bytes) */
+/* hwrm_func_drv_qver_input (size:192b/24B) */
 struct hwrm_func_drv_qver_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 reserved;
-	__le16 fid;
-	__le16 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	reserved;
+	__le16	fid;
+	u8	unused_0[2];
 };
 
-/* Output (16 bytes) */
+/* hwrm_func_drv_qver_output (size:128b/16B) */
 struct hwrm_func_drv_qver_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 os_type;
-	#define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN		   0x0UL
-	#define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER		   0x1UL
-	#define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS		   0xeUL
-	#define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS		   0x12UL
-	#define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS		   0x1dUL
-	#define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX		   0x24UL
-	#define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD		   0x2aUL
-	#define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI		   0x68UL
-	#define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864		   0x73UL
-	#define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2		   0x74UL
-	#define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI		   0x8000UL
-	u8 ver_maj;
-	u8 ver_min;
-	u8 ver_upd;
-	u8 unused_0;
-	u8 unused_1;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	os_type;
+	#define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN   0x0UL
+	#define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER     0x1UL
+	#define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS     0xeUL
+	#define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS   0x12UL
+	#define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS   0x1dUL
+	#define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX     0x24UL
+	#define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD   0x2aUL
+	#define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI      0x68UL
+	#define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864    0x73UL
+	#define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
+	#define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI      0x8000UL
+	#define FUNC_DRV_QVER_RESP_OS_TYPE_LAST     FUNC_DRV_QVER_RESP_OS_TYPE_UEFI
+	u8	ver_maj;
+	u8	ver_min;
+	u8	ver_upd;
+	u8	unused_0[2];
+	u8	valid;
 };
 
-/* hwrm_port_phy_cfg */
-/* Input (56 bytes) */
+/* hwrm_func_resource_qcaps_input (size:192b/24B) */
+struct hwrm_func_resource_qcaps_input {
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	fid;
+	u8	unused_0[6];
+};
+
+/* hwrm_func_resource_qcaps_output (size:384b/48B) */
+struct hwrm_func_resource_qcaps_output {
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	max_vfs;
+	__le16	max_msix;
+	__le16	vf_reservation_strategy;
+	#define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL 0x0UL
+	#define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL 0x1UL
+	#define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST   FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL
+	__le16	min_rsscos_ctx;
+	__le16	max_rsscos_ctx;
+	__le16	min_cmpl_rings;
+	__le16	max_cmpl_rings;
+	__le16	min_tx_rings;
+	__le16	max_tx_rings;
+	__le16	min_rx_rings;
+	__le16	max_rx_rings;
+	__le16	min_l2_ctxs;
+	__le16	max_l2_ctxs;
+	__le16	min_vnics;
+	__le16	max_vnics;
+	__le16	min_stat_ctx;
+	__le16	max_stat_ctx;
+	__le16	min_hw_ring_grps;
+	__le16	max_hw_ring_grps;
+	u8	unused_0;
+	u8	valid;
+};
+
+/* hwrm_func_vf_resource_cfg_input (size:448b/56B) */
+struct hwrm_func_vf_resource_cfg_input {
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	vf_id;
+	__le16	max_msix;
+	__le16	min_rsscos_ctx;
+	__le16	max_rsscos_ctx;
+	__le16	min_cmpl_rings;
+	__le16	max_cmpl_rings;
+	__le16	min_tx_rings;
+	__le16	max_tx_rings;
+	__le16	min_rx_rings;
+	__le16	max_rx_rings;
+	__le16	min_l2_ctxs;
+	__le16	max_l2_ctxs;
+	__le16	min_vnics;
+	__le16	max_vnics;
+	__le16	min_stat_ctx;
+	__le16	max_stat_ctx;
+	__le16	min_hw_ring_grps;
+	__le16	max_hw_ring_grps;
+	u8	unused_0[4];
+};
+
+/* hwrm_func_vf_resource_cfg_output (size:256b/32B) */
+struct hwrm_func_vf_resource_cfg_output {
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	reserved_rsscos_ctx;
+	__le16	reserved_cmpl_rings;
+	__le16	reserved_tx_rings;
+	__le16	reserved_rx_rings;
+	__le16	reserved_l2_ctxs;
+	__le16	reserved_vnics;
+	__le16	reserved_stat_ctx;
+	__le16	reserved_hw_ring_grps;
+	u8	unused_0[7];
+	u8	valid;
+};
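
[Aside, not part of the patch: the new hwrm_func_resource_qcaps / hwrm_func_vf_resource_cfg pair appears to let the PF query a (min, max) window per resource type and then reserve per-VF amounts inside those windows, with the response echoing what was actually reserved. A hypothetical helper that clamps a requested count into the advertised window, assuming exactly that semantic:]

	#include <linux/kernel.h>	/* le16_to_cpu(), clamp_t() */
	#include <linux/types.h>

	/* Keep a requested resource count inside the firmware-advertised
	 * [min, max] window from hwrm_func_resource_qcaps_output.
	 */
	static u16 bnxt_clamp_resc(u16 want, __le16 min, __le16 max)
	{
		u16 lo = le16_to_cpu(min);
		u16 hi = le16_to_cpu(max);

		return clamp_t(u16, want, lo, hi);
	}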
+
+/* hwrm_port_phy_cfg_input (size:448b/56B) */
 struct hwrm_port_phy_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY		    0x1UL
-	#define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED		    0x2UL
-	#define PORT_PHY_CFG_REQ_FLAGS_FORCE			    0x4UL
-	#define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG		    0x8UL
-	#define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE		    0x10UL
-	#define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE		    0x20UL
-	#define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE	    0x40UL
-	#define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE	    0x80UL
-	#define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE	    0x100UL
-	#define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE	    0x200UL
-	#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE	    0x400UL
-	#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE	    0x800UL
-	#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE	    0x1000UL
-	#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE	    0x2000UL
-	#define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN		    0x4000UL
-	__le32 enables;
-	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE		    0x1UL
-	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX		    0x2UL
-	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE		    0x4UL
-	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED	    0x8UL
-	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK      0x10UL
-	#define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED		    0x20UL
-	#define PORT_PHY_CFG_REQ_ENABLES_LPBK			    0x40UL
-	#define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS		    0x80UL
-	#define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE		    0x100UL
-	#define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK       0x200UL
-	#define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER		    0x400UL
-	__le16 port_id;
-	__le16 force_link_speed;
-	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB	   0x1UL
-	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB		   0xaUL
-	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB		   0x14UL
-	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB	   0x19UL
-	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB		   0x64UL
-	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB		   0xc8UL
-	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB		   0xfaUL
-	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB		   0x190UL
-	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB		   0x1f4UL
-	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB	   0x3e8UL
-	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB		   0xffffUL
-	u8 auto_mode;
-	#define PORT_PHY_CFG_REQ_AUTO_MODE_NONE		   0x0UL
-	#define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS		   0x1UL
-	#define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED		   0x2UL
-	#define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW	   0x3UL
-	#define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK		   0x4UL
-	u8 auto_duplex;
-	#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF		   0x0UL
-	#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL		   0x1UL
-	#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH		   0x2UL
-	u8 auto_pause;
-	#define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX			    0x1UL
-	#define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX			    0x2UL
-	#define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE	    0x4UL
-	u8 unused_0;
-	__le16 auto_link_speed;
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB		   0x1UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB		   0xaUL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB		   0x14UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB		   0x19UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB		   0x64UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB		   0xc8UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB		   0xfaUL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB		   0x190UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB		   0x1f4UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB		   0x3e8UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB		   0xffffUL
-	__le16 auto_link_speed_mask;
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD      0x1UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB	    0x2UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD	    0x4UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB	    0x8UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB	    0x10UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB	    0x20UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB	    0x40UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB	    0x80UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB	    0x100UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB	    0x200UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB	    0x400UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB	    0x800UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD       0x1000UL
-	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB	    0x2000UL
-	u8 wirespeed;
-	#define PORT_PHY_CFG_REQ_WIRESPEED_OFF			   0x0UL
-	#define PORT_PHY_CFG_REQ_WIRESPEED_ON			   0x1UL
-	u8 lpbk;
-	#define PORT_PHY_CFG_REQ_LPBK_NONE			   0x0UL
-	#define PORT_PHY_CFG_REQ_LPBK_LOCAL			   0x1UL
-	#define PORT_PHY_CFG_REQ_LPBK_REMOTE			   0x2UL
-	u8 force_pause;
-	#define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX		    0x1UL
-	#define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX		    0x2UL
-	u8 unused_1;
-	__le32 preemphasis;
-	__le16 eee_link_speed_mask;
-	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1	    0x1UL
-	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB	    0x2UL
-	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2	    0x4UL
-	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB	    0x8UL
-	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3	    0x10UL
-	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4	    0x20UL
-	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB	    0x40UL
-	u8 unused_2;
-	u8 unused_3;
-	__le32 tx_lpi_timer;
-	__le32 unused_4;
-	#define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK		    0xffffffUL
-	#define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT		    0
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY                0x1UL
+	#define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED               0x2UL
+	#define PORT_PHY_CFG_REQ_FLAGS_FORCE                    0x4UL
+	#define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG          0x8UL
+	#define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE               0x10UL
+	#define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE              0x20UL
+	#define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE        0x40UL
+	#define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE       0x80UL
+	#define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE       0x100UL
+	#define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE      0x200UL
+	#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE      0x400UL
+	#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE     0x800UL
+	#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE      0x1000UL
+	#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE     0x2000UL
+	#define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN           0x4000UL
+	__le32	enables;
+	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE                0x1UL
+	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX              0x2UL
+	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE               0x4UL
+	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED          0x8UL
+	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK     0x10UL
+	#define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED                0x20UL
+	#define PORT_PHY_CFG_REQ_ENABLES_LPBK                     0x40UL
+	#define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS              0x80UL
+	#define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE              0x100UL
+	#define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK      0x200UL
+	#define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER             0x400UL
+	__le16	port_id;
+	__le16	force_link_speed;
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB   0xaUL
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB   0x14UL
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB  0x64UL
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB  0xc8UL
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB  0xfaUL
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB  0x190UL
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB  0x1f4UL
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB  0xffffUL
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB
+	u8	auto_mode;
+	#define PORT_PHY_CFG_REQ_AUTO_MODE_NONE         0x0UL
+	#define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS   0x1UL
+	#define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED    0x2UL
+	#define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL
+	#define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK   0x4UL
+	#define PORT_PHY_CFG_REQ_AUTO_MODE_LAST        PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK
+	u8	auto_duplex;
+	#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL
+	#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL
+	#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL
+	#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_LAST PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH
+	u8	auto_pause;
+	#define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX                0x1UL
+	#define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX                0x2UL
+	#define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE     0x4UL
+	u8	unused_0;
+	__le16	auto_link_speed;
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB   0xaUL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB   0x14UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB  0x64UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB  0xc8UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB  0xfaUL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB  0x190UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB  0x1f4UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB  0xffffUL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB
+	__le16	auto_link_speed_mask;
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD     0x1UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB       0x2UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD       0x4UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB         0x8UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB         0x10UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB       0x20UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB        0x40UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB        0x80UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB        0x100UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB        0x200UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB        0x400UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB       0x800UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD      0x1000UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB        0x2000UL
+	u8	wirespeed;
+	#define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
+	#define PORT_PHY_CFG_REQ_WIRESPEED_ON  0x1UL
+	#define PORT_PHY_CFG_REQ_WIRESPEED_LAST PORT_PHY_CFG_REQ_WIRESPEED_ON
+	u8	lpbk;
+	#define PORT_PHY_CFG_REQ_LPBK_NONE   0x0UL
+	#define PORT_PHY_CFG_REQ_LPBK_LOCAL  0x1UL
+	#define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
+	#define PORT_PHY_CFG_REQ_LPBK_LAST  PORT_PHY_CFG_REQ_LPBK_REMOTE
+	u8	force_pause;
+	#define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX     0x1UL
+	#define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX     0x2UL
+	u8	unused_1;
+	__le32	preemphasis;
+	__le16	eee_link_speed_mask;
+	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1     0x1UL
+	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB     0x2UL
+	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2     0x4UL
+	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB       0x8UL
+	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3     0x10UL
+	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4     0x20UL
+	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB      0x40UL
+	u8	unused_2[2];
+	__le32	tx_lpi_timer;
+	#define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
+	#define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
+	__le32	unused_3;
 };
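
[Aside, not part of the patch: in hwrm_port_phy_cfg_input the enables bitmap selects which optional fields firmware should honor, while features such as EEE and FEC use paired _ENABLE/_DISABLE flag bits, so a request that sets neither bit leaves the current state untouched. A sketch of a forced-speed request; the function name is hypothetical, though the FORCE flag plus force_link_speed pairing follows the definitions above:]

	#include <linux/kernel.h>	/* cpu_to_le16(), cpu_to_le32() */

	/* Build a minimal forced-link-speed request: FORCE flag plus
	 * force_link_speed, with no autoneg fields enabled.
	 */
	static void bnxt_phy_force_speed(struct hwrm_port_phy_cfg_input *req,
					 u16 port_id, u16 fw_speed)
	{
		req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
		req->port_id = cpu_to_le16(port_id);
		/* e.g. fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB */
		req->force_link_speed = cpu_to_le16(fw_speed);
	}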
 
-/* Output (16 bytes) */
+/* hwrm_port_phy_cfg_output (size:128b/16B) */
 struct hwrm_port_phy_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_port_phy_qcfg */
-/* Input (24 bytes) */
+/* hwrm_port_phy_qcfg_input (size:192b/24B) */
 struct hwrm_port_phy_qcfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 port_id;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	port_id;
+	u8	unused_0[6];
 };
 
-/* Output (96 bytes) */
+/* hwrm_port_phy_qcfg_output (size:768b/96B) */
 struct hwrm_port_phy_qcfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 link;
-	#define PORT_PHY_QCFG_RESP_LINK_NO_LINK		   0x0UL
-	#define PORT_PHY_QCFG_RESP_LINK_SIGNAL			   0x1UL
-	#define PORT_PHY_QCFG_RESP_LINK_LINK			   0x2UL
-	u8 unused_0;
-	__le16 link_speed;
-	#define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB		   0x1UL
-	#define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB		   0xaUL
-	#define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB		   0x14UL
-	#define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB		   0x19UL
-	#define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB		   0x64UL
-	#define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB		   0xc8UL
-	#define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB		   0xfaUL
-	#define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB		   0x190UL
-	#define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB		   0x1f4UL
-	#define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB		   0x3e8UL
-	#define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB		   0xffffUL
-	u8 duplex_cfg;
-	#define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF		   0x0UL
-	#define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL		   0x1UL
-	u8 pause;
-	#define PORT_PHY_QCFG_RESP_PAUSE_TX			    0x1UL
-	#define PORT_PHY_QCFG_RESP_PAUSE_RX			    0x2UL
-	__le16 support_speeds;
-	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD	    0x1UL
-	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB	    0x2UL
-	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD	    0x4UL
-	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB		    0x8UL
-	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB		    0x10UL
-	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB	    0x20UL
-	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB		    0x40UL
-	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB		    0x80UL
-	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB		    0x100UL
-	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB		    0x200UL
-	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB		    0x400UL
-	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB	    0x800UL
-	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD	    0x1000UL
-	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB		    0x2000UL
-	__le16 force_link_speed;
-	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB	   0x1UL
-	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB	   0xaUL
-	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB	   0x14UL
-	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB	   0x19UL
-	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB	   0x64UL
-	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB	   0xc8UL
-	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB	   0xfaUL
-	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB	   0x190UL
-	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB	   0x1f4UL
-	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB	   0x3e8UL
-	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB	   0xffffUL
-	u8 auto_mode;
-	#define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE		   0x0UL
-	#define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS	   0x1UL
-	#define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED		   0x2UL
-	#define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW	   0x3UL
-	#define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK	   0x4UL
-	u8 auto_pause;
-	#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX		    0x1UL
-	#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX		    0x2UL
-	#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE	    0x4UL
-	__le16 auto_link_speed;
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB	   0x1UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB		   0xaUL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB		   0x14UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB	   0x19UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB	   0x64UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB	   0xc8UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB	   0xfaUL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB	   0x190UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB	   0x1f4UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB	   0x3e8UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB	   0xffffUL
-	__le16 auto_link_speed_mask;
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD    0x1UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB      0x2UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD      0x4UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB	    0x8UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB	    0x10UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB      0x20UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB       0x40UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB       0x80UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB       0x100UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB       0x200UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB       0x400UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB      0x800UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD     0x1000UL
-	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB       0x2000UL
-	u8 wirespeed;
-	#define PORT_PHY_QCFG_RESP_WIRESPEED_OFF		   0x0UL
-	#define PORT_PHY_QCFG_RESP_WIRESPEED_ON		   0x1UL
-	u8 lpbk;
-	#define PORT_PHY_QCFG_RESP_LPBK_NONE			   0x0UL
-	#define PORT_PHY_QCFG_RESP_LPBK_LOCAL			   0x1UL
-	#define PORT_PHY_QCFG_RESP_LPBK_REMOTE			   0x2UL
-	u8 force_pause;
-	#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX		    0x1UL
-	#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX		    0x2UL
-	u8 module_status;
-	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE		   0x0UL
-	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX	   0x1UL
-	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG       0x2UL
-	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN	   0x3UL
-	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED      0x4UL
-	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE    0xffUL
-	__le32 preemphasis;
-	u8 phy_maj;
-	u8 phy_min;
-	u8 phy_bld;
-	u8 phy_type;
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN		   0x0UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR		   0x1UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4		   0x2UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR		   0x3UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR		   0x4UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2		   0x5UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX		   0x6UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR		   0x7UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET		   0x8UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE		   0x9UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY	   0xaUL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L       0xbUL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S       0xcUL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N       0xdUL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR		   0xeUL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4	   0xfUL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4	   0x10UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4	   0x11UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4	   0x12UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10	   0x13UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4	   0x14UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4	   0x15UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4	   0x16UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4	   0x17UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE      0x18UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET		   0x19UL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX		   0x1aUL
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX		   0x1bUL
-	u8 media_type;
-	#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN		   0x0UL
-	#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP		   0x1UL
-	#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC		   0x2UL
-	#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE		   0x3UL
-	u8 xcvr_pkg_type;
-	#define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL    0x1UL
-	#define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL    0x2UL
-	u8 eee_config_phy_addr;
-	#define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK		    0x1fUL
-	#define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT		    0
-	#define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED	    0x20UL
-	#define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE	    0x40UL
-	#define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI	    0x80UL
-	#define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK		    0xe0UL
-	#define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT		    5
-	u8 parallel_detect;
-	#define PORT_PHY_QCFG_RESP_PARALLEL_DETECT		    0x1UL
-	#define PORT_PHY_QCFG_RESP_RESERVED_MASK		    0xfeUL
-	#define PORT_PHY_QCFG_RESP_RESERVED_SFT		    1
-	__le16 link_partner_adv_speeds;
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB   0x2UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD   0x4UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB     0x8UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB     0x10UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB   0x20UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB    0x40UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB    0x80UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB    0x100UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB    0x200UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB    0x400UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB   0x800UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD  0x1000UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB    0x2000UL
-	u8 link_partner_adv_auto_mode;
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	link;
+	#define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL
+	#define PORT_PHY_QCFG_RESP_LINK_SIGNAL  0x1UL
+	#define PORT_PHY_QCFG_RESP_LINK_LINK    0x2UL
+	#define PORT_PHY_QCFG_RESP_LINK_LAST   PORT_PHY_QCFG_RESP_LINK_LINK
+	u8	unused_0;
+	__le16	link_speed;
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB   0xaUL
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB   0x14UL
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB  0x64UL
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB  0xc8UL
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB  0xfaUL
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB  0x190UL
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB  0x1f4UL
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB  0xffffUL
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
+	u8	duplex_cfg;
+	#define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL
+	#define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL
+	#define PORT_PHY_QCFG_RESP_DUPLEX_CFG_LAST PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL
+	u8	pause;
+	#define PORT_PHY_QCFG_RESP_PAUSE_TX     0x1UL
+	#define PORT_PHY_QCFG_RESP_PAUSE_RX     0x2UL
+	__le16	support_speeds;
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD     0x1UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB       0x2UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD       0x4UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB         0x8UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB         0x10UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB       0x20UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB        0x40UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB        0x80UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB        0x100UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB        0x200UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB        0x400UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB       0x800UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD      0x1000UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB        0x2000UL
+	__le16	force_link_speed;
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB   0xaUL
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB   0x14UL
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB  0x64UL
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB  0xc8UL
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB  0xfaUL
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB  0x190UL
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB  0x1f4UL
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB  0xffffUL
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB
+	u8	auto_mode;
+	#define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE         0x0UL
+	#define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS   0x1UL
+	#define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED    0x2UL
+	#define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL
+	#define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK   0x4UL
+	#define PORT_PHY_QCFG_RESP_AUTO_MODE_LAST        PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK
+	u8	auto_pause;
+	#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX                0x1UL
+	#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX                0x2UL
+	#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE     0x4UL
+	__le16	auto_link_speed;
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB   0xaUL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB   0x14UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB  0x64UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB  0xc8UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB  0xfaUL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB  0x190UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB  0x1f4UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB  0xffffUL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB
+	__le16	auto_link_speed_mask;
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD     0x1UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB       0x2UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD       0x4UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB         0x8UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB         0x10UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB       0x20UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB        0x40UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB        0x80UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB        0x100UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB        0x200UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB        0x400UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB       0x800UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD      0x1000UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB        0x2000UL
+	u8	wirespeed;
+	#define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
+	#define PORT_PHY_QCFG_RESP_WIRESPEED_ON  0x1UL
+	#define PORT_PHY_QCFG_RESP_WIRESPEED_LAST PORT_PHY_QCFG_RESP_WIRESPEED_ON
+	u8	lpbk;
+	#define PORT_PHY_QCFG_RESP_LPBK_NONE   0x0UL
+	#define PORT_PHY_QCFG_RESP_LPBK_LOCAL  0x1UL
+	#define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
+	#define PORT_PHY_QCFG_RESP_LPBK_LAST  PORT_PHY_QCFG_RESP_LPBK_REMOTE
+	u8	force_pause;
+	#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX     0x1UL
+	#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX     0x2UL
+	u8	module_status;
+	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE          0x0UL
+	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX     0x1UL
+	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG    0x2UL
+	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN       0x3UL
+	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED   0x4UL
+	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
+	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST         PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE
+	__le32	preemphasis;
+	u8	phy_maj;
+	u8	phy_min;
+	u8	phy_bld;
+	u8	phy_type;
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN          0x0UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR           0x1UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4          0x2UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR           0x3UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR           0x4UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2          0x5UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX           0x6UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR           0x7UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET            0x8UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE           0x9UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY      0xaUL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L  0xbUL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S  0xcUL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N  0xdUL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR       0xeUL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4     0xfUL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4     0x10UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4     0x11UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4     0x12UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10    0x13UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4      0x14UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4      0x15UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4      0x16UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4      0x17UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET         0x19UL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX        0x1aUL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX        0x1bUL
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST            PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX
+	u8	media_type;
+	#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
+	#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP      0x1UL
+	#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC     0x2UL
+	#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE   0x3UL
+	#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_LAST   PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE
+	u8	xcvr_pkg_type;
+	#define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL
+	#define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL
+	#define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_LAST         PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL
+	u8	eee_config_phy_addr;
+	#define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK              0x1fUL
+	#define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT               0
+	#define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK            0xe0UL
+	#define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT             5
+	#define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED      0x20UL
+	#define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE       0x40UL
+	#define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI       0x80UL
+	u8	parallel_detect;
+	#define PORT_PHY_QCFG_RESP_PARALLEL_DETECT     0x1UL
+	__le16	link_partner_adv_speeds;
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD     0x1UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB       0x2UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD       0x4UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB         0x8UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB         0x10UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB       0x20UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB        0x40UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB        0x80UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB        0x100UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB        0x200UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB        0x400UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB       0x800UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD      0x1000UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB        0x2000UL
+	u8	link_partner_adv_auto_mode;
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE         0x0UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS   0x1UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED    0x2UL
 	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW 0x3UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL
-	u8 link_partner_adv_pause;
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX       0x1UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX       0x2UL
-	__le16 adv_eee_link_speed_mask;
-	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1   0x1UL
-	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB   0x2UL
-	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2   0x4UL
-	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB     0x8UL
-	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3   0x10UL
-	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4   0x20UL
-	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB    0x40UL
-	__le16 link_partner_adv_eee_link_speed_mask;
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
-	__le32 xcvr_identifier_type_tx_lpi_timer;
-	#define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK		    0xffffffUL
-	#define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT		    0
-	#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK       0xff000000UL
-	#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT	    24
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK   0x4UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_LAST        PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK
+	u8	link_partner_adv_pause;
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX     0x1UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX     0x2UL
+	__le16	adv_eee_link_speed_mask;
+	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1     0x1UL
+	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB     0x2UL
+	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2     0x4UL
+	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB       0x8UL
+	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3     0x10UL
+	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4     0x20UL
+	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB      0x40UL
+	__le16	link_partner_adv_eee_link_speed_mask;
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1     0x1UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB     0x2UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2     0x4UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB       0x8UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3     0x10UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4     0x20UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB      0x40UL
+	__le32	xcvr_identifier_type_tx_lpi_timer;
+	#define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK            0xffffffUL
+	#define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT             0
+	#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK    0xff000000UL
+	#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT     24
 	#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_UNKNOWN   (0x0UL << 24)
 	#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP       (0x3UL << 24)
 	#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP      (0xcUL << 24)
 	#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS  (0xdUL << 24)
 	#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28    (0x11UL << 24)
-	__le16 fec_cfg;
-	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED      0x1UL
-	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED   0x2UL
-	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED     0x4UL
-	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED  0x8UL
-	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED    0x10UL
-	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED  0x20UL
-	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED    0x40UL
-	u8 duplex_state;
-	#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF		   0x0UL
-	#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL		   0x1UL
-	u8 unused_1;
-	char phy_vendor_name[16];
-	char phy_vendor_partnumber[16];
-	__le32 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 unused_5;
-	u8 valid;
+	#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST     PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28
+	__le16	fec_cfg;
+	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED         0x1UL
+	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED      0x2UL
+	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED        0x4UL
+	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED     0x8UL
+	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED       0x10UL
+	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED     0x20UL
+	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED       0x40UL
+	u8	duplex_state;
+	#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
+	#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
+	#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_LAST PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
+	u8	option_flags;
+	#define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT     0x1UL
+	char	phy_vendor_name[16];
+	char	phy_vendor_partnumber[16];
+	u8	unused_2[7];
+	u8	valid;
 };
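
A minimal decode sketch for the packed words above (hypothetical helper name; assumes the kernel's le32_to_cpu for the little-endian HWRM response): eee_config_phy_addr carries the PHY address in its low five bits with the EEE flags above, and xcvr_identifier_type_tx_lpi_timer packs the TX LPI timer into bits 0-23 with the transceiver identifier in bits 24-31.

static u32 example_xcvr_type(const struct hwrm_port_phy_qcfg_output *resp,
			     u8 *phy_addr, u32 *tx_lpi_timer)
{
	u32 w = le32_to_cpu(resp->xcvr_identifier_type_tx_lpi_timer);

	/* Low 5 bits are the PHY address; bits 5-7 are the EEE config. */
	*phy_addr = resp->eee_config_phy_addr &
		    PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
	*tx_lpi_timer = w & PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;

	/* The *_XCVR_IDENTIFIER_TYPE_* values are pre-shifted by 24,
	 * so the masked word compares directly against e.g.
	 * PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP.
	 */
	return w & PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK;
}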
 
-/* hwrm_port_mac_cfg */
-/* Input (40 bytes) */
+/* hwrm_port_mac_cfg_input (size:320b/40B) */
 struct hwrm_port_mac_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK		    0x1UL
-	#define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE	    0x2UL
-	#define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE       0x4UL
-	#define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE	    0x8UL
-	#define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE    0x10UL
-	#define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE   0x20UL
-	#define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE    0x40UL
-	#define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE   0x80UL
-	#define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE		    0x100UL
-	#define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE		    0x200UL
-	#define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE	    0x400UL
-	#define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE      0x800UL
-	#define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE	    0x1000UL
-	__le32 enables;
-	#define PORT_MAC_CFG_REQ_ENABLES_IPG			    0x1UL
-	#define PORT_MAC_CFG_REQ_ENABLES_LPBK			    0x2UL
-	#define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI      0x4UL
-	#define PORT_MAC_CFG_REQ_ENABLES_RESERVED1		    0x8UL
-	#define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI    0x10UL
-	#define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI	    0x20UL
-	#define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
-	#define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
-	#define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG		    0x100UL
-	__le16 port_id;
-	u8 ipg;
-	u8 lpbk;
-	#define PORT_MAC_CFG_REQ_LPBK_NONE			   0x0UL
-	#define PORT_MAC_CFG_REQ_LPBK_LOCAL			   0x1UL
-	#define PORT_MAC_CFG_REQ_LPBK_REMOTE			   0x2UL
-	u8 vlan_pri2cos_map_pri;
-	u8 reserved1;
-	u8 tunnel_pri2cos_map_pri;
-	u8 dscp2pri_map_pri;
-	__le16 rx_ts_capture_ptp_msg_type;
-	__le16 tx_ts_capture_ptp_msg_type;
-	u8 cos_field_cfg;
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1		    0x1UL
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK   0x6UL
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT    1
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1)
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1)
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1)
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1)
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST    PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT  3
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST    PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK    0xe0UL
-	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT     5
-	u8 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK                    0x1UL
+	#define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE           0x2UL
+	#define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE         0x4UL
+	#define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE            0x8UL
+	#define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE      0x10UL
+	#define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE     0x20UL
+	#define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE      0x40UL
+	#define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE     0x80UL
+	#define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE                0x100UL
+	#define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE               0x200UL
+	#define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE          0x400UL
+	#define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE        0x800UL
+	#define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE           0x1000UL
+	__le32	enables;
+	#define PORT_MAC_CFG_REQ_ENABLES_IPG                            0x1UL
+	#define PORT_MAC_CFG_REQ_ENABLES_LPBK                           0x2UL
+	#define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI           0x4UL
+	#define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI         0x10UL
+	#define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI               0x20UL
+	#define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE     0x40UL
+	#define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE     0x80UL
+	#define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG                  0x100UL
+	__le16	port_id;
+	u8	ipg;
+	u8	lpbk;
+	#define PORT_MAC_CFG_REQ_LPBK_NONE   0x0UL
+	#define PORT_MAC_CFG_REQ_LPBK_LOCAL  0x1UL
+	#define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
+	#define PORT_MAC_CFG_REQ_LPBK_LAST  PORT_MAC_CFG_REQ_LPBK_REMOTE
+	u8	vlan_pri2cos_map_pri;
+	u8	reserved1;
+	u8	tunnel_pri2cos_map_pri;
+	u8	dscp2pri_map_pri;
+	__le16	rx_ts_capture_ptp_msg_type;
+	__le16	tx_ts_capture_ptp_msg_type;
+	u8	cos_field_cfg;
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1                     0x1UL
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK         0x6UL
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT          1
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST      (0x0UL << 1)
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER          (0x1UL << 1)
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST      (0x2UL << 1)
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED    (0x3UL << 1)
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST          PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK       0x18UL
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT        3
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST    (0x0UL << 3)
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER        (0x1UL << 3)
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST    (0x2UL << 3)
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED  (0x3UL << 3)
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST        PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK          0xe0UL
+	#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT           5
+	u8	unused_0[3];
 };
 
-/* Output (16 bytes) */
+/* hwrm_port_mac_cfg_output (size:128b/16B) */
 struct hwrm_port_mac_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 mru;
-	__le16 mtu;
-	u8 ipg;
-	u8 lpbk;
-	#define PORT_MAC_CFG_RESP_LPBK_NONE			   0x0UL
-	#define PORT_MAC_CFG_RESP_LPBK_LOCAL			   0x1UL
-	#define PORT_MAC_CFG_RESP_LPBK_REMOTE			   0x2UL
-	u8 unused_0;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	mru;
+	__le16	mtu;
+	u8	ipg;
+	u8	lpbk;
+	#define PORT_MAC_CFG_RESP_LPBK_NONE   0x0UL
+	#define PORT_MAC_CFG_RESP_LPBK_LOCAL  0x1UL
+	#define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
+	#define PORT_MAC_CFG_RESP_LPBK_LAST  PORT_MAC_CFG_RESP_LPBK_REMOTE
+	u8	unused_0;
+	u8	valid;
 };
 
-/* hwrm_port_mac_ptp_qcfg */
-/* Input (24 bytes) */
+/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */
 struct hwrm_port_mac_ptp_qcfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 port_id;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	port_id;
+	u8	unused_0[6];
 };
 
-/* Output (80 bytes) */
+/* hwrm_port_mac_ptp_qcfg_output (size:640b/80B) */
 struct hwrm_port_mac_ptp_qcfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 flags;
-	#define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS	    0x1UL
-	#define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS	    0x2UL
-	u8 unused_0;
-	__le16 unused_1;
-	__le32 rx_ts_reg_off_lower;
-	__le32 rx_ts_reg_off_upper;
-	__le32 rx_ts_reg_off_seq_id;
-	__le32 rx_ts_reg_off_src_id_0;
-	__le32 rx_ts_reg_off_src_id_1;
-	__le32 rx_ts_reg_off_src_id_2;
-	__le32 rx_ts_reg_off_domain_id;
-	__le32 rx_ts_reg_off_fifo;
-	__le32 rx_ts_reg_off_fifo_adv;
-	__le32 rx_ts_reg_off_granularity;
-	__le32 tx_ts_reg_off_lower;
-	__le32 tx_ts_reg_off_upper;
-	__le32 tx_ts_reg_off_seq_id;
-	__le32 tx_ts_reg_off_fifo;
-	__le32 tx_ts_reg_off_granularity;
-	__le32 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 unused_5;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	flags;
+	#define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS     0x1UL
+	#define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS       0x2UL
+	u8	unused_0[3];
+	__le32	rx_ts_reg_off_lower;
+	__le32	rx_ts_reg_off_upper;
+	__le32	rx_ts_reg_off_seq_id;
+	__le32	rx_ts_reg_off_src_id_0;
+	__le32	rx_ts_reg_off_src_id_1;
+	__le32	rx_ts_reg_off_src_id_2;
+	__le32	rx_ts_reg_off_domain_id;
+	__le32	rx_ts_reg_off_fifo;
+	__le32	rx_ts_reg_off_fifo_adv;
+	__le32	rx_ts_reg_off_granularity;
+	__le32	tx_ts_reg_off_lower;
+	__le32	tx_ts_reg_off_upper;
+	__le32	tx_ts_reg_off_seq_id;
+	__le32	tx_ts_reg_off_fifo;
+	__le32	tx_ts_reg_off_granularity;
+	u8	unused_1[7];
+	u8	valid;
 };
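
A short sketch of how the flags byte above is meant to be consumed (hypothetical helper): the response advertises whether the rx/tx_ts_reg_off_* timestamp registers can be read directly through the mapped offsets, or only indirectly via further HWRM calls.

static bool example_ptp_direct_access(const struct hwrm_port_mac_ptp_qcfg_output *resp)
{
	return resp->flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS;
}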
 
-/* hwrm_port_qstats */
-/* Input (40 bytes) */
+/* hwrm_port_qstats_input (size:320b/40B) */
 struct hwrm_port_qstats_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 port_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2[3];
-	u8 unused_3;
-	__le64 tx_stat_host_addr;
-	__le64 rx_stat_host_addr;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	port_id;
+	u8	unused_0[6];
+	__le64	tx_stat_host_addr;
+	__le64	rx_stat_host_addr;
 };
 
-/* Output (16 bytes) */
+/* hwrm_port_qstats_output (size:128b/16B) */
 struct hwrm_port_qstats_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 tx_stat_size;
-	__le16 rx_stat_size;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	tx_stat_size;
+	__le16	rx_stat_size;
+	u8	unused_0[3];
+	u8	valid;
 };
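
A sketch under assumed names (tx_stats_map/rx_stats_map are hypothetical dma_addr_t handles from a caller-owned coherent allocation): the qstats request passes two host DMA addresses for the firmware to fill, and the response's tx_stat_size/rx_stat_size report how many bytes were actually written into each buffer.

static void example_fill_qstats_req(struct hwrm_port_qstats_input *req,
				    u16 port_id,
				    dma_addr_t tx_stats_map,
				    dma_addr_t rx_stats_map)
{
	req->port_id = cpu_to_le16(port_id);
	req->tx_stat_host_addr = cpu_to_le64(tx_stats_map);
	req->rx_stat_host_addr = cpu_to_le64(rx_stats_map);
}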
 
-/* hwrm_port_lpbk_qstats */
-/* Input (16 bytes) */
+/* hwrm_port_lpbk_qstats_input (size:128b/16B) */
 struct hwrm_port_lpbk_qstats_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
 };
 
-/* Output (96 bytes) */
+/* hwrm_port_lpbk_qstats_output (size:768b/96B) */
 struct hwrm_port_lpbk_qstats_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le64 lpbk_ucast_frames;
-	__le64 lpbk_mcast_frames;
-	__le64 lpbk_bcast_frames;
-	__le64 lpbk_ucast_bytes;
-	__le64 lpbk_mcast_bytes;
-	__le64 lpbk_bcast_bytes;
-	__le64 tx_stat_discard;
-	__le64 tx_stat_error;
-	__le64 rx_stat_discard;
-	__le64 rx_stat_error;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le64	lpbk_ucast_frames;
+	__le64	lpbk_mcast_frames;
+	__le64	lpbk_bcast_frames;
+	__le64	lpbk_ucast_bytes;
+	__le64	lpbk_mcast_bytes;
+	__le64	lpbk_bcast_bytes;
+	__le64	tx_stat_discard;
+	__le64	tx_stat_error;
+	__le64	rx_stat_discard;
+	__le64	rx_stat_error;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_port_clr_stats */
-/* Input (24 bytes) */
+/* hwrm_port_clr_stats_input (size:192b/24B) */
 struct hwrm_port_clr_stats_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 port_id;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	port_id;
+	u8	unused_0[6];
 };
 
-/* Output (16 bytes) */
+/* hwrm_port_clr_stats_output (size:128b/16B) */
 struct hwrm_port_clr_stats_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_port_lpbk_clr_stats */
-/* Input (16 bytes) */
+/* hwrm_port_lpbk_clr_stats_input (size:128b/16B) */
 struct hwrm_port_lpbk_clr_stats_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
 };
 
-/* Output (16 bytes) */
+/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */
 struct hwrm_port_lpbk_clr_stats_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_port_phy_qcaps */
-/* Input (24 bytes) */
+/* hwrm_port_phy_qcaps_input (size:192b/24B) */
 struct hwrm_port_phy_qcaps_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 port_id;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	port_id;
+	u8	unused_0[6];
 };
 
-/* Output (24 bytes) */
+/* hwrm_port_phy_qcaps_output (size:192b/24B) */
 struct hwrm_port_phy_qcaps_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 flags;
-	#define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED	    0x1UL
-	#define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK		    0xfeUL
-	#define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT		    1
-	u8 port_cnt;
-	#define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN		   0x0UL
-	#define PORT_PHY_QCAPS_RESP_PORT_CNT_1			   0x1UL
-	#define PORT_PHY_QCAPS_RESP_PORT_CNT_2			   0x2UL
-	#define PORT_PHY_QCAPS_RESP_PORT_CNT_3			   0x3UL
-	#define PORT_PHY_QCAPS_RESP_PORT_CNT_4			   0x4UL
-	__le16 supported_speeds_force_mode;
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
-	__le16 supported_speeds_auto_mode;
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
-	__le16 supported_speeds_eee_mode;
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB  0x8UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL
-	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL
-	__le32 tx_lpi_timer_low;
-	#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK	    0xffffffUL
-	#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT	    0
-	#define PORT_PHY_QCAPS_RESP_RSVD2_MASK			    0xff000000UL
-	#define PORT_PHY_QCAPS_RESP_RSVD2_SFT			    24
-	__le32 valid_tx_lpi_timer_high;
-	#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK	    0xffffffUL
-	#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT	    0
-	#define PORT_PHY_QCAPS_RESP_VALID_MASK			    0xff000000UL
-	#define PORT_PHY_QCAPS_RESP_VALID_SFT			    24
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	flags;
+	#define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED     0x1UL
+	#define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK        0xfeUL
+	#define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT         1
+	u8	port_cnt;
+	#define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
+	#define PORT_PHY_QCAPS_RESP_PORT_CNT_1       0x1UL
+	#define PORT_PHY_QCAPS_RESP_PORT_CNT_2       0x2UL
+	#define PORT_PHY_QCAPS_RESP_PORT_CNT_3       0x3UL
+	#define PORT_PHY_QCAPS_RESP_PORT_CNT_4       0x4UL
+	#define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST   PORT_PHY_QCAPS_RESP_PORT_CNT_4
+	__le16	supported_speeds_force_mode;
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD     0x1UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB       0x2UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD       0x4UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB         0x8UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB         0x10UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB       0x20UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB        0x40UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB        0x80UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB        0x100UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB        0x200UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB        0x400UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB       0x800UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD      0x1000UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB        0x2000UL
+	__le16	supported_speeds_auto_mode;
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD     0x1UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB       0x2UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD       0x4UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB         0x8UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB         0x10UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB       0x20UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB        0x40UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB        0x80UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB        0x100UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB        0x200UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB        0x400UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB       0x800UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD      0x1000UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB        0x2000UL
+	__le16	supported_speeds_eee_mode;
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1     0x1UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB     0x2UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2     0x4UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB       0x8UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3     0x10UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4     0x20UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB      0x40UL
+	__le32	tx_lpi_timer_low;
+	#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL
+	#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0
+	#define PORT_PHY_QCAPS_RESP_RSVD2_MASK           0xff000000UL
+	#define PORT_PHY_QCAPS_RESP_RSVD2_SFT            24
+	__le32	valid_tx_lpi_timer_high;
+	#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
+	#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
+	#define PORT_PHY_QCAPS_RESP_VALID_MASK            0xff000000UL
+	#define PORT_PHY_QCAPS_RESP_VALID_SFT             24
 };
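
Illustrative decode only (hypothetical helper): unlike most responses, this one has no standalone trailing valid byte; it is packed into the top 8 bits of valid_tx_lpi_timer_high, so a caller masks it out alongside the 24-bit timer value.

static bool example_phy_qcaps_valid(const struct hwrm_port_phy_qcaps_output *resp,
				    u32 *tx_lpi_timer_high)
{
	u32 w = le32_to_cpu(resp->valid_tx_lpi_timer_high);

	*tx_lpi_timer_high = w & PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
	return (w & PORT_PHY_QCAPS_RESP_VALID_MASK) >>
	       PORT_PHY_QCAPS_RESP_VALID_SFT;
}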
 
-/* hwrm_port_phy_i2c_read */
-/* Input (40 bytes) */
+/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
 struct hwrm_port_phy_i2c_read_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	__le32 enables;
-	#define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET	    0x1UL
-	__le16 port_id;
-	u8 i2c_slave_addr;
-	u8 unused_0;
-	__le16 page_number;
-	__le16 page_offset;
-	u8 data_length;
-	u8 unused_1[7];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	__le32	enables;
+	#define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET     0x1UL
+	__le16	port_id;
+	u8	i2c_slave_addr;
+	u8	unused_0;
+	__le16	page_number;
+	__le16	page_offset;
+	u8	data_length;
+	u8	unused_1[7];
 };
 
-/* Output (80 bytes) */
+/* hwrm_port_phy_i2c_read_output (size:640b/80B) */
 struct hwrm_port_phy_i2c_read_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 data[16];
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le32	data[16];
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_port_led_cfg */
-/* Input (64 bytes) */
+/* hwrm_port_led_cfg_input (size:512b/64B) */
 struct hwrm_port_led_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 enables;
-	#define PORT_LED_CFG_REQ_ENABLES_LED0_ID		    0x1UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED0_STATE		    0x2UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR		    0x4UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON		    0x8UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF	    0x10UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID		    0x20UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED1_ID		    0x40UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED1_STATE		    0x80UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR		    0x100UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON		    0x200UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF	    0x400UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID		    0x800UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED2_ID		    0x1000UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED2_STATE		    0x2000UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR		    0x4000UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON		    0x8000UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF	    0x10000UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID		    0x20000UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED3_ID		    0x40000UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED3_STATE		    0x80000UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR		    0x100000UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON		    0x200000UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF	    0x400000UL
-	#define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID		    0x800000UL
-	__le16 port_id;
-	u8 num_leds;
-	u8 rsvd;
-	u8 led0_id;
-	u8 led0_state;
-	#define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT		   0x0UL
-	#define PORT_LED_CFG_REQ_LED0_STATE_OFF		   0x1UL
-	#define PORT_LED_CFG_REQ_LED0_STATE_ON			   0x2UL
-	#define PORT_LED_CFG_REQ_LED0_STATE_BLINK		   0x3UL
-	#define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT		   0x4UL
-	u8 led0_color;
-	#define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT		   0x0UL
-	#define PORT_LED_CFG_REQ_LED0_COLOR_AMBER		   0x1UL
-	#define PORT_LED_CFG_REQ_LED0_COLOR_GREEN		   0x2UL
-	#define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER		   0x3UL
-	u8 unused_0;
-	__le16 led0_blink_on;
-	__le16 led0_blink_off;
-	u8 led0_group_id;
-	u8 rsvd0;
-	u8 led1_id;
-	u8 led1_state;
-	#define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT		   0x0UL
-	#define PORT_LED_CFG_REQ_LED1_STATE_OFF		   0x1UL
-	#define PORT_LED_CFG_REQ_LED1_STATE_ON			   0x2UL
-	#define PORT_LED_CFG_REQ_LED1_STATE_BLINK		   0x3UL
-	#define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT		   0x4UL
-	u8 led1_color;
-	#define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT		   0x0UL
-	#define PORT_LED_CFG_REQ_LED1_COLOR_AMBER		   0x1UL
-	#define PORT_LED_CFG_REQ_LED1_COLOR_GREEN		   0x2UL
-	#define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER		   0x3UL
-	u8 unused_1;
-	__le16 led1_blink_on;
-	__le16 led1_blink_off;
-	u8 led1_group_id;
-	u8 rsvd1;
-	u8 led2_id;
-	u8 led2_state;
-	#define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT		   0x0UL
-	#define PORT_LED_CFG_REQ_LED2_STATE_OFF		   0x1UL
-	#define PORT_LED_CFG_REQ_LED2_STATE_ON			   0x2UL
-	#define PORT_LED_CFG_REQ_LED2_STATE_BLINK		   0x3UL
-	#define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT		   0x4UL
-	u8 led2_color;
-	#define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT		   0x0UL
-	#define PORT_LED_CFG_REQ_LED2_COLOR_AMBER		   0x1UL
-	#define PORT_LED_CFG_REQ_LED2_COLOR_GREEN		   0x2UL
-	#define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER		   0x3UL
-	u8 unused_2;
-	__le16 led2_blink_on;
-	__le16 led2_blink_off;
-	u8 led2_group_id;
-	u8 rsvd2;
-	u8 led3_id;
-	u8 led3_state;
-	#define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT		   0x0UL
-	#define PORT_LED_CFG_REQ_LED3_STATE_OFF		   0x1UL
-	#define PORT_LED_CFG_REQ_LED3_STATE_ON			   0x2UL
-	#define PORT_LED_CFG_REQ_LED3_STATE_BLINK		   0x3UL
-	#define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT		   0x4UL
-	u8 led3_color;
-	#define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT		   0x0UL
-	#define PORT_LED_CFG_REQ_LED3_COLOR_AMBER		   0x1UL
-	#define PORT_LED_CFG_REQ_LED3_COLOR_GREEN		   0x2UL
-	#define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER		   0x3UL
-	u8 unused_3;
-	__le16 led3_blink_on;
-	__le16 led3_blink_off;
-	u8 led3_group_id;
-	u8 rsvd3;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	enables;
+	#define PORT_LED_CFG_REQ_ENABLES_LED0_ID            0x1UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED0_STATE         0x2UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR         0x4UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON      0x8UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF     0x10UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID      0x20UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED1_ID            0x40UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED1_STATE         0x80UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR         0x100UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON      0x200UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF     0x400UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID      0x800UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED2_ID            0x1000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED2_STATE         0x2000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR         0x4000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON      0x8000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF     0x10000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID      0x20000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED3_ID            0x40000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED3_STATE         0x80000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR         0x100000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON      0x200000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF     0x400000UL
+	#define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID      0x800000UL
+	__le16	port_id;
+	u8	num_leds;
+	u8	rsvd;
+	u8	led0_id;
+	u8	led0_state;
+	#define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT  0x0UL
+	#define PORT_LED_CFG_REQ_LED0_STATE_OFF      0x1UL
+	#define PORT_LED_CFG_REQ_LED0_STATE_ON       0x2UL
+	#define PORT_LED_CFG_REQ_LED0_STATE_BLINK    0x3UL
+	#define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL
+	#define PORT_LED_CFG_REQ_LED0_STATE_LAST    PORT_LED_CFG_REQ_LED0_STATE_BLINKALT
+	u8	led0_color;
+	#define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT    0x0UL
+	#define PORT_LED_CFG_REQ_LED0_COLOR_AMBER      0x1UL
+	#define PORT_LED_CFG_REQ_LED0_COLOR_GREEN      0x2UL
+	#define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL
+	#define PORT_LED_CFG_REQ_LED0_COLOR_LAST      PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER
+	u8	unused_0;
+	__le16	led0_blink_on;
+	__le16	led0_blink_off;
+	u8	led0_group_id;
+	u8	rsvd0;
+	u8	led1_id;
+	u8	led1_state;
+	#define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT  0x0UL
+	#define PORT_LED_CFG_REQ_LED1_STATE_OFF      0x1UL
+	#define PORT_LED_CFG_REQ_LED1_STATE_ON       0x2UL
+	#define PORT_LED_CFG_REQ_LED1_STATE_BLINK    0x3UL
+	#define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL
+	#define PORT_LED_CFG_REQ_LED1_STATE_LAST    PORT_LED_CFG_REQ_LED1_STATE_BLINKALT
+	u8	led1_color;
+	#define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT    0x0UL
+	#define PORT_LED_CFG_REQ_LED1_COLOR_AMBER      0x1UL
+	#define PORT_LED_CFG_REQ_LED1_COLOR_GREEN      0x2UL
+	#define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL
+	#define PORT_LED_CFG_REQ_LED1_COLOR_LAST      PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER
+	u8	unused_1;
+	__le16	led1_blink_on;
+	__le16	led1_blink_off;
+	u8	led1_group_id;
+	u8	rsvd1;
+	u8	led2_id;
+	u8	led2_state;
+	#define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT  0x0UL
+	#define PORT_LED_CFG_REQ_LED2_STATE_OFF      0x1UL
+	#define PORT_LED_CFG_REQ_LED2_STATE_ON       0x2UL
+	#define PORT_LED_CFG_REQ_LED2_STATE_BLINK    0x3UL
+	#define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL
+	#define PORT_LED_CFG_REQ_LED2_STATE_LAST    PORT_LED_CFG_REQ_LED2_STATE_BLINKALT
+	u8	led2_color;
+	#define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT    0x0UL
+	#define PORT_LED_CFG_REQ_LED2_COLOR_AMBER      0x1UL
+	#define PORT_LED_CFG_REQ_LED2_COLOR_GREEN      0x2UL
+	#define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL
+	#define PORT_LED_CFG_REQ_LED2_COLOR_LAST      PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER
+	u8	unused_2;
+	__le16	led2_blink_on;
+	__le16	led2_blink_off;
+	u8	led2_group_id;
+	u8	rsvd2;
+	u8	led3_id;
+	u8	led3_state;
+	#define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT  0x0UL
+	#define PORT_LED_CFG_REQ_LED3_STATE_OFF      0x1UL
+	#define PORT_LED_CFG_REQ_LED3_STATE_ON       0x2UL
+	#define PORT_LED_CFG_REQ_LED3_STATE_BLINK    0x3UL
+	#define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL
+	#define PORT_LED_CFG_REQ_LED3_STATE_LAST    PORT_LED_CFG_REQ_LED3_STATE_BLINKALT
+	u8	led3_color;
+	#define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT    0x0UL
+	#define PORT_LED_CFG_REQ_LED3_COLOR_AMBER      0x1UL
+	#define PORT_LED_CFG_REQ_LED3_COLOR_GREEN      0x2UL
+	#define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL
+	#define PORT_LED_CFG_REQ_LED3_COLOR_LAST      PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER
+	u8	unused_3;
+	__le16	led3_blink_on;
+	__le16	led3_blink_off;
+	u8	led3_group_id;
+	u8	rsvd3;
 };
 
-/* Output (16 bytes) */
+/* hwrm_port_led_cfg_output (size:128b/16B) */
 struct hwrm_port_led_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
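
A hedged sketch of configuring one LED to blink (hypothetical helper; the LED id would normally come from the LED qcaps response below, and the blink intervals are taken to be in milliseconds, which is an assumption here): each LED field is only applied when its matching ENABLES bit is set.

static void example_blink_led0(struct hwrm_port_led_cfg_input *req,
			       u16 port_id, u8 led_id)
{
	req->port_id = cpu_to_le16(port_id);
	req->num_leds = 1;
	req->enables = cpu_to_le32(PORT_LED_CFG_REQ_ENABLES_LED0_ID |
				   PORT_LED_CFG_REQ_ENABLES_LED0_STATE |
				   PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON |
				   PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF);
	req->led0_id = led_id;
	req->led0_state = PORT_LED_CFG_REQ_LED0_STATE_BLINK;
	req->led0_blink_on = cpu_to_le16(500);
	req->led0_blink_off = cpu_to_le16(500);
}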
 
-/* hwrm_port_led_qcaps */
-/* Input (24 bytes) */
+/* hwrm_port_led_qcfg_input (size:192b/24B) */
+struct hwrm_port_led_qcfg_input {
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	port_id;
+	u8	unused_0[6];
+};
+
+/* hwrm_port_led_qcfg_output (size:448b/56B) */
+struct hwrm_port_led_qcfg_output {
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	num_leds;
+	u8	led0_id;
+	u8	led0_type;
+	#define PORT_LED_QCFG_RESP_LED0_TYPE_SPEED    0x0UL
+	#define PORT_LED_QCFG_RESP_LED0_TYPE_ACTIVITY 0x1UL
+	#define PORT_LED_QCFG_RESP_LED0_TYPE_INVALID  0xffUL
+	#define PORT_LED_QCFG_RESP_LED0_TYPE_LAST    PORT_LED_QCFG_RESP_LED0_TYPE_INVALID
+	u8	led0_state;
+	#define PORT_LED_QCFG_RESP_LED0_STATE_DEFAULT  0x0UL
+	#define PORT_LED_QCFG_RESP_LED0_STATE_OFF      0x1UL
+	#define PORT_LED_QCFG_RESP_LED0_STATE_ON       0x2UL
+	#define PORT_LED_QCFG_RESP_LED0_STATE_BLINK    0x3UL
+	#define PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT 0x4UL
+	#define PORT_LED_QCFG_RESP_LED0_STATE_LAST    PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT
+	u8	led0_color;
+	#define PORT_LED_QCFG_RESP_LED0_COLOR_DEFAULT    0x0UL
+	#define PORT_LED_QCFG_RESP_LED0_COLOR_AMBER      0x1UL
+	#define PORT_LED_QCFG_RESP_LED0_COLOR_GREEN      0x2UL
+	#define PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER 0x3UL
+	#define PORT_LED_QCFG_RESP_LED0_COLOR_LAST      PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER
+	u8	unused_0;
+	__le16	led0_blink_on;
+	__le16	led0_blink_off;
+	u8	led0_group_id;
+	u8	led1_id;
+	u8	led1_type;
+	#define PORT_LED_QCFG_RESP_LED1_TYPE_SPEED    0x0UL
+	#define PORT_LED_QCFG_RESP_LED1_TYPE_ACTIVITY 0x1UL
+	#define PORT_LED_QCFG_RESP_LED1_TYPE_INVALID  0xffUL
+	#define PORT_LED_QCFG_RESP_LED1_TYPE_LAST    PORT_LED_QCFG_RESP_LED1_TYPE_INVALID
+	u8	led1_state;
+	#define PORT_LED_QCFG_RESP_LED1_STATE_DEFAULT  0x0UL
+	#define PORT_LED_QCFG_RESP_LED1_STATE_OFF      0x1UL
+	#define PORT_LED_QCFG_RESP_LED1_STATE_ON       0x2UL
+	#define PORT_LED_QCFG_RESP_LED1_STATE_BLINK    0x3UL
+	#define PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT 0x4UL
+	#define PORT_LED_QCFG_RESP_LED1_STATE_LAST    PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT
+	u8	led1_color;
+	#define PORT_LED_QCFG_RESP_LED1_COLOR_DEFAULT    0x0UL
+	#define PORT_LED_QCFG_RESP_LED1_COLOR_AMBER      0x1UL
+	#define PORT_LED_QCFG_RESP_LED1_COLOR_GREEN      0x2UL
+	#define PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER 0x3UL
+	#define PORT_LED_QCFG_RESP_LED1_COLOR_LAST      PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER
+	u8	unused_1;
+	__le16	led1_blink_on;
+	__le16	led1_blink_off;
+	u8	led1_group_id;
+	u8	led2_id;
+	u8	led2_type;
+	#define PORT_LED_QCFG_RESP_LED2_TYPE_SPEED    0x0UL
+	#define PORT_LED_QCFG_RESP_LED2_TYPE_ACTIVITY 0x1UL
+	#define PORT_LED_QCFG_RESP_LED2_TYPE_INVALID  0xffUL
+	#define PORT_LED_QCFG_RESP_LED2_TYPE_LAST    PORT_LED_QCFG_RESP_LED2_TYPE_INVALID
+	u8	led2_state;
+	#define PORT_LED_QCFG_RESP_LED2_STATE_DEFAULT  0x0UL
+	#define PORT_LED_QCFG_RESP_LED2_STATE_OFF      0x1UL
+	#define PORT_LED_QCFG_RESP_LED2_STATE_ON       0x2UL
+	#define PORT_LED_QCFG_RESP_LED2_STATE_BLINK    0x3UL
+	#define PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT 0x4UL
+	#define PORT_LED_QCFG_RESP_LED2_STATE_LAST    PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT
+	u8	led2_color;
+	#define PORT_LED_QCFG_RESP_LED2_COLOR_DEFAULT    0x0UL
+	#define PORT_LED_QCFG_RESP_LED2_COLOR_AMBER      0x1UL
+	#define PORT_LED_QCFG_RESP_LED2_COLOR_GREEN      0x2UL
+	#define PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER 0x3UL
+	#define PORT_LED_QCFG_RESP_LED2_COLOR_LAST      PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER
+	u8	unused_2;
+	__le16	led2_blink_on;
+	__le16	led2_blink_off;
+	u8	led2_group_id;
+	u8	led3_id;
+	u8	led3_type;
+	#define PORT_LED_QCFG_RESP_LED3_TYPE_SPEED    0x0UL
+	#define PORT_LED_QCFG_RESP_LED3_TYPE_ACTIVITY 0x1UL
+	#define PORT_LED_QCFG_RESP_LED3_TYPE_INVALID  0xffUL
+	#define PORT_LED_QCFG_RESP_LED3_TYPE_LAST    PORT_LED_QCFG_RESP_LED3_TYPE_INVALID
+	u8	led3_state;
+	#define PORT_LED_QCFG_RESP_LED3_STATE_DEFAULT  0x0UL
+	#define PORT_LED_QCFG_RESP_LED3_STATE_OFF      0x1UL
+	#define PORT_LED_QCFG_RESP_LED3_STATE_ON       0x2UL
+	#define PORT_LED_QCFG_RESP_LED3_STATE_BLINK    0x3UL
+	#define PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT 0x4UL
+	#define PORT_LED_QCFG_RESP_LED3_STATE_LAST    PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT
+	u8	led3_color;
+	#define PORT_LED_QCFG_RESP_LED3_COLOR_DEFAULT    0x0UL
+	#define PORT_LED_QCFG_RESP_LED3_COLOR_AMBER      0x1UL
+	#define PORT_LED_QCFG_RESP_LED3_COLOR_GREEN      0x2UL
+	#define PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER 0x3UL
+	#define PORT_LED_QCFG_RESP_LED3_COLOR_LAST      PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER
+	u8	unused_3;
+	__le16	led3_blink_on;
+	__le16	led3_blink_off;
+	u8	led3_group_id;
+	u8	unused_4[6];
+	u8	valid;
+};
+
+/* hwrm_port_led_qcaps_input (size:192b/24B) */
 struct hwrm_port_led_qcaps_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 port_id;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	port_id;
+	u8	unused_0[6];
 };
 
-/* Output (48 bytes) */
+/* hwrm_port_led_qcaps_output (size:384b/48B) */
 struct hwrm_port_led_qcaps_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 num_leds;
-	u8 unused_0[3];
-	u8 led0_id;
-	u8 led0_type;
-	#define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED		   0x0UL
-	#define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY		   0x1UL
-	#define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID		   0xffUL
-	u8 led0_group_id;
-	u8 unused_1;
-	__le16 led0_state_caps;
-	#define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED	    0x1UL
-	#define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED  0x2UL
-	#define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED   0x4UL
-	#define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL
-	#define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
-	__le16 led0_color_caps;
-	#define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD	    0x1UL
-	#define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
-	#define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
-	u8 led1_id;
-	u8 led1_type;
-	#define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED		   0x0UL
-	#define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY		   0x1UL
-	#define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID		   0xffUL
-	u8 led1_group_id;
-	u8 unused_2;
-	__le16 led1_state_caps;
-	#define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED	    0x1UL
-	#define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED  0x2UL
-	#define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED   0x4UL
-	#define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL
-	#define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
-	__le16 led1_color_caps;
-	#define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD	    0x1UL
-	#define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
-	#define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
-	u8 led2_id;
-	u8 led2_type;
-	#define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED		   0x0UL
-	#define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY		   0x1UL
-	#define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID		   0xffUL
-	u8 led2_group_id;
-	u8 unused_3;
-	__le16 led2_state_caps;
-	#define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED	    0x1UL
-	#define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED  0x2UL
-	#define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED   0x4UL
-	#define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL
-	#define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
-	__le16 led2_color_caps;
-	#define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD	    0x1UL
-	#define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
-	#define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
-	u8 led3_id;
-	u8 led3_type;
-	#define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED		   0x0UL
-	#define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY		   0x1UL
-	#define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID		   0xffUL
-	u8 led3_group_id;
-	u8 unused_4;
-	__le16 led3_state_caps;
-	#define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED	    0x1UL
-	#define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED  0x2UL
-	#define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED   0x4UL
-	#define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL
-	#define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
-	__le16 led3_color_caps;
-	#define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD	    0x1UL
-	#define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
-	#define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
-	u8 unused_5;
-	u8 unused_6;
-	u8 unused_7;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	num_leds;
+	u8	unused[3];
+	u8	led0_id;
+	u8	led0_type;
+	#define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED    0x0UL
+	#define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL
+	#define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID  0xffUL
+	#define PORT_LED_QCAPS_RESP_LED0_TYPE_LAST    PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID
+	u8	led0_group_id;
+	u8	unused_0;
+	__le16	led0_state_caps;
+	#define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED                 0x1UL
+	#define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED           0x2UL
+	#define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED            0x4UL
+	#define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED         0x8UL
+	#define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED     0x10UL
+	__le16	led0_color_caps;
+	#define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD                0x1UL
+	#define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED     0x2UL
+	#define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED     0x4UL
+	u8	led1_id;
+	u8	led1_type;
+	#define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED    0x0UL
+	#define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL
+	#define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID  0xffUL
+	#define PORT_LED_QCAPS_RESP_LED1_TYPE_LAST    PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID
+	u8	led1_group_id;
+	u8	unused_1;
+	__le16	led1_state_caps;
+	#define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED                 0x1UL
+	#define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED           0x2UL
+	#define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED            0x4UL
+	#define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED         0x8UL
+	#define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED     0x10UL
+	__le16	led1_color_caps;
+	#define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD                0x1UL
+	#define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED     0x2UL
+	#define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED     0x4UL
+	u8	led2_id;
+	u8	led2_type;
+	#define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED    0x0UL
+	#define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL
+	#define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID  0xffUL
+	#define PORT_LED_QCAPS_RESP_LED2_TYPE_LAST    PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID
+	u8	led2_group_id;
+	u8	unused_2;
+	__le16	led2_state_caps;
+	#define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED                 0x1UL
+	#define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED           0x2UL
+	#define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED            0x4UL
+	#define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED         0x8UL
+	#define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED     0x10UL
+	__le16	led2_color_caps;
+	#define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD                0x1UL
+	#define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED     0x2UL
+	#define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED     0x4UL
+	u8	led3_id;
+	u8	led3_type;
+	#define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED    0x0UL
+	#define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL
+	#define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID  0xffUL
+	#define PORT_LED_QCAPS_RESP_LED3_TYPE_LAST    PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID
+	u8	led3_group_id;
+	u8	unused_3;
+	__le16	led3_state_caps;
+	#define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED                 0x1UL
+	#define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED           0x2UL
+	#define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED            0x4UL
+	#define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED         0x8UL
+	#define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED     0x10UL
+	__le16	led3_color_caps;
+	#define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD                0x1UL
+	#define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED     0x2UL
+	#define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED     0x4UL
+	u8	unused_4[3];
+	u8	valid;
 };
 
-/* hwrm_queue_qportcfg */
-/* Input (24 bytes) */
+/* hwrm_queue_qportcfg_input (size:192b/24B) */
 struct hwrm_queue_qportcfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH			    0x1UL
-	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX		   0x0UL
-	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX		   0x1UL
-	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST    QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
-	__le16 port_id;
-	__le16 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH     0x1UL
+	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX    0x0UL
+	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX    0x1UL
+	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
+	__le16	port_id;
+	u8	unused_0[2];
 };
 
-/* Output (32 bytes) */
+/* hwrm_queue_qportcfg_output (size:256b/32B) */
 struct hwrm_queue_qportcfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 max_configurable_queues;
-	u8 max_configurable_lossless_queues;
-	u8 queue_cfg_allowed;
-	u8 queue_cfg_info;
-	#define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG	    0x1UL
-	u8 queue_pfcenable_cfg_allowed;
-	u8 queue_pri2cos_cfg_allowed;
-	u8 queue_cos2bw_cfg_allowed;
-	u8 queue_id0;
-	u8 queue_id0_service_profile;
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	max_configurable_queues;
+	u8	max_configurable_lossless_queues;
+	u8	queue_cfg_allowed;
+	u8	queue_cfg_info;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG     0x1UL
+	u8	queue_pfcenable_cfg_allowed;
+	u8	queue_pri2cos_cfg_allowed;
+	u8	queue_cos2bw_cfg_allowed;
+	u8	queue_id0;
+	u8	queue_id0_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY    0x0UL
 	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
-	u8 queue_id1;
-	u8 queue_id1_service_profile;
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN  0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST    QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN
+	u8	queue_id1;
+	u8	queue_id1_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY    0x0UL
 	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
-	u8 queue_id2;
-	u8 queue_id2_service_profile;
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN  0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST    QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN
+	u8	queue_id2;
+	u8	queue_id2_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY    0x0UL
 	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
-	u8 queue_id3;
-	u8 queue_id3_service_profile;
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN  0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST    QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN
+	u8	queue_id3;
+	u8	queue_id3_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY    0x0UL
 	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
-	u8 queue_id4;
-	u8 queue_id4_service_profile;
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN  0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST    QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN
+	u8	queue_id4;
+	u8	queue_id4_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY    0x0UL
 	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
-	u8 queue_id5;
-	u8 queue_id5_service_profile;
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN  0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST    QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN
+	u8	queue_id5;
+	u8	queue_id5_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY    0x0UL
 	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
-	u8 queue_id6;
-	u8 queue_id6_service_profile;
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN  0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST    QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN
+	u8	queue_id6;
+	u8	queue_id6_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY    0x0UL
 	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
-	u8 queue_id7;
-	u8 queue_id7_service_profile;
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN  0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST    QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN
+	u8	queue_id7;
+	u8	queue_id7_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY    0x0UL
 	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
-	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
-	u8 valid;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN  0xffUL
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST    QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
+	u8	valid;
 };
 
-/* hwrm_queue_cfg */
-/* Input (40 bytes) */
+/* hwrm_queue_cfg_input (size:320b/40B) */
 struct hwrm_queue_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define QUEUE_CFG_REQ_FLAGS_PATH_MASK			    0x3UL
-	#define QUEUE_CFG_REQ_FLAGS_PATH_SFT			    0
-	#define QUEUE_CFG_REQ_FLAGS_PATH_TX			   0x0UL
-	#define QUEUE_CFG_REQ_FLAGS_PATH_RX			   0x1UL
-	#define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR			   0x2UL
-	#define QUEUE_CFG_REQ_FLAGS_PATH_LAST    QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
-	__le32 enables;
-	#define QUEUE_CFG_REQ_ENABLES_DFLT_LEN			    0x1UL
-	#define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE		    0x2UL
-	__le32 queue_id;
-	__le32 dflt_len;
-	u8 service_profile;
-	#define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY		   0x0UL
-	#define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS		   0x1UL
-	#define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN		   0xffUL
-	u8 unused_0[7];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+	#define QUEUE_CFG_REQ_FLAGS_PATH_SFT  0
+	#define QUEUE_CFG_REQ_FLAGS_PATH_TX     0x0UL
+	#define QUEUE_CFG_REQ_FLAGS_PATH_RX     0x1UL
+	#define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR  0x2UL
+	#define QUEUE_CFG_REQ_FLAGS_PATH_LAST  QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
+	__le32	enables;
+	#define QUEUE_CFG_REQ_ENABLES_DFLT_LEN            0x1UL
+	#define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE     0x2UL
+	__le32	queue_id;
+	__le32	dflt_len;
+	u8	service_profile;
+	#define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY    0x0UL
+	#define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL
+	#define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN  0xffUL
+	#define QUEUE_CFG_REQ_SERVICE_PROFILE_LAST    QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN
+	u8	unused_0[7];
 };
 
-/* Output (16 bytes) */
+/* hwrm_queue_cfg_output (size:128b/16B) */
 struct hwrm_queue_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_queue_pfcenable_qcfg */
-/* Input (24 bytes) */
+/* hwrm_queue_pfcenable_qcfg_input (size:192b/24B) */
 struct hwrm_queue_pfcenable_qcfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 port_id;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	port_id;
+	u8	unused_0[6];
 };
 
-/* Output (16 bytes) */
+/* hwrm_queue_pfcenable_qcfg_output (size:128b/16B) */
 struct hwrm_queue_pfcenable_qcfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 flags;
-	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED   0x1UL
-	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED   0x2UL
-	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED   0x4UL
-	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED   0x8UL
-	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED   0x10UL
-	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED   0x20UL
-	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED   0x40UL
-	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED   0x80UL
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le32	flags;
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED     0x1UL
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED     0x2UL
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED     0x4UL
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED     0x8UL
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED     0x10UL
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED     0x20UL
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED     0x40UL
+	#define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED     0x80UL
+	u8	unused_0[3];
+	u8	valid;
 };
 
-/* hwrm_queue_pfcenable_cfg */
-/* Input (24 bytes) */
+/* hwrm_queue_pfcenable_cfg_input (size:192b/24B) */
 struct hwrm_queue_pfcenable_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
 	#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED     0x1UL
 	#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED     0x2UL
 	#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED     0x4UL
@@ -2440,1729 +2703,1664 @@ struct hwrm_queue_pfcenable_cfg_input {
 	#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED     0x20UL
 	#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED     0x40UL
 	#define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED     0x80UL
-	__le16 port_id;
-	__le16 unused_0;
+	__le16	port_id;
+	u8	unused_0[2];
 };
 
-/* Output (16 bytes) */
+/* hwrm_queue_pfcenable_cfg_output (size:128b/16B) */
 struct hwrm_queue_pfcenable_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_queue_pri2cos_qcfg */
-/* Input (24 bytes) */
+/* hwrm_queue_pri2cos_qcfg_input (size:192b/24B) */
 struct hwrm_queue_pri2cos_qcfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH		    0x1UL
-	#define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX		   (0x0UL << 0)
-	#define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX		   (0x1UL << 0)
-	#define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST    QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX
-	#define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN		    0x2UL
-	u8 port_id;
-	u8 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH      0x1UL
+	#define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX     0x0UL
+	#define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX     0x1UL
+	#define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST  QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX
+	#define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN     0x2UL
+	u8	port_id;
+	u8	unused_0[3];
 };
 
-/* Output (24 bytes) */
+/* hwrm_queue_pri2cos_qcfg_output (size:192b/24B) */
 struct hwrm_queue_pri2cos_qcfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 pri0_cos_queue_id;
-	u8 pri1_cos_queue_id;
-	u8 pri2_cos_queue_id;
-	u8 pri3_cos_queue_id;
-	u8 pri4_cos_queue_id;
-	u8 pri5_cos_queue_id;
-	u8 pri6_cos_queue_id;
-	u8 pri7_cos_queue_id;
-	u8 queue_cfg_info;
-	#define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG    0x1UL
-	u8 unused_0;
-	__le16 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	pri0_cos_queue_id;
+	u8	pri1_cos_queue_id;
+	u8	pri2_cos_queue_id;
+	u8	pri3_cos_queue_id;
+	u8	pri4_cos_queue_id;
+	u8	pri5_cos_queue_id;
+	u8	pri6_cos_queue_id;
+	u8	pri7_cos_queue_id;
+	u8	queue_cfg_info;
+	#define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG     0x1UL
+	u8	unused_0[6];
+	u8	valid;
 };
 
-/* hwrm_queue_pri2cos_cfg */
-/* Input (40 bytes) */
+/* hwrm_queue_pri2cos_cfg_input (size:320b/40B) */
 struct hwrm_queue_pri2cos_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK		    0x3UL
-	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT		    0
-	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX		   (0x0UL << 0)
-	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX		   (0x1UL << 0)
-	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR		   (0x2UL << 0)
-	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST    QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
-	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN		    0x4UL
-	__le32 enables;
-	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID    0x1UL
-	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID    0x2UL
-	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID    0x4UL
-	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID    0x8UL
-	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID    0x10UL
-	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID    0x20UL
-	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID    0x40UL
-	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID    0x80UL
-	u8 port_id;
-	u8 pri0_cos_queue_id;
-	u8 pri1_cos_queue_id;
-	u8 pri2_cos_queue_id;
-	u8 pri3_cos_queue_id;
-	u8 pri4_cos_queue_id;
-	u8 pri5_cos_queue_id;
-	u8 pri6_cos_queue_id;
-	u8 pri7_cos_queue_id;
-	u8 unused_0[7];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT  0
+	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX     0x0UL
+	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX     0x1UL
+	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR  0x2UL
+	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST  QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
+	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN     0x4UL
+	__le32	enables;
+	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID     0x1UL
+	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID     0x2UL
+	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID     0x4UL
+	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID     0x8UL
+	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID     0x10UL
+	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID     0x20UL
+	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID     0x40UL
+	#define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID     0x80UL
+	u8	port_id;
+	u8	pri0_cos_queue_id;
+	u8	pri1_cos_queue_id;
+	u8	pri2_cos_queue_id;
+	u8	pri3_cos_queue_id;
+	u8	pri4_cos_queue_id;
+	u8	pri5_cos_queue_id;
+	u8	pri6_cos_queue_id;
+	u8	pri7_cos_queue_id;
+	u8	unused_0[7];
 };
 
-/* Output (16 bytes) */
+/* hwrm_queue_pri2cos_cfg_output (size:128b/16B) */
 struct hwrm_queue_pri2cos_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_queue_cos2bw_qcfg */
-/* Input (24 bytes) */
+/* hwrm_queue_cos2bw_qcfg_input (size:192b/24B) */
 struct hwrm_queue_cos2bw_qcfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 port_id;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	port_id;
+	u8	unused_0[6];
 };
 
-/* Output (112 bytes) */
+/* hwrm_queue_cos2bw_qcfg_output (size:896b/112B) */
 struct hwrm_queue_cos2bw_qcfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 queue_id0;
-	u8 unused_0;
-	__le16 unused_1;
-	__le32 queue_id0_min_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id0_max_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id0_tsa_assign;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP    0x0UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS   0x1UL
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	queue_id0;
+	u8	unused_0;
+	__le16	unused_1;
+	__le32	queue_id0_min_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id0_max_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id0_tsa_assign;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id0_pri_lvl;
-	u8 queue_id0_bw_weight;
-	u8 queue_id1;
-	__le32 queue_id1_min_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id1_max_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id1_tsa_assign;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_SP    0x0UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_ETS   0x1UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id0_pri_lvl;
+	u8	queue_id0_bw_weight;
+	u8	queue_id1;
+	__le32	queue_id1_min_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id1_max_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id1_tsa_assign;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id1_pri_lvl;
-	u8 queue_id1_bw_weight;
-	u8 queue_id2;
-	__le32 queue_id2_min_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id2_max_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id2_tsa_assign;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_SP    0x0UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_ETS   0x1UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id1_pri_lvl;
+	u8	queue_id1_bw_weight;
+	u8	queue_id2;
+	__le32	queue_id2_min_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id2_max_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id2_tsa_assign;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id2_pri_lvl;
-	u8 queue_id2_bw_weight;
-	u8 queue_id3;
-	__le32 queue_id3_min_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id3_max_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id3_tsa_assign;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_SP    0x0UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_ETS   0x1UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id2_pri_lvl;
+	u8	queue_id2_bw_weight;
+	u8	queue_id3;
+	__le32	queue_id3_min_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id3_max_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id3_tsa_assign;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id3_pri_lvl;
-	u8 queue_id3_bw_weight;
-	u8 queue_id4;
-	__le32 queue_id4_min_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id4_max_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id4_tsa_assign;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_SP    0x0UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_ETS   0x1UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id3_pri_lvl;
+	u8	queue_id3_bw_weight;
+	u8	queue_id4;
+	__le32	queue_id4_min_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id4_max_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id4_tsa_assign;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id4_pri_lvl;
-	u8 queue_id4_bw_weight;
-	u8 queue_id5;
-	__le32 queue_id5_min_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id5_max_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id5_tsa_assign;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_SP    0x0UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_ETS   0x1UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id4_pri_lvl;
+	u8	queue_id4_bw_weight;
+	u8	queue_id5;
+	__le32	queue_id5_min_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id5_max_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id5_tsa_assign;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id5_pri_lvl;
-	u8 queue_id5_bw_weight;
-	u8 queue_id6;
-	__le32 queue_id6_min_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id6_max_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id6_tsa_assign;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_SP    0x0UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_ETS   0x1UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id5_pri_lvl;
+	u8	queue_id5_bw_weight;
+	u8	queue_id6;
+	__le32	queue_id6_min_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id6_max_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id6_tsa_assign;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id6_pri_lvl;
-	u8 queue_id6_bw_weight;
-	u8 queue_id7;
-	__le32 queue_id7_min_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id7_max_bw;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE      0x10000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id7_tsa_assign;
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_SP    0x0UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_ETS   0x1UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id6_pri_lvl;
+	u8	queue_id6_bw_weight;
+	u8	queue_id7;
+	__le32	queue_id7_min_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id7_max_bw;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id7_tsa_assign;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id7_pri_lvl;
-	u8 queue_id7_bw_weight;
-	u8 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 unused_5;
-	u8 valid;
+	#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id7_pri_lvl;
+	u8	queue_id7_bw_weight;
+	u8	unused_2[4];
+	u8	valid;
 };
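
	/* Not part of the patch -- a minimal decode sketch for the
	 * min_bw/max_bw word layout defined above: bits 0-27 carry the
	 * bandwidth value, bit 28 selects bits vs. bytes scaling, and
	 * bits 29-31 select the unit.  The function name is hypothetical;
	 * the struct, the macros and le32_to_cpu() are the kernel's own.
	 */
	static u32 bnxt_example_decode_min_bw(struct hwrm_queue_cos2bw_qcfg_output *resp)
	{
		u32 word = le32_to_cpu(resp->queue_id5_min_bw);
		/* bits 0-27: bandwidth value */
		u32 bw_value = word & QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK;
		/* bit 28: scale flag, 0 = bits, 1 = bytes */
		bool scale_bytes = word & QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE;
		/* bits 29-31: unit selector (MEGA, KILO, BASE, GIGA, PERCENT1_100) */
		u32 unit = word & QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK;

		pr_debug("min_bw: value=%u scale=%s unit=0x%x\n",
			 bw_value, scale_bytes ? "bytes" : "bits", unit);
		return bw_value;
	}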
 
-/* hwrm_queue_cos2bw_cfg */
-/* Input (128 bytes) */
+/* hwrm_queue_cos2bw_cfg_input (size:1024b/128B) */
 struct hwrm_queue_cos2bw_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	__le32 enables;
-	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID   0x1UL
-	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID   0x2UL
-	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID   0x4UL
-	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID   0x8UL
-	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID   0x10UL
-	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID   0x20UL
-	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID   0x40UL
-	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID   0x80UL
-	__le16 port_id;
-	u8 queue_id0;
-	u8 unused_0;
-	__le32 queue_id0_min_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id0_max_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id0_tsa_assign;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP      0x0UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS     0x1UL
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	__le32	enables;
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID     0x1UL
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID     0x2UL
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID     0x4UL
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID     0x8UL
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID     0x10UL
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID     0x20UL
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID     0x40UL
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID     0x80UL
+	__le16	port_id;
+	u8	queue_id0;
+	u8	unused_0;
+	__le32	queue_id0_min_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id0_max_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id0_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id0_pri_lvl;
-	u8 queue_id0_bw_weight;
-	u8 queue_id1;
-	__le32 queue_id1_min_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id1_max_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id1_tsa_assign;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP      0x0UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS     0x1UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id0_pri_lvl;
+	u8	queue_id0_bw_weight;
+	u8	queue_id1;
+	__le32	queue_id1_min_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id1_max_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id1_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id1_pri_lvl;
-	u8 queue_id1_bw_weight;
-	u8 queue_id2;
-	__le32 queue_id2_min_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id2_max_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id2_tsa_assign;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP      0x0UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS     0x1UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id1_pri_lvl;
+	u8	queue_id1_bw_weight;
+	u8	queue_id2;
+	__le32	queue_id2_min_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id2_max_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id2_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id2_pri_lvl;
-	u8 queue_id2_bw_weight;
-	u8 queue_id3;
-	__le32 queue_id3_min_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id3_max_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id3_tsa_assign;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP      0x0UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS     0x1UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id2_pri_lvl;
+	u8	queue_id2_bw_weight;
+	u8	queue_id3;
+	__le32	queue_id3_min_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id3_max_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id3_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id3_pri_lvl;
-	u8 queue_id3_bw_weight;
-	u8 queue_id4;
-	__le32 queue_id4_min_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id4_max_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id4_tsa_assign;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP      0x0UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS     0x1UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id3_pri_lvl;
+	u8	queue_id3_bw_weight;
+	u8	queue_id4;
+	__le32	queue_id4_min_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id4_max_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id4_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id4_pri_lvl;
-	u8 queue_id4_bw_weight;
-	u8 queue_id5;
-	__le32 queue_id5_min_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id5_max_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id5_tsa_assign;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP      0x0UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS     0x1UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id4_pri_lvl;
+	u8	queue_id4_bw_weight;
+	u8	queue_id5;
+	__le32	queue_id5_min_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id5_max_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id5_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id5_pri_lvl;
-	u8 queue_id5_bw_weight;
-	u8 queue_id6;
-	__le32 queue_id6_min_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id6_max_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id6_tsa_assign;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP      0x0UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS     0x1UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id5_pri_lvl;
+	u8	queue_id5_bw_weight;
+	u8	queue_id6;
+	__le32	queue_id6_min_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id6_max_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id6_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id6_pri_lvl;
-	u8 queue_id6_bw_weight;
-	u8 queue_id7;
-	__le32 queue_id7_min_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
-	__le32 queue_id7_max_bw;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE	    0x10000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS  (0x0UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 queue_id7_tsa_assign;
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP      0x0UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS     0x1UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id6_pri_lvl;
+	u8	queue_id6_bw_weight;
+	u8	queue_id7;
+	__le32	queue_id7_min_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
+	__le32	queue_id7_max_bw;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT              0
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE                     0x10000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST                 QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST         QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	queue_id7_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP             0x0UL
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS            0x1UL
 	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
-	u8 queue_id7_pri_lvl;
-	u8 queue_id7_bw_weight;
-	u8 unused_1[5];
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST  0xffUL
+	u8	queue_id7_pri_lvl;
+	u8	queue_id7_bw_weight;
+	u8	unused_1[5];
 };
 
-/* Output (16 bytes) */
+/* hwrm_queue_cos2bw_cfg_output (size:128b/16B) */
 struct hwrm_queue_cos2bw_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_queue_dscp_qcaps */
-/* Input (24 bytes) */
+/* hwrm_queue_dscp_qcaps_input (size:192b/24B) */
 struct hwrm_queue_dscp_qcaps_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	u8 port_id;
-	u8 unused_0[7];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	u8	port_id;
+	u8	unused_0[7];
 };
 
-/* Output (16 bytes) */
+/* hwrm_queue_dscp_qcaps_output (size:128b/16B) */
 struct hwrm_queue_dscp_qcaps_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 num_dscp_bits;
-	u8 unused_0;
-	__le16 max_entries;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	num_dscp_bits;
+	u8	unused_0;
+	__le16	max_entries;
+	u8	unused_1[3];
+	u8	valid;
 };
 
-/* hwrm_queue_dscp2pri_qcfg */
-/* Input (32 bytes) */
+/* hwrm_queue_dscp2pri_qcfg_input (size:256b/32B) */
 struct hwrm_queue_dscp2pri_qcfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 dest_data_addr;
-	u8 port_id;
-	u8 unused_0;
-	__le16 dest_data_buffer_size;
-	__le32 unused_1;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le64	dest_data_addr;
+	u8	port_id;
+	u8	unused_0;
+	__le16	dest_data_buffer_size;
+	u8	unused_1[4];
 };
 
-/* Output (16 bytes) */
+/* hwrm_queue_dscp2pri_qcfg_output (size:128b/16B) */
 struct hwrm_queue_dscp2pri_qcfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 entry_cnt;
-	u8 default_pri;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	entry_cnt;
+	u8	default_pri;
+	u8	unused_0[4];
+	u8	valid;
 };
 
-/* hwrm_queue_dscp2pri_cfg */
-/* Input (40 bytes) */
+/* hwrm_queue_dscp2pri_cfg_input (size:320b/40B) */
 struct hwrm_queue_dscp2pri_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 src_data_addr;
-	__le32 flags;
-	#define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI    0x1UL
-	__le32 enables;
-	#define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI	    0x1UL
-	u8 port_id;
-	u8 default_pri;
-	__le16 entry_cnt;
-	__le32 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le64	src_data_addr;
+	__le32	flags;
+	#define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI     0x1UL
+	__le32	enables;
+	#define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI     0x1UL
+	u8	port_id;
+	u8	default_pri;
+	__le16	entry_cnt;
+	u8	unused_0[4];
 };
 
-/* Output (16 bytes) */
+/* hwrm_queue_dscp2pri_cfg_output (size:128b/16B) */
 struct hwrm_queue_dscp2pri_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_vnic_alloc */
-/* Input (24 bytes) */
+/* hwrm_vnic_alloc_input (size:192b/24B) */
 struct hwrm_vnic_alloc_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define VNIC_ALLOC_REQ_FLAGS_DEFAULT			    0x1UL
-	__le32 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define VNIC_ALLOC_REQ_FLAGS_DEFAULT     0x1UL
+	u8	unused_0[4];
 };
 
-/* Output (16 bytes) */
+/* hwrm_vnic_alloc_output (size:128b/16B) */
 struct hwrm_vnic_alloc_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 vnic_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le32	vnic_id;
+	u8	unused_0[3];
+	u8	valid;
 };
 
-/* hwrm_vnic_free */
-/* Input (24 bytes) */
+/* hwrm_vnic_free_input (size:192b/24B) */
 struct hwrm_vnic_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 vnic_id;
-	__le32 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	vnic_id;
+	u8	unused_0[4];
 };
 
-/* Output (16 bytes) */
+/* hwrm_vnic_free_output (size:128b/16B) */
 struct hwrm_vnic_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_vnic_cfg */
-/* Input (40 bytes) */
+/* hwrm_vnic_cfg_input (size:320b/40B) */
 struct hwrm_vnic_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define VNIC_CFG_REQ_FLAGS_DEFAULT			    0x1UL
-	#define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE		    0x2UL
-	#define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE		    0x4UL
-	#define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE		    0x8UL
-	#define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE		    0x10UL
-	#define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE		    0x20UL
-	#define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
-	__le32 enables;
-	#define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP		    0x1UL
-	#define VNIC_CFG_REQ_ENABLES_RSS_RULE			    0x2UL
-	#define VNIC_CFG_REQ_ENABLES_COS_RULE			    0x4UL
-	#define VNIC_CFG_REQ_ENABLES_LB_RULE			    0x8UL
-	#define VNIC_CFG_REQ_ENABLES_MRU			    0x10UL
-	__le16 vnic_id;
-	__le16 dflt_ring_grp;
-	__le16 rss_rule;
-	__le16 cos_rule;
-	__le16 lb_rule;
-	__le16 mru;
-	__le32 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define VNIC_CFG_REQ_FLAGS_DEFAULT                              0x1UL
+	#define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE                      0x2UL
+	#define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE                        0x4UL
+	#define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE                  0x8UL
+	#define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE                  0x10UL
+	#define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE                     0x20UL
+	#define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE     0x40UL
+	__le32	enables;
+	#define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP     0x1UL
+	#define VNIC_CFG_REQ_ENABLES_RSS_RULE          0x2UL
+	#define VNIC_CFG_REQ_ENABLES_COS_RULE          0x4UL
+	#define VNIC_CFG_REQ_ENABLES_LB_RULE           0x8UL
+	#define VNIC_CFG_REQ_ENABLES_MRU               0x10UL
+	__le16	vnic_id;
+	__le16	dflt_ring_grp;
+	__le16	rss_rule;
+	__le16	cos_rule;
+	__le16	lb_rule;
+	__le16	mru;
+	u8	unused_0[4];
 };
 
-/* Output (16 bytes) */
+/* hwrm_vnic_cfg_output (size:128b/16B) */
 struct hwrm_vnic_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_vnic_qcaps */
-/* Input (24 bytes) */
+/* hwrm_vnic_qcaps_input (size:192b/24B) */
 struct hwrm_vnic_qcaps_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 enables;
-	__le32 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	enables;
+	u8	unused_0[4];
 };
 
-/* Output (24 bytes) */
+/* hwrm_vnic_qcaps_output (size:192b/24B) */
 struct hwrm_vnic_qcaps_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 mru;
-	u8 unused_0;
-	u8 unused_1;
-	__le32 flags;
-	#define VNIC_QCAPS_RESP_FLAGS_UNUSED			    0x1UL
-	#define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP		    0x2UL
-	#define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP		    0x4UL
-	#define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP	    0x8UL
-	#define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP	    0x10UL
-	#define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP		    0x20UL
-	#define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRROING_CAPABLE_VNIC_CAP 0x40UL
-	__le32 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 unused_5;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	mru;
+	u8	unused_0[2];
+	__le32	flags;
+	#define VNIC_QCAPS_RESP_FLAGS_UNUSED                              0x1UL
+	#define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP                      0x2UL
+	#define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP                        0x4UL
+	#define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP                  0x8UL
+	#define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP                  0x10UL
+	#define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP                     0x20UL
+	#define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP     0x40UL
+	u8	unused_1[7];
+	u8	valid;
 };
 
-/* hwrm_vnic_tpa_cfg */
-/* Input (40 bytes) */
+/* hwrm_vnic_tpa_cfg_input (size:320b/40B) */
 struct hwrm_vnic_tpa_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define VNIC_TPA_CFG_REQ_FLAGS_TPA			    0x1UL
-	#define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA		    0x2UL
-	#define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE		    0x4UL
-	#define VNIC_TPA_CFG_REQ_FLAGS_GRO			    0x8UL
-	#define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN		    0x10UL
-	#define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ       0x20UL
-	#define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK		    0x40UL
-	#define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK		    0x80UL
-	__le32 enables;
-	#define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS		    0x1UL
-	#define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS		    0x2UL
-	#define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER		    0x4UL
-	#define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN		    0x8UL
-	__le16 vnic_id;
-	__le16 max_agg_segs;
-	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1		   0x0UL
-	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2		   0x1UL
-	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4		   0x2UL
-	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8		   0x3UL
-	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX		   0x1fUL
-	__le16 max_aggs;
-	#define VNIC_TPA_CFG_REQ_MAX_AGGS_1			   0x0UL
-	#define VNIC_TPA_CFG_REQ_MAX_AGGS_2			   0x1UL
-	#define VNIC_TPA_CFG_REQ_MAX_AGGS_4			   0x2UL
-	#define VNIC_TPA_CFG_REQ_MAX_AGGS_8			   0x3UL
-	#define VNIC_TPA_CFG_REQ_MAX_AGGS_16			   0x4UL
-	#define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX			   0x7UL
-	u8 unused_0;
-	u8 unused_1;
-	__le32 max_agg_timer;
-	__le32 min_agg_len;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define VNIC_TPA_CFG_REQ_FLAGS_TPA                       0x1UL
+	#define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA                 0x2UL
+	#define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE            0x4UL
+	#define VNIC_TPA_CFG_REQ_FLAGS_GRO                       0x8UL
+	#define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN              0x10UL
+	#define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ     0x20UL
+	#define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK            0x40UL
+	#define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK             0x80UL
+	__le32	enables;
+	#define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS      0x1UL
+	#define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS          0x2UL
+	#define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER     0x4UL
+	#define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN       0x8UL
+	__le16	vnic_id;
+	__le16	max_agg_segs;
+	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1   0x0UL
+	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2   0x1UL
+	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4   0x2UL
+	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8   0x3UL
+	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL
+	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_LAST VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX
+	__le16	max_aggs;
+	#define VNIC_TPA_CFG_REQ_MAX_AGGS_1   0x0UL
+	#define VNIC_TPA_CFG_REQ_MAX_AGGS_2   0x1UL
+	#define VNIC_TPA_CFG_REQ_MAX_AGGS_4   0x2UL
+	#define VNIC_TPA_CFG_REQ_MAX_AGGS_8   0x3UL
+	#define VNIC_TPA_CFG_REQ_MAX_AGGS_16  0x4UL
+	#define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL
+	#define VNIC_TPA_CFG_REQ_MAX_AGGS_LAST VNIC_TPA_CFG_REQ_MAX_AGGS_MAX
+	u8	unused_0[2];
+	__le32	max_agg_timer;
+	__le32	min_agg_len;
 };
 
-/* Output (16 bytes) */
+/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */
 struct hwrm_vnic_tpa_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_vnic_rss_cfg */
-/* Input (48 bytes) */
+/* hwrm_vnic_tpa_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_tpa_qcfg_input {
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	vnic_id;
+	u8	unused_0[6];
+};
+
+/* hwrm_vnic_tpa_qcfg_output (size:256b/32B) */
+struct hwrm_vnic_tpa_qcfg_output {
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le32	flags;
+	#define VNIC_TPA_QCFG_RESP_FLAGS_TPA                       0x1UL
+	#define VNIC_TPA_QCFG_RESP_FLAGS_ENCAP_TPA                 0x2UL
+	#define VNIC_TPA_QCFG_RESP_FLAGS_RSC_WND_UPDATE            0x4UL
+	#define VNIC_TPA_QCFG_RESP_FLAGS_GRO                       0x8UL
+	#define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_ECN              0x10UL
+	#define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_SAME_GRE_SEQ     0x20UL
+	#define VNIC_TPA_QCFG_RESP_FLAGS_GRO_IPID_CHECK            0x40UL
+	#define VNIC_TPA_QCFG_RESP_FLAGS_GRO_TTL_CHECK             0x80UL
+	__le16	max_agg_segs;
+	#define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_1   0x0UL
+	#define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_2   0x1UL
+	#define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_4   0x2UL
+	#define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_8   0x3UL
+	#define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX 0x1fUL
+	#define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX
+	__le16	max_aggs;
+	#define VNIC_TPA_QCFG_RESP_MAX_AGGS_1   0x0UL
+	#define VNIC_TPA_QCFG_RESP_MAX_AGGS_2   0x1UL
+	#define VNIC_TPA_QCFG_RESP_MAX_AGGS_4   0x2UL
+	#define VNIC_TPA_QCFG_RESP_MAX_AGGS_8   0x3UL
+	#define VNIC_TPA_QCFG_RESP_MAX_AGGS_16  0x4UL
+	#define VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX 0x7UL
+	#define VNIC_TPA_QCFG_RESP_MAX_AGGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX
+	__le32	max_agg_timer;
+	__le32	min_agg_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
+/* hwrm_vnic_rss_cfg_input (size:384b/48B) */
 struct hwrm_vnic_rss_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 hash_type;
-	#define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4		    0x1UL
-	#define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4		    0x2UL
-	#define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4		    0x4UL
-	#define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6		    0x8UL
-	#define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6		    0x10UL
-	#define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6		    0x20UL
-	__le32 unused_0;
-	__le64 ring_grp_tbl_addr;
-	__le64 hash_key_tbl_addr;
-	__le16 rss_ctx_idx;
-	__le16 unused_1[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	hash_type;
+	#define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4         0x1UL
+	#define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4     0x2UL
+	#define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4     0x4UL
+	#define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6         0x8UL
+	#define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6     0x10UL
+	#define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6     0x20UL
+	u8	unused_0[4];
+	__le64	ring_grp_tbl_addr;
+	__le64	hash_key_tbl_addr;
+	__le16	rss_ctx_idx;
+	u8	unused_1[6];
 };
 
-/* Output (16 bytes) */
+/* hwrm_vnic_rss_cfg_output (size:128b/16B) */
 struct hwrm_vnic_rss_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_vnic_plcmodes_cfg */
-/* Input (40 bytes) */
+/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */
 struct hwrm_vnic_plcmodes_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT      0x1UL
-	#define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT	    0x2UL
-	#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4		    0x4UL
-	#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6		    0x8UL
-	#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE		    0x10UL
-	#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE		    0x20UL
-	__le32 enables;
-	#define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID   0x1UL
-	#define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID     0x2UL
-	#define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID  0x4UL
-	__le32 vnic_id;
-	__le16 jumbo_thresh;
-	__le16 hds_offset;
-	__le16 hds_threshold;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT     0x1UL
+	#define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT       0x2UL
+	#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4              0x4UL
+	#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6              0x8UL
+	#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE              0x10UL
+	#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE              0x20UL
+	__le32	enables;
+	#define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID      0x1UL
+	#define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID        0x2UL
+	#define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID     0x4UL
+	__le32	vnic_id;
+	__le16	jumbo_thresh;
+	__le16	hds_offset;
+	__le16	hds_threshold;
+	u8	unused_0[6];
 };
 
-/* Output (16 bytes) */
+/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */
 struct hwrm_vnic_plcmodes_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_vnic_rss_cos_lb_ctx_alloc */
-/* Input (16 bytes) */
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */
 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
 };
 
-/* Output (16 bytes) */
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */
 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 rss_cos_lb_ctx_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	rss_cos_lb_ctx_id;
+	u8	unused_0[5];
+	u8	valid;
 };
 
-/* hwrm_vnic_rss_cos_lb_ctx_free */
-/* Input (24 bytes) */
+/* hwrm_vnic_rss_cos_lb_ctx_free_input (size:192b/24B) */
 struct hwrm_vnic_rss_cos_lb_ctx_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 rss_cos_lb_ctx_id;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	rss_cos_lb_ctx_id;
+	u8	unused_0[6];
 };
 
-/* Output (16 bytes) */
+/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */
 struct hwrm_vnic_rss_cos_lb_ctx_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_ring_alloc */
-/* Input (80 bytes) */
+/* hwrm_ring_alloc_input (size:640b/80B) */
 struct hwrm_ring_alloc_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 enables;
-	#define RING_ALLOC_REQ_ENABLES_RESERVED1		    0x1UL
-	#define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG		    0x2UL
-	#define RING_ALLOC_REQ_ENABLES_RESERVED3		    0x4UL
-	#define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID	    0x8UL
-	#define RING_ALLOC_REQ_ENABLES_RESERVED4		    0x10UL
-	#define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID		    0x20UL
-	u8 ring_type;
-	#define RING_ALLOC_REQ_RING_TYPE_L2_CMPL		   0x0UL
-	#define RING_ALLOC_REQ_RING_TYPE_TX			   0x1UL
-	#define RING_ALLOC_REQ_RING_TYPE_RX			   0x2UL
-	#define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL		   0x3UL
-	u8 unused_0;
-	__le16 unused_1;
-	__le64 page_tbl_addr;
-	__le32 fbo;
-	u8 page_size;
-	u8 page_tbl_depth;
-	u8 unused_2;
-	u8 unused_3;
-	__le32 length;
-	__le16 logical_id;
-	__le16 cmpl_ring_id;
-	__le16 queue_id;
-	u8 unused_4;
-	u8 unused_5;
-	__le32 reserved1;
-	__le16 ring_arb_cfg;
-	#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK	    0xfUL
-	#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT	    0
-	#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP	   (0x1UL << 0)
-	#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ	   (0x2UL << 0)
-	#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST    RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ
-	#define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK		    0xf0UL
-	#define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT		    4
-	#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK  0xff00UL
-	#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT   8
-	u8 unused_6;
-	u8 unused_7;
-	__le32 reserved3;
-	__le32 stat_ctx_id;
-	__le32 reserved4;
-	__le32 max_bw;
-	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK		    0xfffffffUL
-	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT		    0
-	#define RING_ALLOC_REQ_MAX_BW_SCALE			    0x10000000UL
-	#define RING_ALLOC_REQ_MAX_BW_SCALE_BITS		   (0x0UL << 28)
-	#define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES		   (0x1UL << 28)
-	#define RING_ALLOC_REQ_MAX_BW_SCALE_LAST    RING_ALLOC_REQ_MAX_BW_SCALE_BYTES
-	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK	    0xe0000000UL
-	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT	    29
-	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA	   (0x0UL << 29)
-	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO	   (0x2UL << 29)
-	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE	   (0x4UL << 29)
-	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA	   (0x6UL << 29)
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	enables;
+	#define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG          0x2UL
+	#define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID     0x8UL
+	#define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID          0x20UL
+	u8	ring_type;
+	#define RING_ALLOC_REQ_RING_TYPE_L2_CMPL   0x0UL
+	#define RING_ALLOC_REQ_RING_TYPE_TX        0x1UL
+	#define RING_ALLOC_REQ_RING_TYPE_RX        0x2UL
+	#define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+	#define RING_ALLOC_REQ_RING_TYPE_LAST     RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL
+	u8	unused_0[3];
+	__le64	page_tbl_addr;
+	__le32	fbo;
+	u8	page_size;
+	u8	page_tbl_depth;
+	u8	unused_1[2];
+	__le32	length;
+	__le16	logical_id;
+	__le16	cmpl_ring_id;
+	__le16	queue_id;
+	u8	unused_2[2];
+	__le32	reserved1;
+	__le16	ring_arb_cfg;
+	#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK      0xfUL
+	#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT       0
+	#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP          0x1UL
+	#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ         0x2UL
+	#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST       RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ
+	#define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK            0xf0UL
+	#define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT             4
+	#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL
+	#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
+	__le16	unused_3;
+	__le32	reserved3;
+	__le32	stat_ctx_id;
+	__le32	reserved4;
+	__le32	max_bw;
+	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK             0xfffffffUL
+	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT              0
+	#define RING_ALLOC_REQ_MAX_BW_SCALE                     0x10000000UL
+	#define RING_ALLOC_REQ_MAX_BW_SCALE_BITS                  (0x0UL << 28)
+	#define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES                 (0x1UL << 28)
+	#define RING_ALLOC_REQ_MAX_BW_SCALE_LAST                 RING_ALLOC_REQ_MAX_BW_SCALE_BYTES
+	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK        0xe0000000UL
+	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT         29
+	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA          (0x0UL << 29)
+	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO          (0x2UL << 29)
+	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE          (0x4UL << 29)
+	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA          (0x6UL << 29)
 	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
 	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
-	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST    RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
-	u8 int_mode;
-	#define RING_ALLOC_REQ_INT_MODE_LEGACY			   0x0UL
-	#define RING_ALLOC_REQ_INT_MODE_RSVD			   0x1UL
-	#define RING_ALLOC_REQ_INT_MODE_MSIX			   0x2UL
-	#define RING_ALLOC_REQ_INT_MODE_POLL			   0x3UL
-	u8 unused_8[3];
+	#define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST         RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
+	u8	int_mode;
+	#define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL
+	#define RING_ALLOC_REQ_INT_MODE_RSVD   0x1UL
+	#define RING_ALLOC_REQ_INT_MODE_MSIX   0x2UL
+	#define RING_ALLOC_REQ_INT_MODE_POLL   0x3UL
+	#define RING_ALLOC_REQ_INT_MODE_LAST  RING_ALLOC_REQ_INT_MODE_POLL
+	u8	unused_4[3];
 };
 
-/* Output (16 bytes) */
+/* hwrm_ring_alloc_output (size:128b/16B) */
 struct hwrm_ring_alloc_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 ring_id;
-	__le16 logical_ring_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	ring_id;
+	__le16	logical_ring_id;
+	u8	unused_0[3];
+	u8	valid;
 };
 
-/* hwrm_ring_free */
-/* Input (24 bytes) */
+/* hwrm_ring_free_input (size:192b/24B) */
 struct hwrm_ring_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	u8 ring_type;
-	#define RING_FREE_REQ_RING_TYPE_L2_CMPL		   0x0UL
-	#define RING_FREE_REQ_RING_TYPE_TX			   0x1UL
-	#define RING_FREE_REQ_RING_TYPE_RX			   0x2UL
-	#define RING_FREE_REQ_RING_TYPE_ROCE_CMPL		   0x3UL
-	u8 unused_0;
-	__le16 ring_id;
-	__le32 unused_1;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	u8	ring_type;
+	#define RING_FREE_REQ_RING_TYPE_L2_CMPL   0x0UL
+	#define RING_FREE_REQ_RING_TYPE_TX        0x1UL
+	#define RING_FREE_REQ_RING_TYPE_RX        0x2UL
+	#define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+	#define RING_FREE_REQ_RING_TYPE_LAST     RING_FREE_REQ_RING_TYPE_ROCE_CMPL
+	u8	unused_0;
+	__le16	ring_id;
+	u8	unused_1[4];
 };
 
-/* Output (16 bytes) */
+/* hwrm_ring_free_output (size:128b/16B) */
 struct hwrm_ring_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_ring_cmpl_ring_qaggint_params */
-/* Input (24 bytes) */
+/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */
 struct hwrm_ring_cmpl_ring_qaggint_params_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 ring_id;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	ring_id;
+	u8	unused_0[6];
 };
 
-/* Output (32 bytes) */
+/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */
 struct hwrm_ring_cmpl_ring_qaggint_params_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 flags;
-	#define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL
-	#define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL
-	__le16 num_cmpl_dma_aggr;
-	__le16 num_cmpl_dma_aggr_during_int;
-	__le16 cmpl_aggr_dma_tmr;
-	__le16 cmpl_aggr_dma_tmr_during_int;
-	__le16 int_lat_tmr_min;
-	__le16 int_lat_tmr_max;
-	__le16 num_cmpl_aggr_int;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	flags;
+	#define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET     0x1UL
+	#define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE       0x2UL
+	__le16	num_cmpl_dma_aggr;
+	__le16	num_cmpl_dma_aggr_during_int;
+	__le16	cmpl_aggr_dma_tmr;
+	__le16	cmpl_aggr_dma_tmr_during_int;
+	__le16	int_lat_tmr_min;
+	__le16	int_lat_tmr_max;
+	__le16	num_cmpl_aggr_int;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_ring_cmpl_ring_cfg_aggint_params */
-/* Input (40 bytes) */
+/* hwrm_ring_cmpl_ring_cfg_aggint_params_input (size:320b/40B) */
 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 ring_id;
-	__le16 flags;
-	#define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
-	#define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
-	__le16 num_cmpl_dma_aggr;
-	__le16 num_cmpl_dma_aggr_during_int;
-	__le16 cmpl_aggr_dma_tmr;
-	__le16 cmpl_aggr_dma_tmr_during_int;
-	__le16 int_lat_tmr_min;
-	__le16 int_lat_tmr_max;
-	__le16 num_cmpl_aggr_int;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	ring_id;
+	__le16	flags;
+	#define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET     0x1UL
+	#define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE       0x2UL
+	__le16	num_cmpl_dma_aggr;
+	__le16	num_cmpl_dma_aggr_during_int;
+	__le16	cmpl_aggr_dma_tmr;
+	__le16	cmpl_aggr_dma_tmr_during_int;
+	__le16	int_lat_tmr_min;
+	__le16	int_lat_tmr_max;
+	__le16	num_cmpl_aggr_int;
+	u8	unused_0[6];
 };
 
-/* Output (16 bytes) */
+/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */
 struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_ring_reset */
-/* Input (24 bytes) */
+/* hwrm_ring_reset_input (size:192b/24B) */
 struct hwrm_ring_reset_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	u8 ring_type;
-	#define RING_RESET_REQ_RING_TYPE_L2_CMPL		   0x0UL
-	#define RING_RESET_REQ_RING_TYPE_TX			   0x1UL
-	#define RING_RESET_REQ_RING_TYPE_RX			   0x2UL
-	#define RING_RESET_REQ_RING_TYPE_ROCE_CMPL		   0x3UL
-	u8 unused_0;
-	__le16 ring_id;
-	__le32 unused_1;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	u8	ring_type;
+	#define RING_RESET_REQ_RING_TYPE_L2_CMPL   0x0UL
+	#define RING_RESET_REQ_RING_TYPE_TX        0x1UL
+	#define RING_RESET_REQ_RING_TYPE_RX        0x2UL
+	#define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+	#define RING_RESET_REQ_RING_TYPE_LAST     RING_RESET_REQ_RING_TYPE_ROCE_CMPL
+	u8	unused_0;
+	__le16	ring_id;
+	u8	unused_1[4];
 };
 
-/* Output (16 bytes) */
+/* hwrm_ring_reset_output (size:128b/16B) */
 struct hwrm_ring_reset_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_ring_grp_alloc */
-/* Input (24 bytes) */
+/* hwrm_ring_grp_alloc_input (size:192b/24B) */
 struct hwrm_ring_grp_alloc_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 cr;
-	__le16 rr;
-	__le16 ar;
-	__le16 sc;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	cr;
+	__le16	rr;
+	__le16	ar;
+	__le16	sc;
 };
 
-/* Output (16 bytes) */
+/* hwrm_ring_grp_alloc_output (size:128b/16B) */
 struct hwrm_ring_grp_alloc_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 ring_group_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le32	ring_group_id;
+	u8	unused_0[3];
+	u8	valid;
 };
 
-/* hwrm_ring_grp_free */
-/* Input (24 bytes) */
+/* hwrm_ring_grp_free_input (size:192b/24B) */
 struct hwrm_ring_grp_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 ring_group_id;
-	__le32 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	ring_group_id;
+	u8	unused_0[4];
 };
 
-/* Output (16 bytes) */
+/* hwrm_ring_grp_free_output (size:128b/16B) */
 struct hwrm_ring_grp_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_cfa_l2_filter_alloc */
-/* Input (96 bytes) */
+/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */
 struct hwrm_cfa_l2_filter_alloc_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH		    0x1UL
-	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX		   (0x0UL << 0)
-	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX		   (0x1UL << 0)
-	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST    CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
-	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK		    0x2UL
-	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP		    0x4UL
-	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST	    0x8UL
-	__le32 enables;
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR	    0x1UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK       0x2UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN	    0x4UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK      0x8UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN	    0x10UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK      0x20UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR	    0x40UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK     0x80UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN	    0x100UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK    0x200UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN	    0x400UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK    0x800UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE	    0x1000UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID		    0x2000UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE	    0x4000UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID		    0x8000UL
-	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID     0x10000UL
-	u8 l2_addr[6];
-	u8 unused_0;
-	u8 unused_1;
-	u8 l2_addr_mask[6];
-	__le16 l2_ovlan;
-	__le16 l2_ovlan_mask;
-	__le16 l2_ivlan;
-	__le16 l2_ivlan_mask;
-	u8 unused_2;
-	u8 unused_3;
-	u8 t_l2_addr[6];
-	u8 unused_4;
-	u8 unused_5;
-	u8 t_l2_addr_mask[6];
-	__le16 t_l2_ovlan;
-	__le16 t_l2_ovlan_mask;
-	__le16 t_l2_ivlan;
-	__le16 t_l2_ivlan_mask;
-	u8 src_type;
-	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT		   0x0UL
-	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF		   0x1UL
-	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF		   0x2UL
-	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC		   0x3UL
-	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG		   0x4UL
-	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE		   0x5UL
-	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO		   0x6UL
-	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG		   0x7UL
-	u8 unused_6;
-	__le32 src_id;
-	u8 tunnel_type;
-	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL     0x0UL
-	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN	   0x1UL
-	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE	   0x2UL
-	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE	   0x3UL
-	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP	   0x4UL
-	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE	   0x5UL
-	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS	   0x6UL
-	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT	   0x7UL
-	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE	   0x8UL
-	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4      0x9UL
-	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL     0xffUL
-	u8 unused_7;
-	__le16 dst_id;
-	__le16 mirror_vnic_id;
-	u8 pri_hint;
-	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER	   0x0UL
-	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER     0x1UL
-	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER     0x2UL
-	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX		   0x3UL
-	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN		   0x4UL
-	u8 unused_8;
-	__le32 unused_9;
-	__le64 l2_filter_id_hint;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH          0x1UL
+	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX         0x0UL
+	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX         0x1UL
+	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST      CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
+	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK      0x2UL
+	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP          0x4UL
+	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST     0x8UL
+	__le32	enables;
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR             0x1UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK        0x2UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN            0x4UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK       0x8UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN            0x10UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK       0x20UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR           0x40UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK      0x80UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN          0x100UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK     0x200UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN          0x400UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK     0x800UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE            0x1000UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID              0x2000UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE         0x4000UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID              0x8000UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID      0x10000UL
+	u8	l2_addr[6];
+	u8	unused_0[2];
+	u8	l2_addr_mask[6];
+	__le16	l2_ovlan;
+	__le16	l2_ovlan_mask;
+	__le16	l2_ivlan;
+	__le16	l2_ivlan_mask;
+	u8	unused_1[2];
+	u8	t_l2_addr[6];
+	u8	unused_2[2];
+	u8	t_l2_addr_mask[6];
+	__le16	t_l2_ovlan;
+	__le16	t_l2_ovlan_mask;
+	__le16	t_l2_ivlan;
+	__le16	t_l2_ivlan_mask;
+	u8	src_type;
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF    0x1UL
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF    0x2UL
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC  0x3UL
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG  0x4UL
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE   0x5UL
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO  0x6UL
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG  0x7UL
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG
+	u8	unused_3;
+	__le32	src_id;
+	u8	tunnel_type;
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN     0x1UL
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE     0x2UL
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE     0x3UL
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP      0x4UL
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE    0x5UL
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS      0x6UL
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT       0x7UL
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE     0x8UL
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4  0x9UL
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST     CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+	u8	unused_4;
+	__le16	dst_id;
+	__le16	mirror_vnic_id;
+	u8	pri_hint;
+	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER    0x0UL
+	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL
+	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL
+	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX          0x3UL
+	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN          0x4UL
+	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_LAST        CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN
+	u8	unused_5;
+	__le32	unused_6;
+	__le64	l2_filter_id_hint;
 };
 
-/* Output (24 bytes) */
+/* hwrm_cfa_l2_filter_alloc_output (size:192b/24B) */
 struct hwrm_cfa_l2_filter_alloc_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le64 l2_filter_id;
-	__le32 flow_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le64	l2_filter_id;
+	__le32	flow_id;
+	u8	unused_0[3];
+	u8	valid;
 };
 
-/* hwrm_cfa_l2_filter_free */
-/* Input (24 bytes) */
+/* hwrm_cfa_l2_filter_free_input (size:192b/24B) */
 struct hwrm_cfa_l2_filter_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 l2_filter_id;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le64	l2_filter_id;
 };
 
-/* Output (16 bytes) */
+/* hwrm_cfa_l2_filter_free_output (size:128b/16B) */
 struct hwrm_cfa_l2_filter_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_cfa_l2_filter_cfg */
-/* Input (40 bytes) */
+/* hwrm_cfa_l2_filter_cfg_input (size:320b/40B) */
 struct hwrm_cfa_l2_filter_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH		    0x1UL
-	#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX		   (0x0UL << 0)
-	#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX		   (0x1UL << 0)
-	#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST    CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
-	#define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP		    0x2UL
-	__le32 enables;
-	#define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID		    0x1UL
-	#define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID   0x2UL
-	__le64 l2_filter_id;
-	__le32 dst_id;
-	__le32 new_mirror_vnic_id;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH     0x1UL
+	#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX    0x0UL
+	#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX    0x1UL
+	#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
+	#define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP     0x2UL
+	__le32	enables;
+	#define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID                 0x1UL
+	#define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID     0x2UL
+	__le64	l2_filter_id;
+	__le32	dst_id;
+	__le32	new_mirror_vnic_id;
 };
 
-/* Output (16 bytes) */
+/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */
 struct hwrm_cfa_l2_filter_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_cfa_l2_set_rx_mask */
-/* Input (56 bytes) */
+/* hwrm_cfa_l2_set_rx_mask_input (size:448b/56B) */
 struct hwrm_cfa_l2_set_rx_mask_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 vnic_id;
-	__le32 mask;
-	#define CFA_L2_SET_RX_MASK_REQ_MASK_RESERVED		    0x1UL
-	#define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST		    0x2UL
-	#define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST		    0x4UL
-	#define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST		    0x8UL
-	#define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS	    0x10UL
-	#define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST		    0x20UL
-	#define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY		    0x40UL
-	#define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN	    0x80UL
-	#define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN	    0x100UL
-	__le64 mc_tbl_addr;
-	__le32 num_mc_entries;
-	__le32 unused_0;
-	__le64 vlan_tag_tbl_addr;
-	__le32 num_vlan_tags;
-	__le32 unused_1;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	vnic_id;
+	__le32	mask;
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST               0x2UL
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST           0x4UL
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST               0x8UL
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS         0x10UL
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST           0x20UL
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY            0x40UL
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN        0x80UL
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN     0x100UL
+	__le64	mc_tbl_addr;
+	__le32	num_mc_entries;
+	u8	unused_0[4];
+	__le64	vlan_tag_tbl_addr;
+	__le32	num_vlan_tags;
+	u8	unused_1[4];
 };
 
-/* Output (16 bytes) */
+/* hwrm_cfa_l2_set_rx_mask_output (size:128b/16B) */
 struct hwrm_cfa_l2_set_rx_mask_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* Command specific Error Codes (8 bytes) */
+/* hwrm_cfa_l2_set_rx_mask_cmd_err (size:64b/8B) */
 struct hwrm_cfa_l2_set_rx_mask_cmd_err {
-	u8 code;
-	#define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN	   0x0UL
+	u8	code;
+	#define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN                    0x0UL
 	#define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR 0x1UL
-	u8 unused_0[7];
+	#define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST                      CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR
+	u8	unused_0[7];
 };
 
-/* hwrm_cfa_tunnel_filter_alloc */
-/* Input (88 bytes) */
+/* hwrm_cfa_tunnel_filter_alloc_input (size:704b/88B) */
 struct hwrm_cfa_tunnel_filter_alloc_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK	    0x1UL
-	__le32 enables;
-	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID   0x1UL
-	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR	    0x2UL
-	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN       0x4UL
-	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR	    0x8UL
-	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE   0x10UL
-	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL
-	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR      0x40UL
-	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE    0x80UL
-	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI	    0x100UL
-	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID    0x200UL
-	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL
-	__le64 l2_filter_id;
-	u8 l2_addr[6];
-	__le16 l2_ivlan;
-	__le32 l3_addr[4];
-	__le32 t_l3_addr[4];
-	u8 l3_addr_type;
-	u8 t_l3_addr_type;
-	u8 tunnel_type;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK     0x1UL
+	__le32	enables;
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID       0x1UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR            0x2UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN           0x4UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR            0x8UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE       0x10UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE     0x20UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR          0x40UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE        0x80UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI                0x100UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID        0x200UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID     0x400UL
+	__le64	l2_filter_id;
+	u8	l2_addr[6];
+	__le16	l2_ivlan;
+	__le32	l3_addr[4];
+	__le32	t_l3_addr[4];
+	u8	l3_addr_type;
+	u8	t_l3_addr_type;
+	u8	tunnel_type;
 	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
 	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN     0x1UL
 	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE     0x2UL
@@ -4174,158 +4372,204 @@ struct hwrm_cfa_tunnel_filter_alloc_input {
 	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE     0x8UL
 	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4  0x9UL
 	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
-	u8 unused_0;
-	__le32 vni;
-	__le32 dst_vnic_id;
-	__le32 mirror_vnic_id;
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST     CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+	u8	tunnel_flags;
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR     0x1UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1          0x2UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_EXTHDR_SEQNUM_S0         0x4UL
+	__le32	vni;
+	__le32	dst_vnic_id;
+	__le32	mirror_vnic_id;
 };
 
-/* Output (24 bytes) */
+/* hwrm_cfa_tunnel_filter_alloc_output (size:192b/24B) */
 struct hwrm_cfa_tunnel_filter_alloc_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le64 tunnel_filter_id;
-	__le32 flow_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le64	tunnel_filter_id;
+	__le32	flow_id;
+	u8	unused_0[3];
+	u8	valid;
 };
 
-/* hwrm_cfa_tunnel_filter_free */
-/* Input (24 bytes) */
+/* hwrm_cfa_tunnel_filter_free_input (size:192b/24B) */
 struct hwrm_cfa_tunnel_filter_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 tunnel_filter_id;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le64	tunnel_filter_id;
 };
 
-/* Output (16 bytes) */
+/* hwrm_cfa_tunnel_filter_free_output (size:128b/16B) */
 struct hwrm_cfa_tunnel_filter_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_cfa_encap_record_alloc */
-/* Input (32 bytes) */
+/* hwrm_vxlan_ipv4_hdr (size:128b/16B) */
+struct hwrm_vxlan_ipv4_hdr {
+	u8	ver_hlen;
+	#define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK 0xfUL
+	#define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0
+	#define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK      0xf0UL
+	#define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT       4
+	u8	tos;
+	__be16	ip_id;
+	__be16	flags_frag_offset;
+	u8	ttl;
+	u8	protocol;
+	__be32	src_ip_addr;
+	__be32	dest_ip_addr;
+};
+
+/* hwrm_vxlan_ipv6_hdr (size:320b/40B) */
+struct hwrm_vxlan_ipv6_hdr {
+	__be32	ver_tc_flow_label;
+	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT         0x1cUL
+	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK        0xf0000000UL
+	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT          0x14UL
+	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK         0xff00000UL
+	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT  0x0UL
+	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK 0xfffffUL
+	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_LAST           VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK
+	__be16	payload_len;
+	u8	next_hdr;
+	u8	ttl;
+	__be32	src_ip_addr[4];
+	__be32	dest_ip_addr[4];
+};
+
+/* hwrm_cfa_encap_data_vxlan (size:576b/72B) */
+struct hwrm_cfa_encap_data_vxlan {
+	u8	src_mac_addr[6];
+	__le16	unused_0;
+	u8	dst_mac_addr[6];
+	u8	num_vlan_tags;
+	u8	unused_1;
+	__be16	ovlan_tpid;
+	__be16	ovlan_tci;
+	__be16	ivlan_tpid;
+	__be16	ivlan_tci;
+	__le32	l3[10];
+	#define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK 0xfUL
+	#define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 0x4UL
+	#define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 0x6UL
+	#define CFA_ENCAP_DATA_VXLAN_L3_LAST    CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6
+	__be16	src_port;
+	__be16	dst_port;
+	__be32	vni;
+};
+
+/* hwrm_cfa_encap_record_alloc_input (size:832b/104B) */
 struct hwrm_cfa_encap_record_alloc_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK	    0x1UL
-	u8 encap_type;
-	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN       0x1UL
-	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE       0x2UL
-	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE       0x3UL
-	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP	   0x4UL
-	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE      0x5UL
-	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS	   0x6UL
-	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN	   0x7UL
-	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE       0x8UL
-	u8 unused_0;
-	__le16 unused_1;
-	__le32 encap_data[20];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK     0x1UL
+	u8	encap_type;
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN  0x1UL
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE  0x2UL
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE  0x3UL
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP   0x4UL
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS   0x6UL
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN   0x7UL
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE  0x8UL
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST  CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE
+	u8	unused_0[3];
+	__le32	encap_data[20];
 };
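
/*
 * Illustrative sketch, not part of this patch: how a caller might use
 * the new hwrm_cfa_encap_data_vxlan/hwrm_vxlan_ipv4_hdr layouts to
 * build the 80-byte encap_data area of an encap-record-alloc request.
 * Assumes kernel context (ETH_ALEN, IPPROTO_UDP, cpu_to_be16()); the
 * common HWRM header fields are set by the driver's request plumbing
 * and omitted here, and fill_vxlan_encap() is a hypothetical helper.
 */
static void fill_vxlan_encap(struct hwrm_cfa_encap_record_alloc_input *req,
			     const u8 *dmac, const u8 *smac,
			     __be32 sip, __be32 dip, __be32 vni)
{
	struct hwrm_cfa_encap_data_vxlan *encap = (void *)req->encap_data;
	struct hwrm_vxlan_ipv4_hdr *ip4 = (void *)encap->l3;

	req->encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;
	memcpy(encap->dst_mac_addr, dmac, ETH_ALEN);
	memcpy(encap->src_mac_addr, smac, ETH_ALEN);
	/* l3[10] (40B) holds either the 16B IPv4 or the 40B IPv6 header */
	ip4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT |
			5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT;
	ip4->ttl = 64;
	ip4->protocol = IPPROTO_UDP;
	ip4->src_ip_addr = sip;
	ip4->dest_ip_addr = dip;
	encap->dst_port = cpu_to_be16(4789);	/* IANA-assigned VXLAN port */
	encap->vni = vni;			/* 24-bit VNI, network order */
}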
 
-/* Output (16 bytes) */
+/* hwrm_cfa_encap_record_alloc_output (size:128b/16B) */
 struct hwrm_cfa_encap_record_alloc_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 encap_record_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le32	encap_record_id;
+	u8	unused_0[3];
+	u8	valid;
 };
 
-/* hwrm_cfa_encap_record_free */
-/* Input (24 bytes) */
+/* hwrm_cfa_encap_record_free_input (size:192b/24B) */
 struct hwrm_cfa_encap_record_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 encap_record_id;
-	__le32 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	encap_record_id;
+	u8	unused_0[4];
 };
 
-/* Output (16 bytes) */
+/* hwrm_cfa_encap_record_free_output (size:128b/16B) */
 struct hwrm_cfa_encap_record_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_cfa_ntuple_filter_alloc */
-/* Input (128 bytes) */
+/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */
 struct hwrm_cfa_ntuple_filter_alloc_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK	    0x1UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP		    0x2UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER	    0x4UL
-	__le32 enables;
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID   0x1UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE      0x2UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE    0x4UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR    0x8UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE    0x10UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR     0x20UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR     0x80UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL    0x200UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT       0x400UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK  0x800UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT       0x1000UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK  0x2000UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT       0x4000UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID	    0x10000UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR    0x40000UL
-	__le64 l2_filter_id;
-	u8 src_macaddr[6];
-	__be16 ethertype;
-	u8 ip_addr_type;
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN  0x0UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4     0x4UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6     0x6UL
-	u8 ip_protocol;
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN   0x0UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP       0x6UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP       0x11UL
-	__le16 dst_id;
-	__le16 mirror_vnic_id;
-	u8 tunnel_type;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK     0x1UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP         0x2UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER        0x4UL
+	__le32	enables;
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID         0x1UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE            0x2UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE          0x4UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR          0x8UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE          0x10UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR           0x20UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK      0x40UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR           0x80UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK      0x100UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL          0x200UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT             0x400UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK        0x800UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT             0x1000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK        0x2000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT             0x4000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID     0x8000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID               0x10000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID       0x20000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR          0x40000UL
+	__le64	l2_filter_id;
+	u8	src_macaddr[6];
+	__be16	ethertype;
+	u8	ip_addr_type;
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4    0x4UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6    0x6UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST   CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
+	u8	ip_protocol;
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP     0x6UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP     0x11UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST   CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP
+	__le16	dst_id;
+	__le16	mirror_vnic_id;
+	u8	tunnel_type;
 	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
 	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN     0x1UL
 	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE     0x2UL
@@ -4337,2221 +4581,1723 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
 	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE     0x8UL
 	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4  0x9UL
 	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
-	u8 pri_hint;
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER    0x0UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE	   0x1UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW	   0x2UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST      0x3UL
-	#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST       0x4UL
-	__be32 src_ipaddr[4];
-	__be32 src_ipaddr_mask[4];
-	__be32 dst_ipaddr[4];
-	__be32 dst_ipaddr_mask[4];
-	__be16 src_port;
-	__be16 src_port_mask;
-	__be16 dst_port;
-	__be16 dst_port_mask;
-	__le64 ntuple_filter_id_hint;
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST     CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+	u8	pri_hint;
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE     0x1UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW     0x2UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST   0x3UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST    0x4UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LAST     CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST
+	__be32	src_ipaddr[4];
+	__be32	src_ipaddr_mask[4];
+	__be32	dst_ipaddr[4];
+	__be32	dst_ipaddr_mask[4];
+	__be16	src_port;
+	__be16	src_port_mask;
+	__be16	dst_port;
+	__be16	dst_port_mask;
+	__le64	ntuple_filter_id_hint;
 };
 
-/* Output (24 bytes) */
+/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */
 struct hwrm_cfa_ntuple_filter_alloc_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le64 ntuple_filter_id;
-	__le32 flow_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le64	ntuple_filter_id;
+	__le32	flow_id;
+	u8	unused_0[3];
+	u8	valid;
 };
 
-/* Command specific Error Codes (8 bytes) */
+/* hwrm_cfa_ntuple_filter_alloc_cmd_err (size:64b/8B) */
 struct hwrm_cfa_ntuple_filter_alloc_cmd_err {
-	u8 code;
-	#define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN      0x0UL
+	u8	code;
+	#define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN                   0x0UL
 	#define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR 0x1UL
-	u8 unused_0[7];
+	#define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST                     CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR
+	u8	unused_0[7];
 };
 
-/* hwrm_cfa_ntuple_filter_free */
-/* Input (24 bytes) */
+/* hwrm_cfa_ntuple_filter_free_input (size:192b/24B) */
 struct hwrm_cfa_ntuple_filter_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 ntuple_filter_id;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le64	ntuple_filter_id;
 };
 
-/* Output (16 bytes) */
+/* hwrm_cfa_ntuple_filter_free_output (size:128b/16B) */
 struct hwrm_cfa_ntuple_filter_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_cfa_ntuple_filter_cfg */
-/* Input (48 bytes) */
+/* hwrm_cfa_ntuple_filter_cfg_input (size:384b/48B) */
 struct hwrm_cfa_ntuple_filter_cfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 enables;
-	#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID       0x1UL
-	#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
-	#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
-	__le32 unused_0;
-	__le64 ntuple_filter_id;
-	__le32 new_dst_id;
-	__le32 new_mirror_vnic_id;
-	__le16 new_meter_instance_id;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	enables;
+	#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID                0x1UL
+	#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID        0x2UL
+	#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID     0x4UL
+	u8	unused_0[4];
+	__le64	ntuple_filter_id;
+	__le32	new_dst_id;
+	__le32	new_mirror_vnic_id;
+	__le16	new_meter_instance_id;
 	#define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL
-	__le16 unused_1[3];
+	#define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_LAST   CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID
+	u8	unused_1[6];
 };
 
-/* Output (16 bytes) */
+/* hwrm_cfa_ntuple_filter_cfg_output (size:128b/16B) */
 struct hwrm_cfa_ntuple_filter_cfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_cfa_decap_filter_alloc */
-/* Input (104 bytes) */
+/* hwrm_cfa_decap_filter_alloc_input (size:832b/104B) */
 struct hwrm_cfa_decap_filter_alloc_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	#define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL	    0x1UL
-	__le32 enables;
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE     0x1UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID       0x2UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR     0x4UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR     0x8UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID       0x10UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID       0x20UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID     0x40UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID     0x80UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE       0x100UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR      0x200UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR      0x400UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE     0x800UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL     0x1000UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT	    0x2000UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT	    0x4000UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID	    0x8000UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID  0x10000UL
-	__be32 tunnel_id;
-	u8 tunnel_type;
-	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL  0x0UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN      0x1UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE      0x2UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE      0x3UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP       0x4UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE     0x5UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS       0x6UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT	   0x7UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE      0x8UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4   0x9UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL  0xffUL
-	u8 unused_0;
-	__le16 unused_1;
-	u8 src_macaddr[6];
-	u8 unused_2;
-	u8 unused_3;
-	u8 dst_macaddr[6];
-	__be16 ovlan_vid;
-	__be16 ivlan_vid;
-	__be16 t_ovlan_vid;
-	__be16 t_ivlan_vid;
-	__be16 ethertype;
-	u8 ip_addr_type;
-	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN   0x0UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4      0x4UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6      0x6UL
-	u8 ip_protocol;
-	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN    0x0UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP	   0x6UL
-	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP	   0x11UL
-	u8 unused_4;
-	u8 unused_5;
-	u8 unused_6[3];
-	u8 unused_7;
-	__be32 src_ipaddr[4];
-	__be32 dst_ipaddr[4];
-	__be16 src_port;
-	__be16 dst_port;
-	__le16 dst_id;
-	__le16 l2_ctxt_ref_id;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	#define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL     0x1UL
+	__le32	enables;
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE        0x1UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID          0x2UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR        0x4UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR        0x8UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID          0x10UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID          0x20UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID        0x40UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID        0x80UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE          0x100UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR         0x200UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR         0x400UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE        0x800UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL        0x1000UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT           0x2000UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT           0x4000UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID             0x8000UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID     0x10000UL
+	__be32	tunnel_id;
+	u8	tunnel_type;
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN     0x1UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE     0x2UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE     0x3UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP      0x4UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE    0x5UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS      0x6UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT       0x7UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE     0x8UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4  0x9UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST     CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+	u8	unused_0;
+	__le16	unused_1;
+	u8	src_macaddr[6];
+	u8	unused_2[2];
+	u8	dst_macaddr[6];
+	__be16	ovlan_vid;
+	__be16	ivlan_vid;
+	__be16	t_ovlan_vid;
+	__be16	t_ivlan_vid;
+	__be16	ethertype;
+	u8	ip_addr_type;
+	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4    0x4UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6    0x6UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST   CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
+	u8	ip_protocol;
+	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP     0x6UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP     0x11UL
+	#define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST   CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP
+	__le16	unused_3;
+	__le32	unused_4;
+	__be32	src_ipaddr[4];
+	__be32	dst_ipaddr[4];
+	__be16	src_port;
+	__be16	dst_port;
+	__le16	dst_id;
+	__le16	l2_ctxt_ref_id;
 };
 
-/* Output (16 bytes) */
+/* hwrm_cfa_decap_filter_alloc_output (size:128b/16B) */
 struct hwrm_cfa_decap_filter_alloc_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 decap_filter_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le32	decap_filter_id;
+	u8	unused_0[3];
+	u8	valid;
 };
 
-/* hwrm_cfa_decap_filter_free */
-/* Input (24 bytes) */
+/* hwrm_cfa_decap_filter_free_input (size:192b/24B) */
 struct hwrm_cfa_decap_filter_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 decap_filter_id;
-	__le32 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	decap_filter_id;
+	u8	unused_0[4];
 };
 
-/* Output (16 bytes) */
+/* hwrm_cfa_decap_filter_free_output (size:128b/16B) */
 struct hwrm_cfa_decap_filter_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_cfa_flow_alloc */
-/* Input (128 bytes) */
+/* hwrm_cfa_flow_alloc_input (size:1024b/128B) */
 struct hwrm_cfa_flow_alloc_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 flags;
-	#define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL		    0x1UL
-	#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK		    0x6UL
-	#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT		    1
-	#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE		   (0x0UL << 1)
-	#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE		   (0x1UL << 1)
-	#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO		   (0x2UL << 1)
-	#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST    CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
-	#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK		    0x38UL
-	#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT		    3
-	#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2		   (0x0UL << 3)
-	#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4		   (0x1UL << 3)
-	#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6		   (0x2UL << 3)
-	#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST    CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
-	__le16 src_fid;
-	__le32 tunnel_handle;
-	__le16 action_flags;
-	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD		    0x1UL
-	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE	    0x2UL
-	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP		    0x4UL
-	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER		    0x8UL
-	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL		    0x10UL
-	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC	    0x20UL
-	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST	    0x40UL
-	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS   0x80UL
-	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE  0x100UL
-	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT      0x200UL
-	__le16 dst_fid;
-	__be16 l2_rewrite_vlan_tpid;
-	__be16 l2_rewrite_vlan_tci;
-	__le16 act_meter_id;
-	__le16 ref_flow_handle;
-	__be16 ethertype;
-	__be16 outer_vlan_tci;
-	__be16 dmac[3];
-	__be16 inner_vlan_tci;
-	__be16 smac[3];
-	u8 ip_dst_mask_len;
-	u8 ip_src_mask_len;
-	__be32 ip_dst[4];
-	__be32 ip_src[4];
-	__be16 l4_src_port;
-	__be16 l4_src_port_mask;
-	__be16 l4_dst_port;
-	__be16 l4_dst_port_mask;
-	__be32 nat_ip_address[4];
-	__be16 l2_rewrite_dmac[3];
-	__be16 nat_port;
-	__be16 l2_rewrite_smac[3];
-	u8 ip_proto;
-	u8 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	flags;
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL       0x1UL
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE  (0x0UL << 1)
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE   (0x1UL << 1)
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO   (0x2UL << 1)
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2    (0x0UL << 3)
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4  (0x1UL << 3)
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6  (0x2UL << 3)
+	#define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
+	__le16	src_fid;
+	__le32	tunnel_handle;
+	__le16	action_flags;
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD                   0x1UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE               0x2UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP                  0x4UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER                 0x8UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL                0x10UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC               0x20UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST              0x40UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS      0x80UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE     0x100UL
+	#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT         0x200UL
+	__le16	dst_fid;
+	__be16	l2_rewrite_vlan_tpid;
+	__be16	l2_rewrite_vlan_tci;
+	__le16	act_meter_id;
+	__le16	ref_flow_handle;
+	__be16	ethertype;
+	__be16	outer_vlan_tci;
+	__be16	dmac[3];
+	__be16	inner_vlan_tci;
+	__be16	smac[3];
+	u8	ip_dst_mask_len;
+	u8	ip_src_mask_len;
+	__be32	ip_dst[4];
+	__be32	ip_src[4];
+	__be16	l4_src_port;
+	__be16	l4_src_port_mask;
+	__be16	l4_dst_port;
+	__be16	l4_dst_port_mask;
+	__be32	nat_ip_address[4];
+	__be16	l2_rewrite_dmac[3];
+	__be16	nat_port;
+	__be16	l2_rewrite_smac[3];
+	u8	ip_proto;
+	u8	unused_0;
 };
 
-/* Output (16 bytes) */
+/* hwrm_cfa_flow_alloc_output (size:128b/16B) */
 struct hwrm_cfa_flow_alloc_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 flow_handle;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	flow_handle;
+	u8	unused_0[5];
+	u8	valid;
 };
 
-/* hwrm_cfa_flow_free */
-/* Input (24 bytes) */
+/* hwrm_cfa_flow_free_input (size:192b/24B) */
 struct hwrm_cfa_flow_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 flow_handle;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	flow_handle;
+	u8	unused_0[6];
 };
 
-/* Output (32 bytes) */
+/* hwrm_cfa_flow_free_output (size:256b/32B) */
 struct hwrm_cfa_flow_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le64 packet;
-	__le64 byte;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le64	packet;
+	__le64	byte;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_cfa_flow_stats */
-/* Input (40 bytes) */
+/* hwrm_cfa_flow_stats_input (size:320b/40B) */
 struct hwrm_cfa_flow_stats_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 num_flows;
-	__le16 flow_handle_0;
-	__le16 flow_handle_1;
-	__le16 flow_handle_2;
-	__le16 flow_handle_3;
-	__le16 flow_handle_4;
-	__le16 flow_handle_5;
-	__le16 flow_handle_6;
-	__le16 flow_handle_7;
-	__le16 flow_handle_8;
-	__le16 flow_handle_9;
-	__le16 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	num_flows;
+	__le16	flow_handle_0;
+	__le16	flow_handle_1;
+	__le16	flow_handle_2;
+	__le16	flow_handle_3;
+	__le16	flow_handle_4;
+	__le16	flow_handle_5;
+	__le16	flow_handle_6;
+	__le16	flow_handle_7;
+	__le16	flow_handle_8;
+	__le16	flow_handle_9;
+	u8	unused_0[2];
 };
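
/*
 * Illustrative sketch, not part of this patch: a flow-stats request
 * carries up to ten flow handles per call.  This hypothetical helper
 * treats the ten contiguous flow_handle_N fields as an array, which
 * relies on the fixed layout documented above; the response returns
 * matching packet_N/byte_N counters per handle.
 */
static void fill_flow_stats_req(struct hwrm_cfa_flow_stats_input *req,
				const __le16 *handles, int n)
{
	__le16 *slot = &req->flow_handle_0;
	int i;

	req->num_flows = cpu_to_le16(n);	/* at most 10 */
	for (i = 0; i < n && i < 10; i++)
		slot[i] = handles[i];		/* handles already __le16 */
}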
 
-/* Output (176 bytes) */
+/* hwrm_cfa_flow_stats_output (size:1408b/176B) */
 struct hwrm_cfa_flow_stats_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le64 packet_0;
-	__le64 packet_1;
-	__le64 packet_2;
-	__le64 packet_3;
-	__le64 packet_4;
-	__le64 packet_5;
-	__le64 packet_6;
-	__le64 packet_7;
-	__le64 packet_8;
-	__le64 packet_9;
-	__le64 byte_0;
-	__le64 byte_1;
-	__le64 byte_2;
-	__le64 byte_3;
-	__le64 byte_4;
-	__le64 byte_5;
-	__le64 byte_6;
-	__le64 byte_7;
-	__le64 byte_8;
-	__le64 byte_9;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le64	packet_0;
+	__le64	packet_1;
+	__le64	packet_2;
+	__le64	packet_3;
+	__le64	packet_4;
+	__le64	packet_5;
+	__le64	packet_6;
+	__le64	packet_7;
+	__le64	packet_8;
+	__le64	packet_9;
+	__le64	byte_0;
+	__le64	byte_1;
+	__le64	byte_2;
+	__le64	byte_3;
+	__le64	byte_4;
+	__le64	byte_5;
+	__le64	byte_6;
+	__le64	byte_7;
+	__le64	byte_8;
+	__le64	byte_9;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_cfa_vfr_alloc */
-/* Input (32 bytes) */
+/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */
 struct hwrm_cfa_vfr_alloc_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 vf_id;
-	__le16 reserved;
-	__le32 unused_0;
-	char vfr_name[32];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	vf_id;
+	__le16	reserved;
+	u8	unused_0[4];
+	char	vfr_name[32];
 };
 
-/* Output (16 bytes) */
+/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */
 struct hwrm_cfa_vfr_alloc_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 rx_cfa_code;
-	__le16 tx_cfa_action;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	rx_cfa_code;
+	__le16	tx_cfa_action;
+	u8	unused_0[3];
+	u8	valid;
 };
 
-/* hwrm_cfa_vfr_free */
-/* Input (24 bytes) */
+/* hwrm_cfa_vfr_free_input (size:384b/48B) */
 struct hwrm_cfa_vfr_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	char vfr_name[32];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	char	vfr_name[32];
 };
 
-/* Output (16 bytes) */
+/* hwrm_cfa_vfr_free_output (size:128b/16B) */
 struct hwrm_cfa_vfr_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_tunnel_dst_port_query */
-/* Input (24 bytes) */
+/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
 struct hwrm_tunnel_dst_port_query_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	u8 tunnel_type;
-	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN       0x1UL
-	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE      0x5UL
-	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4    0x9UL
-	u8 unused_0[7];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	u8	tunnel_type;
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN    0x1UL
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE   0x5UL
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST    TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4
+	u8	unused_0[7];
 };
 
-/* Output (16 bytes) */
+/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */
 struct hwrm_tunnel_dst_port_query_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 tunnel_dst_port_id;
-	__be16 tunnel_dst_port_val;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	tunnel_dst_port_id;
+	__be16	tunnel_dst_port_val;
+	u8	unused_0[3];
+	u8	valid;
 };
 
-/* hwrm_tunnel_dst_port_alloc */
-/* Input (24 bytes) */
+/* hwrm_tunnel_dst_port_alloc_input (size:192b/24B) */
 struct hwrm_tunnel_dst_port_alloc_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	u8 tunnel_type;
-	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN       0x1UL
-	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE      0x5UL
-	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4    0x9UL
-	u8 unused_0;
-	__be16 tunnel_dst_port_val;
-	__be32 unused_1;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	u8	tunnel_type;
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN    0x1UL
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE   0x5UL
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST    TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4
+	u8	unused_0;
+	__be16	tunnel_dst_port_val;
+	u8	unused_1[4];
 };
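
/*
 * Illustrative sketch, not part of this patch: registering a VXLAN
 * UDP destination port with the NIC.  Note the port value is carried
 * big-endian (cpu_to_be16()), unlike most HWRM fields, which are
 * little-endian; fill_vxlan_port_alloc() is a hypothetical helper.
 */
static void fill_vxlan_port_alloc(struct hwrm_tunnel_dst_port_alloc_input *req,
				  u16 udp_port)
{
	req->tunnel_type = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
	req->tunnel_dst_port_val = cpu_to_be16(udp_port);
}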
 
-/* Output (16 bytes) */
+/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */
 struct hwrm_tunnel_dst_port_alloc_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 tunnel_dst_port_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	tunnel_dst_port_id;
+	u8	unused_0[5];
+	u8	valid;
 };
 
-/* hwrm_tunnel_dst_port_free */
-/* Input (24 bytes) */
+/* hwrm_tunnel_dst_port_free_input (size:192b/24B) */
 struct hwrm_tunnel_dst_port_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	u8 tunnel_type;
-	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN	   0x1UL
-	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE       0x5UL
-	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4     0x9UL
-	u8 unused_0;
-	__le16 tunnel_dst_port_id;
-	__le32 unused_1;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	u8	tunnel_type;
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN    0x1UL
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE   0x5UL
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST    TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4
+	u8	unused_0;
+	__le16	tunnel_dst_port_id;
+	u8	unused_1[4];
 };
 
-/* Output (16 bytes) */
+/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */
 struct hwrm_tunnel_dst_port_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_1[7];
+	u8	valid;
 };
 
-/* hwrm_stat_ctx_alloc */
-/* Input (32 bytes) */
+/* ctx_hw_stats (size:1280b/160B) */
+struct ctx_hw_stats {
+	__le64	rx_ucast_pkts;
+	__le64	rx_mcast_pkts;
+	__le64	rx_bcast_pkts;
+	__le64	rx_discard_pkts;
+	__le64	rx_drop_pkts;
+	__le64	rx_ucast_bytes;
+	__le64	rx_mcast_bytes;
+	__le64	rx_bcast_bytes;
+	__le64	tx_ucast_pkts;
+	__le64	tx_mcast_pkts;
+	__le64	tx_bcast_pkts;
+	__le64	tx_discard_pkts;
+	__le64	tx_drop_pkts;
+	__le64	tx_ucast_bytes;
+	__le64	tx_mcast_bytes;
+	__le64	tx_bcast_bytes;
+	__le64	tpa_pkts;
+	__le64	tpa_bytes;
+	__le64	tpa_events;
+	__le64	tpa_aborts;
+};
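
/*
 * Illustrative sketch, not part of this patch: ctx_hw_stats is the
 * block the device DMAs into the buffer registered via
 * hwrm_stat_ctx_alloc below, so reads should treat it as device-
 * updated memory.  A hypothetical accessor:
 */
static u64 hw_stats_rx_drops(const struct ctx_hw_stats *hw)
{
	/* counters are free-running and little-endian */
	return le64_to_cpu(READ_ONCE(hw->rx_drop_pkts));
}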
+
+/* hwrm_stat_ctx_alloc_input (size:256b/32B) */
 struct hwrm_stat_ctx_alloc_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 stats_dma_addr;
-	__le32 update_period_ms;
-	u8 stat_ctx_flags;
-	#define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE		    0x1UL
-	u8 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le64	stats_dma_addr;
+	__le32	update_period_ms;
+	u8	stat_ctx_flags;
+	#define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE     0x1UL
+	u8	unused_0[3];
 };
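
/*
 * Illustrative sketch, not part of this patch: allocating a stats
 * context hands the device a DMA address for a ctx_hw_stats block
 * plus an update period.  DMA mapping is assumed to happen elsewhere;
 * fill_stat_ctx_alloc() is a hypothetical helper.
 */
static void fill_stat_ctx_alloc(struct hwrm_stat_ctx_alloc_input *req,
				dma_addr_t stats_map, u32 period_ms)
{
	req->stats_dma_addr = cpu_to_le64(stats_map);
	req->update_period_ms = cpu_to_le32(period_ms);
	req->stat_ctx_flags = 0;	/* STAT_CTX_..._FLAGS_ROCE not set */
}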
 
-/* Output (16 bytes) */
+/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
 struct hwrm_stat_ctx_alloc_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 stat_ctx_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le32	stat_ctx_id;
+	u8	unused_0[3];
+	u8	valid;
 };
 
-/* hwrm_stat_ctx_free */
-/* Input (24 bytes) */
+/* hwrm_stat_ctx_free_input (size:192b/24B) */
 struct hwrm_stat_ctx_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 stat_ctx_id;
-	__le32 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	stat_ctx_id;
+	u8	unused_0[4];
 };
 
-/* Output (16 bytes) */
+/* hwrm_stat_ctx_free_output (size:128b/16B) */
 struct hwrm_stat_ctx_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 stat_ctx_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le32	stat_ctx_id;
+	u8	unused_0[3];
+	u8	valid;
 };
 
-/* hwrm_stat_ctx_query */
-/* Input (24 bytes) */
+/* hwrm_stat_ctx_query_input (size:192b/24B) */
 struct hwrm_stat_ctx_query_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 stat_ctx_id;
-	__le32 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	stat_ctx_id;
+	u8	unused_0[4];
 };
 
-/* Output (176 bytes) */
+/* hwrm_stat_ctx_query_output (size:1408b/176B) */
 struct hwrm_stat_ctx_query_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le64 tx_ucast_pkts;
-	__le64 tx_mcast_pkts;
-	__le64 tx_bcast_pkts;
-	__le64 tx_err_pkts;
-	__le64 tx_drop_pkts;
-	__le64 tx_ucast_bytes;
-	__le64 tx_mcast_bytes;
-	__le64 tx_bcast_bytes;
-	__le64 rx_ucast_pkts;
-	__le64 rx_mcast_pkts;
-	__le64 rx_bcast_pkts;
-	__le64 rx_err_pkts;
-	__le64 rx_drop_pkts;
-	__le64 rx_ucast_bytes;
-	__le64 rx_mcast_bytes;
-	__le64 rx_bcast_bytes;
-	__le64 rx_agg_pkts;
-	__le64 rx_agg_bytes;
-	__le64 rx_agg_events;
-	__le64 rx_agg_aborts;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le64	tx_ucast_pkts;
+	__le64	tx_mcast_pkts;
+	__le64	tx_bcast_pkts;
+	__le64	tx_err_pkts;
+	__le64	tx_drop_pkts;
+	__le64	tx_ucast_bytes;
+	__le64	tx_mcast_bytes;
+	__le64	tx_bcast_bytes;
+	__le64	rx_ucast_pkts;
+	__le64	rx_mcast_pkts;
+	__le64	rx_bcast_pkts;
+	__le64	rx_err_pkts;
+	__le64	rx_drop_pkts;
+	__le64	rx_ucast_bytes;
+	__le64	rx_mcast_bytes;
+	__le64	rx_bcast_bytes;
+	__le64	rx_agg_pkts;
+	__le64	rx_agg_bytes;
+	__le64	rx_agg_events;
+	__le64	rx_agg_aborts;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_stat_ctx_clr_stats */
-/* Input (24 bytes) */
+/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */
 struct hwrm_stat_ctx_clr_stats_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 stat_ctx_id;
-	__le32 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	stat_ctx_id;
+	u8	unused_0[4];
 };
 
-/* Output (16 bytes) */
+/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */
 struct hwrm_stat_ctx_clr_stats_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_fw_reset */
-/* Input (24 bytes) */
+/* tx_port_stats (size:3264b/408B) */
+struct tx_port_stats {
+	__le64	tx_64b_frames;
+	__le64	tx_65b_127b_frames;
+	__le64	tx_128b_255b_frames;
+	__le64	tx_256b_511b_frames;
+	__le64	tx_512b_1023b_frames;
+	__le64	tx_1024b_1518_frames;
+	__le64	tx_good_vlan_frames;
+	__le64	tx_1519b_2047_frames;
+	__le64	tx_2048b_4095b_frames;
+	__le64	tx_4096b_9216b_frames;
+	__le64	tx_9217b_16383b_frames;
+	__le64	tx_good_frames;
+	__le64	tx_total_frames;
+	__le64	tx_ucast_frames;
+	__le64	tx_mcast_frames;
+	__le64	tx_bcast_frames;
+	__le64	tx_pause_frames;
+	__le64	tx_pfc_frames;
+	__le64	tx_jabber_frames;
+	__le64	tx_fcs_err_frames;
+	__le64	tx_control_frames;
+	__le64	tx_oversz_frames;
+	__le64	tx_single_dfrl_frames;
+	__le64	tx_multi_dfrl_frames;
+	__le64	tx_single_coll_frames;
+	__le64	tx_multi_coll_frames;
+	__le64	tx_late_coll_frames;
+	__le64	tx_excessive_coll_frames;
+	__le64	tx_frag_frames;
+	__le64	tx_err;
+	__le64	tx_tagged_frames;
+	__le64	tx_dbl_tagged_frames;
+	__le64	tx_runt_frames;
+	__le64	tx_fifo_underruns;
+	__le64	tx_pfc_ena_frames_pri0;
+	__le64	tx_pfc_ena_frames_pri1;
+	__le64	tx_pfc_ena_frames_pri2;
+	__le64	tx_pfc_ena_frames_pri3;
+	__le64	tx_pfc_ena_frames_pri4;
+	__le64	tx_pfc_ena_frames_pri5;
+	__le64	tx_pfc_ena_frames_pri6;
+	__le64	tx_pfc_ena_frames_pri7;
+	__le64	tx_eee_lpi_events;
+	__le64	tx_eee_lpi_duration;
+	__le64	tx_llfc_logical_msgs;
+	__le64	tx_hcfc_msgs;
+	__le64	tx_total_collisions;
+	__le64	tx_bytes;
+	__le64	tx_xthol_frames;
+	__le64	tx_stat_discard;
+	__le64	tx_stat_error;
+};
+
+/* rx_port_stats (size:4224b/528B) */
+struct rx_port_stats {
+	__le64	rx_64b_frames;
+	__le64	rx_65b_127b_frames;
+	__le64	rx_128b_255b_frames;
+	__le64	rx_256b_511b_frames;
+	__le64	rx_512b_1023b_frames;
+	__le64	rx_1024b_1518_frames;
+	__le64	rx_good_vlan_frames;
+	__le64	rx_1519b_2047b_frames;
+	__le64	rx_2048b_4095b_frames;
+	__le64	rx_4096b_9216b_frames;
+	__le64	rx_9217b_16383b_frames;
+	__le64	rx_total_frames;
+	__le64	rx_ucast_frames;
+	__le64	rx_mcast_frames;
+	__le64	rx_bcast_frames;
+	__le64	rx_fcs_err_frames;
+	__le64	rx_ctrl_frames;
+	__le64	rx_pause_frames;
+	__le64	rx_pfc_frames;
+	__le64	rx_unsupported_opcode_frames;
+	__le64	rx_unsupported_da_pausepfc_frames;
+	__le64	rx_wrong_sa_frames;
+	__le64	rx_align_err_frames;
+	__le64	rx_oor_len_frames;
+	__le64	rx_code_err_frames;
+	__le64	rx_false_carrier_frames;
+	__le64	rx_ovrsz_frames;
+	__le64	rx_jbr_frames;
+	__le64	rx_mtu_err_frames;
+	__le64	rx_match_crc_frames;
+	__le64	rx_promiscuous_frames;
+	__le64	rx_tagged_frames;
+	__le64	rx_double_tagged_frames;
+	__le64	rx_trunc_frames;
+	__le64	rx_good_frames;
+	__le64	rx_pfc_xon2xoff_frames_pri0;
+	__le64	rx_pfc_xon2xoff_frames_pri1;
+	__le64	rx_pfc_xon2xoff_frames_pri2;
+	__le64	rx_pfc_xon2xoff_frames_pri3;
+	__le64	rx_pfc_xon2xoff_frames_pri4;
+	__le64	rx_pfc_xon2xoff_frames_pri5;
+	__le64	rx_pfc_xon2xoff_frames_pri6;
+	__le64	rx_pfc_xon2xoff_frames_pri7;
+	__le64	rx_pfc_ena_frames_pri0;
+	__le64	rx_pfc_ena_frames_pri1;
+	__le64	rx_pfc_ena_frames_pri2;
+	__le64	rx_pfc_ena_frames_pri3;
+	__le64	rx_pfc_ena_frames_pri4;
+	__le64	rx_pfc_ena_frames_pri5;
+	__le64	rx_pfc_ena_frames_pri6;
+	__le64	rx_pfc_ena_frames_pri7;
+	__le64	rx_sch_crc_err_frames;
+	__le64	rx_undrsz_frames;
+	__le64	rx_frag_frames;
+	__le64	rx_eee_lpi_events;
+	__le64	rx_eee_lpi_duration;
+	__le64	rx_llfc_physical_msgs;
+	__le64	rx_llfc_logical_msgs;
+	__le64	rx_llfc_msgs_with_crc_err;
+	__le64	rx_hcfc_msgs;
+	__le64	rx_hcfc_msgs_with_crc_err;
+	__le64	rx_bytes;
+	__le64	rx_runt_bytes;
+	__le64	rx_runt_frames;
+	__le64	rx_stat_discard;
+	__le64	rx_stat_err;
+};
+
+/* hwrm_fw_reset_input (size:192b/24B) */
 struct hwrm_fw_reset_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	u8 embedded_proc_type;
-	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT		   0x0UL
-	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT		   0x1UL
-	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL	   0x2UL
-	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE		   0x3UL
-	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST		   0x4UL
-	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP		   0x5UL
-	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP		   0x6UL
-	u8 selfrst_status;
-	#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE	   0x0UL
-	#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP	   0x1UL
-	#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST	   0x2UL
-	u8 host_idx;
-	u8 unused_0[5];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	u8	embedded_proc_type;
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT    0x0UL
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT    0x1UL
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE    0x3UL
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST    0x4UL
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP      0x5UL
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP    0x6UL
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST   FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP
+	u8	selfrst_status;
+	#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE    0x0UL
+	#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP    0x1UL
+	#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+	#define FW_RESET_REQ_SELFRST_STATUS_LAST          FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST
+	u8	host_idx;
+	u8	unused_0[5];
 };
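
/*
 * Illustrative sketch, not part of this patch: requesting a firmware
 * reset of one embedded processor.  Whether selfrst_status is honored
 * in the request direction is firmware-dependent (an assumption here);
 * fill_fw_reset() is a hypothetical helper.
 */
static void fill_fw_reset(struct hwrm_fw_reset_input *req)
{
	req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
	req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
}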
 
-/* Output (16 bytes) */
+/* hwrm_fw_reset_output (size:128b/16B) */
 struct hwrm_fw_reset_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 selfrst_status;
-	#define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE	   0x0UL
-	#define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP	   0x1UL
-	#define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST       0x2UL
-	u8 unused_0;
-	__le16 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	selfrst_status;
+	#define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE    0x0UL
+	#define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP    0x1UL
+	#define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+	#define FW_RESET_RESP_SELFRST_STATUS_LAST          FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST
+	u8	unused_0[6];
+	u8	valid;
 };
 
-/* hwrm_fw_qstatus */
-/* Input (24 bytes) */
+/* hwrm_fw_qstatus_input (size:192b/24B) */
 struct hwrm_fw_qstatus_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	u8 embedded_proc_type;
-	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT		   0x0UL
-	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT		   0x1UL
-	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL	   0x2UL
-	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE		   0x3UL
-	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST		   0x4UL
-	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP		   0x5UL
-	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP		   0x6UL
-	u8 unused_0[7];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	u8	embedded_proc_type;
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT    0x0UL
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT    0x1UL
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE    0x3UL
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST    0x4UL
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP      0x5UL
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP    0x6UL
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_LAST   FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP
+	u8	unused_0[7];
 };
 
-/* Output (16 bytes) */
+/* hwrm_fw_qstatus_output (size:128b/16B) */
 struct hwrm_fw_qstatus_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 selfrst_status;
-	#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE	   0x0UL
-	#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP	   0x1UL
-	#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST     0x2UL
-	u8 unused_0;
-	__le16 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	selfrst_status;
+	#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE    0x0UL
+	#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP    0x1UL
+	#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+	#define FW_QSTATUS_RESP_SELFRST_STATUS_LAST          FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST
+	u8	unused_0[6];
+	u8	valid;
 };
 
-/* hwrm_fw_set_time */
-/* Input (32 bytes) */
+/* hwrm_fw_set_time_input (size:256b/32B) */
 struct hwrm_fw_set_time_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 year;
-	#define FW_SET_TIME_REQ_YEAR_UNKNOWN			   0x0UL
-	u8 month;
-	u8 day;
-	u8 hour;
-	u8 minute;
-	u8 second;
-	u8 unused_0;
-	__le16 millisecond;
-	__le16 zone;
-	#define FW_SET_TIME_REQ_ZONE_UTC			   0x0UL
-	#define FW_SET_TIME_REQ_ZONE_UNKNOWN			   0xffffUL
-	__le32 unused_1;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	year;
+	#define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL
+	#define FW_SET_TIME_REQ_YEAR_LAST   FW_SET_TIME_REQ_YEAR_UNKNOWN
+	u8	month;
+	u8	day;
+	u8	hour;
+	u8	minute;
+	u8	second;
+	u8	unused_0;
+	__le16	millisecond;
+	__le16	zone;
+	#define FW_SET_TIME_REQ_ZONE_UTC     0x0UL
+	#define FW_SET_TIME_REQ_ZONE_UNKNOWN 0xffffUL
+	#define FW_SET_TIME_REQ_ZONE_LAST   FW_SET_TIME_REQ_ZONE_UNKNOWN
+	u8	unused_1[4];
 };
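
/*
 * Illustrative sketch, not part of this patch: pushing wall-clock time
 * to the firmware from a struct tm (note tm_year is years since 1900
 * and tm_mon is zero-based); fill_fw_set_time() is a hypothetical
 * helper and millisecond is simply left at zero.
 */
static void fill_fw_set_time(struct hwrm_fw_set_time_input *req,
			     const struct tm *tm)
{
	req->year = cpu_to_le16(tm->tm_year + 1900);
	req->month = tm->tm_mon + 1;
	req->day = tm->tm_mday;
	req->hour = tm->tm_hour;
	req->minute = tm->tm_min;
	req->second = tm->tm_sec;
	req->zone = cpu_to_le16(FW_SET_TIME_REQ_ZONE_UTC);
}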
 
-/* Output (16 bytes) */
+/* hwrm_fw_set_time_output (size:128b/16B) */
 struct hwrm_fw_set_time_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_fw_set_structured_data */
-/* Input (32 bytes) */
+/* hwrm_struct_hdr (size:128b/16B) */
+struct hwrm_struct_hdr {
+	__le16	struct_id;
+	#define STRUCT_HDR_STRUCT_ID_LLDP_CFG           0x41bUL
+	#define STRUCT_HDR_STRUCT_ID_DCBX_ETS           0x41dUL
+	#define STRUCT_HDR_STRUCT_ID_DCBX_PFC           0x41fUL
+	#define STRUCT_HDR_STRUCT_ID_DCBX_APP           0x421UL
+	#define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
+	#define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC       0x424UL
+	#define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE        0x426UL
+	#define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE         0x1UL
+	#define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION   0xaUL
+	#define STRUCT_HDR_STRUCT_ID_RSS_V2             0x64UL
+	#define STRUCT_HDR_STRUCT_ID_LAST              STRUCT_HDR_STRUCT_ID_RSS_V2
+	__le16	len;
+	u8	version;
+	u8	count;
+	__le16	subtype;
+	__le16	next_offset;
+	#define STRUCT_HDR_NEXT_OFFSET_LAST 0x0UL
+	u8	unused_0[6];
+};
+
+/* hwrm_struct_data_dcbx_app (size:64b/8B) */
+struct hwrm_struct_data_dcbx_app {
+	__be16	protocol_id;
+	u8	protocol_selector;
+	#define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE   0x1UL
+	#define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT     0x2UL
+	#define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT     0x3UL
+	#define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
+	#define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_LAST        STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT
+	u8	priority;
+	u8	valid;
+	u8	unused_0[3];
+};
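
/*
 * Illustrative sketch, not part of this patch: structured data is a
 * hwrm_struct_hdr immediately followed by `count` records, and the
 * hwrm_fw_set_structured_data request below points at such a buffer
 * via src_data_addr, with hdr_cnt counting headers like this one.
 * fill_dcbx_app() is a hypothetical helper mapping a UDP port to a
 * priority; the buffer must be DMA-able in a real driver.
 */
static void fill_dcbx_app(void *buf, u16 udp_port, u8 prio)
{
	struct hwrm_struct_hdr *hdr = buf;
	struct hwrm_struct_data_dcbx_app *app = (void *)(hdr + 1);

	hdr->struct_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
	hdr->len = cpu_to_le16(sizeof(*app));
	hdr->count = 1;
	hdr->next_offset = cpu_to_le16(STRUCT_HDR_NEXT_OFFSET_LAST);

	app->protocol_id = cpu_to_be16(udp_port);
	app->protocol_selector =
		STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT;
	app->priority = prio;
	app->valid = 1;
}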
+
+/* hwrm_fw_set_structured_data_input (size:256b/32B) */
 struct hwrm_fw_set_structured_data_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 src_data_addr;
-	__le16 data_len;
-	u8 hdr_cnt;
-	u8 unused_0[5];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le64	src_data_addr;
+	__le16	data_len;
+	u8	hdr_cnt;
+	u8	unused_0[5];
 };
 
-/* Output (16 bytes) */
+/* hwrm_fw_set_structured_data_output (size:128b/16B) */
 struct hwrm_fw_set_structured_data_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* Command specific Error Codes (8 bytes) */
+/* hwrm_fw_set_structured_data_cmd_err (size:64b/8B) */
 struct hwrm_fw_set_structured_data_cmd_err {
-	u8 code;
-	#define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN       0x0UL
-	#define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT   0x1UL
-	#define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT       0x2UL
-	#define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID	   0x3UL
-	u8 unused_0[7];
+	u8	code;
+	#define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN     0x0UL
+	#define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL
+	#define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT     0x2UL
+	#define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID      0x3UL
+	#define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_LAST       FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID
+	u8	unused_0[7];
 };
 
-/* hwrm_fw_get_structured_data */
-/* Input (32 bytes) */
+/* hwrm_fw_get_structured_data_input (size:256b/32B) */
 struct hwrm_fw_get_structured_data_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 dest_data_addr;
-	__le16 data_len;
-	__le16 structure_id;
-	__le16 subtype;
-	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_ALL		   0xffffUL
-	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN 0x100UL
-	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_PEER 0x101UL
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le64	dest_data_addr;
+	__le16	data_len;
+	__le16	structure_id;
+	__le16	subtype;
+	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_UNUSED                  0x0UL
+	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_ALL                     0xffffUL
+	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN       0x100UL
+	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_PEER        0x101UL
 	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_OPERATIONAL 0x102UL
-	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL
-	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER  0x201UL
-	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL
-	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL
-	u8 count;
-	u8 unused_0;
+	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN          0x200UL
+	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER           0x201UL
+	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL    0x202UL
+	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL        0x300UL
+	#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_LAST                   FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL
+	u8	count;
+	u8	unused_0;
 };
 
-/* Output (16 bytes) */
+/* hwrm_fw_get_structured_data_output (size:128b/16B) */
 struct hwrm_fw_get_structured_data_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 hdr_cnt;
-	u8 unused_0;
-	__le16 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	hdr_cnt;
+	u8	unused_0[6];
+	u8	valid;
 };
 
-/* Command specific Error Codes (8 bytes) */
+/* hwrm_fw_get_structured_data_cmd_err (size:64b/8B) */
 struct hwrm_fw_get_structured_data_cmd_err {
-	u8 code;
-	#define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN       0x0UL
-	#define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID	   0x3UL
-	u8 unused_0[7];
+	u8	code;
+	#define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
+	#define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID  0x3UL
+	#define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_LAST   FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID
+	u8	unused_0[7];
 };
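
The get side is a single DMA read in the other direction: the host supplies
a destination buffer, names the structure_id and subtype it wants, and the
firmware writes back hdr_cnt structured blocks. A rough sketch using the
driver's existing request helpers (their names and the generic timeout are
assumptions here, not something this patch introduces):

	static int query_dcbx_app(struct bnxt *bp, dma_addr_t mapping, u16 len)
	{
		struct hwrm_fw_get_structured_data_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_GET_STRUCTURED_DATA,
				       -1, -1);
		req.dest_data_addr = cpu_to_le64(mapping);
		req.data_len = cpu_to_le16(len);
		req.structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
		req.subtype = cpu_to_le16(
			FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL);
		return hwrm_send_message(bp, &req, sizeof(req),
					 HWRM_CMD_TIMEOUT);
	}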
 
-/* hwrm_exec_fwd_resp */
-/* Input (128 bytes) */
+/* hwrm_exec_fwd_resp_input (size:1024b/128B) */
 struct hwrm_exec_fwd_resp_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 encap_request[26];
-	__le16 encap_resp_target_id;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	encap_request[26];
+	__le16	encap_resp_target_id;
+	u8	unused_0[6];
 };
 
-/* Output (16 bytes) */
+/* hwrm_exec_fwd_resp_output (size:128b/16B) */
 struct hwrm_exec_fwd_resp_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_reject_fwd_resp */
-/* Input (128 bytes) */
+/* hwrm_reject_fwd_resp_input (size:1024b/128B) */
 struct hwrm_reject_fwd_resp_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 encap_request[26];
-	__le16 encap_resp_target_id;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	encap_request[26];
+	__le16	encap_resp_target_id;
+	u8	unused_0[6];
 };
 
-/* Output (16 bytes) */
+/* hwrm_reject_fwd_resp_output (size:128b/16B) */
 struct hwrm_reject_fwd_resp_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_fwd_resp */
-/* Input (40 bytes) */
+/* hwrm_fwd_resp_input (size:1024b/128B) */
 struct hwrm_fwd_resp_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 encap_resp_target_id;
-	__le16 encap_resp_cmpl_ring;
-	__le16 encap_resp_len;
-	u8 unused_0;
-	u8 unused_1;
-	__le64 encap_resp_addr;
-	__le32 encap_resp[24];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	encap_resp_target_id;
+	__le16	encap_resp_cmpl_ring;
+	__le16	encap_resp_len;
+	u8	unused_0;
+	u8	unused_1;
+	__le64	encap_resp_addr;
+	__le32	encap_resp[24];
 };
 
-/* Output (16 bytes) */
+/* hwrm_fwd_resp_output (size:128b/16B) */
 struct hwrm_fwd_resp_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_fwd_async_event_cmpl */
-/* Input (32 bytes) */
+/* hwrm_fwd_async_event_cmpl_input (size:320b/40B) */
 struct hwrm_fwd_async_event_cmpl_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 encap_async_event_target_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2[3];
-	u8 unused_3;
-	__le32 encap_async_event_cmpl[4];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	encap_async_event_target_id;
+	u8	unused_0[6];
+	__le32	encap_async_event_cmpl[4];
 };
 
-/* Output (16 bytes) */
+/* hwrm_fwd_async_event_cmpl_output (size:128b/16B) */
 struct hwrm_fwd_async_event_cmpl_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_temp_monitor_query */
-/* Input (16 bytes) */
+/* hwrm_temp_monitor_query_input (size:128b/16B) */
 struct hwrm_temp_monitor_query_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
 };
 
-/* Output (16 bytes) */
+/* hwrm_temp_monitor_query_output (size:128b/16B) */
 struct hwrm_temp_monitor_query_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 temp;
-	u8 unused_0;
-	__le16 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	temp;
+	u8	unused_0[6];
+	u8	valid;
 };
 
-/* hwrm_wol_filter_alloc */
-/* Input (64 bytes) */
+/* hwrm_wol_filter_alloc_input (size:512b/64B) */
 struct hwrm_wol_filter_alloc_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
-	__le32 enables;
-	#define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS	    0x1UL
-	#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET	    0x2UL
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
+	__le32	enables;
+	#define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS           0x1UL
+	#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET        0x2UL
 	#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_SIZE      0x4UL
 	#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_ADDR      0x8UL
 	#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_ADDR     0x10UL
 	#define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_SIZE     0x20UL
-	__le16 port_id;
-	u8 wol_type;
-	#define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT		   0x0UL
-	#define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP		   0x1UL
-	#define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID		   0xffUL
-	u8 unused_0;
-	__le32 unused_1;
-	u8 mac_address[6];
-	__le16 pattern_offset;
-	__le16 pattern_buf_size;
-	__le16 pattern_mask_size;
-	__le32 unused_2;
-	__le64 pattern_buf_addr;
-	__le64 pattern_mask_addr;
+	__le16	port_id;
+	u8	wol_type;
+	#define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT 0x0UL
+	#define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP      0x1UL
+	#define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID  0xffUL
+	#define WOL_FILTER_ALLOC_REQ_WOL_TYPE_LAST    WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID
+	u8	unused_0[5];
+	u8	mac_address[6];
+	__le16	pattern_offset;
+	__le16	pattern_buf_size;
+	__le16	pattern_mask_size;
+	u8	unused_1[4];
+	__le64	pattern_buf_addr;
+	__le64	pattern_mask_addr;
 };
 
-/* Output (16 bytes) */
+/* hwrm_wol_filter_alloc_output (size:128b/16B) */
 struct hwrm_wol_filter_alloc_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 wol_filter_id;
-	u8 unused_0;
-	__le16 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	wol_filter_id;
+	u8	unused_0[6];
+	u8	valid;
 };
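
Only the fields named in enables need to be filled in a WoL allocation
request; everything else can stay zero. A magic-packet filter, for
instance, needs just the port and the MAC address (a minimal sketch,
assuming the usual bnxt request helpers):

	static int alloc_magicpkt_wol(struct bnxt *bp, u16 port_id,
				      const u8 *mac)
	{
		struct hwrm_wol_filter_alloc_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
		req.port_id = cpu_to_le16(port_id);
		req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
		req.enables =
			cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
		memcpy(req.mac_address, mac, ETH_ALEN);
		return hwrm_send_message(bp, &req, sizeof(req),
					 HWRM_CMD_TIMEOUT);
	}

The wol_filter_id in the response is the handle that hwrm_wol_filter_free
and hwrm_wol_filter_qcfg refer to later.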
 
-/* hwrm_wol_filter_free */
-/* Input (32 bytes) */
+/* hwrm_wol_filter_free_input (size:256b/32B) */
 struct hwrm_wol_filter_free_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 flags;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	flags;
 	#define WOL_FILTER_FREE_REQ_FLAGS_FREE_ALL_WOL_FILTERS     0x1UL
-	__le32 enables;
-	#define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID	    0x1UL
-	__le16 port_id;
-	u8 wol_filter_id;
-	u8 unused_0[5];
+	__le32	enables;
+	#define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID     0x1UL
+	__le16	port_id;
+	u8	wol_filter_id;
+	u8	unused_0[5];
 };
 
-/* Output (16 bytes) */
+/* hwrm_wol_filter_free_output (size:128b/16B) */
 struct hwrm_wol_filter_free_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_wol_filter_qcfg */
-/* Input (56 bytes) */
+/* hwrm_wol_filter_qcfg_input (size:448b/56B) */
 struct hwrm_wol_filter_qcfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 port_id;
-	__le16 handle;
-	__le32 unused_0;
-	__le64 pattern_buf_addr;
-	__le16 pattern_buf_size;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3[3];
-	u8 unused_4;
-	__le64 pattern_mask_addr;
-	__le16 pattern_mask_size;
-	__le16 unused_5[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	port_id;
+	__le16	handle;
+	u8	unused_0[4];
+	__le64	pattern_buf_addr;
+	__le16	pattern_buf_size;
+	u8	unused_1[6];
+	__le64	pattern_mask_addr;
+	__le16	pattern_mask_size;
+	u8	unused_2[6];
 };
 
-/* Output (32 bytes) */
+/* hwrm_wol_filter_qcfg_output (size:256b/32B) */
 struct hwrm_wol_filter_qcfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 next_handle;
-	u8 wol_filter_id;
-	u8 wol_type;
-	#define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT		   0x0UL
-	#define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP		   0x1UL
-	#define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID		   0xffUL
-	__le32 unused_0;
-	u8 mac_address[6];
-	__le16 pattern_offset;
-	__le16 pattern_size;
-	__le16 pattern_mask_size;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	next_handle;
+	u8	wol_filter_id;
+	u8	wol_type;
+	#define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT 0x0UL
+	#define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP      0x1UL
+	#define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID  0xffUL
+	#define WOL_FILTER_QCFG_RESP_WOL_TYPE_LAST    WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID
+	__le32	unused_0;
+	u8	mac_address[6];
+	__le16	pattern_offset;
+	__le16	pattern_size;
+	__le16	pattern_mask_size;
+	u8	unused_1[3];
+	u8	valid;
 };
 
-/* hwrm_wol_reason_qcfg */
-/* Input (40 bytes) */
+/* hwrm_wol_reason_qcfg_input (size:320b/40B) */
 struct hwrm_wol_reason_qcfg_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 port_id;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2[3];
-	u8 unused_3;
-	__le64 wol_pkt_buf_addr;
-	__le16 wol_pkt_buf_size;
-	__le16 unused_4[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	port_id;
+	u8	unused_0[6];
+	__le64	wol_pkt_buf_addr;
+	__le16	wol_pkt_buf_size;
+	u8	unused_1[6];
 };
 
-/* Output (16 bytes) */
+/* hwrm_wol_reason_qcfg_output (size:128b/16B) */
 struct hwrm_wol_reason_qcfg_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 wol_filter_id;
-	u8 wol_reason;
-	#define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT	   0x0UL
-	#define WOL_REASON_QCFG_RESP_WOL_REASON_BMP		   0x1UL
-	#define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID	   0xffUL
-	u8 wol_pkt_len;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	wol_filter_id;
+	u8	wol_reason;
+	#define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT 0x0UL
+	#define WOL_REASON_QCFG_RESP_WOL_REASON_BMP      0x1UL
+	#define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID  0xffUL
+	#define WOL_REASON_QCFG_RESP_WOL_REASON_LAST    WOL_REASON_QCFG_RESP_WOL_REASON_INVALID
+	u8	wol_pkt_len;
+	u8	unused_0[4];
+	u8	valid;
 };
 
-/* hwrm_dbg_read_direct */
-/* Input (32 bytes) */
-struct hwrm_dbg_read_direct_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 host_dest_addr;
-	__le32 read_addr;
-	__le32 read_len32;
-};
-
-/* Output (16 bytes) */
-struct hwrm_dbg_read_direct_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
-};
-
-/* hwrm_nvm_read */
-/* Input (40 bytes) */
+/* hwrm_nvm_read_input (size:320b/40B) */
 struct hwrm_nvm_read_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 host_dest_addr;
-	__le16 dir_idx;
-	u8 unused_0;
-	u8 unused_1;
-	__le32 offset;
-	__le32 len;
-	__le32 unused_2;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le64	host_dest_addr;
+	__le16	dir_idx;
+	u8	unused_0[2];
+	__le32	offset;
+	__le32	len;
+	u8	unused_1[4];
 };
 
-/* Output (16 bytes) */
+/* hwrm_nvm_read_output (size:128b/16B) */
 struct hwrm_nvm_read_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_nvm_get_dir_entries */
-/* Input (24 bytes) */
+/* hwrm_nvm_get_dir_entries_input (size:192b/24B) */
 struct hwrm_nvm_get_dir_entries_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 host_dest_addr;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le64	host_dest_addr;
 };
 
-/* Output (16 bytes) */
+/* hwrm_nvm_get_dir_entries_output (size:128b/16B) */
 struct hwrm_nvm_get_dir_entries_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_nvm_get_dir_info */
-/* Input (16 bytes) */
+/* hwrm_nvm_get_dir_info_input (size:128b/16B) */
 struct hwrm_nvm_get_dir_info_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
 };
 
-/* Output (24 bytes) */
+/* hwrm_nvm_get_dir_info_output (size:192b/24B) */
 struct hwrm_nvm_get_dir_info_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 entries;
-	__le32 entry_length;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le32	entries;
+	__le32	entry_length;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_nvm_write */
-/* Input (48 bytes) */
+/* hwrm_nvm_write_input (size:384b/48B) */
 struct hwrm_nvm_write_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 host_src_addr;
-	__le16 dir_type;
-	__le16 dir_ordinal;
-	__le16 dir_ext;
-	__le16 dir_attr;
-	__le32 dir_data_length;
-	__le16 option;
-	__le16 flags;
-	#define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG	    0x1UL
-	__le32 dir_item_length;
-	__le32 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le64	host_src_addr;
+	__le16	dir_type;
+	__le16	dir_ordinal;
+	__le16	dir_ext;
+	__le16	dir_attr;
+	__le32	dir_data_length;
+	__le16	option;
+	__le16	flags;
+	#define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG     0x1UL
+	__le32	dir_item_length;
+	__le32	unused_0;
 };
 
-/* Output (16 bytes) */
+/* hwrm_nvm_write_output (size:128b/16B) */
 struct hwrm_nvm_write_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 dir_item_length;
-	__le16 dir_idx;
-	u8 unused_0;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le32	dir_item_length;
+	__le16	dir_idx;
+	u8	unused_0;
+	u8	valid;
 };
 
-/* Command specific Error Codes (8 bytes) */
+/* hwrm_nvm_write_cmd_err (size:64b/8B) */
 struct hwrm_nvm_write_cmd_err {
-	u8 code;
-	#define NVM_WRITE_CMD_ERR_CODE_UNKNOWN			   0x0UL
-	#define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR		   0x1UL
-	#define NVM_WRITE_CMD_ERR_CODE_NO_SPACE		   0x2UL
-	u8 unused_0[7];
+	u8	code;
+	#define NVM_WRITE_CMD_ERR_CODE_UNKNOWN  0x0UL
+	#define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+	#define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL
+	#define NVM_WRITE_CMD_ERR_CODE_LAST    NVM_WRITE_CMD_ERR_CODE_NO_SPACE
+	u8	unused_0[7];
 };
 
-/* hwrm_nvm_modify */
-/* Input (40 bytes) */
+/* hwrm_nvm_modify_input (size:320b/40B) */
 struct hwrm_nvm_modify_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 host_src_addr;
-	__le16 dir_idx;
-	u8 unused_0;
-	u8 unused_1;
-	__le32 offset;
-	__le32 len;
-	__le32 unused_2;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le64	host_src_addr;
+	__le16	dir_idx;
+	u8	unused_0[2];
+	__le32	offset;
+	__le32	len;
+	u8	unused_1[4];
 };
 
-/* Output (16 bytes) */
+/* hwrm_nvm_modify_output (size:128b/16B) */
 struct hwrm_nvm_modify_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_nvm_find_dir_entry */
-/* Input (32 bytes) */
+/* hwrm_nvm_find_dir_entry_input (size:256b/32B) */
 struct hwrm_nvm_find_dir_entry_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 enables;
-	#define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID       0x1UL
-	__le16 dir_idx;
-	__le16 dir_type;
-	__le16 dir_ordinal;
-	__le16 dir_ext;
-	u8 opt_ordinal;
-	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK	    0x3UL
-	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT		    0
-	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ		   0x0UL
-	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE		   0x1UL
-	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT		   0x2UL
-	u8 unused_1[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	enables;
+	#define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID     0x1UL
+	__le16	dir_idx;
+	__le16	dir_type;
+	__le16	dir_ordinal;
+	__le16	dir_ext;
+	u8	opt_ordinal;
+	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL
+	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0
+	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ    0x0UL
+	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE    0x1UL
+	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT    0x2UL
+	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_LAST NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT
+	u8	unused_0[3];
 };
 
-/* Output (32 bytes) */
+/* hwrm_nvm_find_dir_entry_output (size:256b/32B) */
 struct hwrm_nvm_find_dir_entry_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 dir_item_length;
-	__le32 dir_data_length;
-	__le32 fw_ver;
-	__le16 dir_ordinal;
-	__le16 dir_idx;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le32	dir_item_length;
+	__le32	dir_data_length;
+	__le32	fw_ver;
+	__le16	dir_ordinal;
+	__le16	dir_idx;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_nvm_erase_dir_entry */
-/* Input (24 bytes) */
+/* hwrm_nvm_erase_dir_entry_input (size:192b/24B) */
 struct hwrm_nvm_erase_dir_entry_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 dir_idx;
-	__le16 unused_0[3];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	dir_idx;
+	u8	unused_0[6];
 };
 
-/* Output (16 bytes) */
+/* hwrm_nvm_erase_dir_entry_output (size:128b/16B) */
 struct hwrm_nvm_erase_dir_entry_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_nvm_get_dev_info */
-/* Input (16 bytes) */
+/* hwrm_nvm_get_dev_info_input (size:128b/16B) */
 struct hwrm_nvm_get_dev_info_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
 };
 
-/* Output (32 bytes) */
+/* hwrm_nvm_get_dev_info_output (size:256b/32B) */
 struct hwrm_nvm_get_dev_info_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 manufacturer_id;
-	__le16 device_id;
-	__le32 sector_size;
-	__le32 nvram_size;
-	__le32 reserved_size;
-	__le32 available_size;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	manufacturer_id;
+	__le16	device_id;
+	__le32	sector_size;
+	__le32	nvram_size;
+	__le32	reserved_size;
+	__le32	available_size;
+	u8	unused_0[3];
+	u8	valid;
 };
 
-/* hwrm_nvm_mod_dir_entry */
-/* Input (32 bytes) */
+/* hwrm_nvm_mod_dir_entry_input (size:256b/32B) */
 struct hwrm_nvm_mod_dir_entry_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 enables;
-	#define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM		    0x1UL
-	__le16 dir_idx;
-	__le16 dir_ordinal;
-	__le16 dir_ext;
-	__le16 dir_attr;
-	__le32 checksum;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	enables;
+	#define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM     0x1UL
+	__le16	dir_idx;
+	__le16	dir_ordinal;
+	__le16	dir_ext;
+	__le16	dir_attr;
+	__le32	checksum;
 };
 
-/* Output (16 bytes) */
+/* hwrm_nvm_mod_dir_entry_output (size:128b/16B) */
 struct hwrm_nvm_mod_dir_entry_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_nvm_verify_update */
-/* Input (24 bytes) */
+/* hwrm_nvm_verify_update_input (size:192b/24B) */
 struct hwrm_nvm_verify_update_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le16 dir_type;
-	__le16 dir_ordinal;
-	__le16 dir_ext;
-	__le16 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le16	dir_type;
+	__le16	dir_ordinal;
+	__le16	dir_ext;
+	u8	unused_0[2];
 };
 
-/* Output (16 bytes) */
+/* hwrm_nvm_verify_update_output (size:128b/16B) */
 struct hwrm_nvm_verify_update_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_nvm_install_update */
-/* Input (24 bytes) */
+/* hwrm_nvm_install_update_input (size:192b/24B) */
 struct hwrm_nvm_install_update_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le32 install_type;
-	#define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL	   0x0UL
-	#define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL	   0xffffffffUL
-	__le16 flags;
-	#define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE    0x1UL
-	#define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG     0x2UL
-	#define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG     0x4UL
-	__le16 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	install_type;
+	#define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL
+	#define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL    0xffffffffUL
+	#define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_LAST  NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL
+	__le16	flags;
+	#define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE     0x1UL
+	#define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG      0x2UL
+	#define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG      0x4UL
+	u8	unused_0[2];
 };
 
-/* Output (24 bytes) */
+/* hwrm_nvm_install_update_output (size:192b/24B) */
 struct hwrm_nvm_install_update_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le64 installed_items;
-	u8 result;
-	#define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS		   0x0UL
-	u8 problem_item;
-	#define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE	   0x0UL
-	#define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE      0xffUL
-	u8 reset_required;
-	#define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE       0x0UL
-	#define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI	   0x1UL
-	#define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER      0x2UL
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le64	installed_items;
+	u8	result;
+	#define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
+	#define NVM_INSTALL_UPDATE_RESP_RESULT_LAST   NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS
+	u8	problem_item;
+	#define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE    0x0UL
+	#define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL
+	#define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_LAST   NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE
+	u8	reset_required;
+	#define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE  0x0UL
+	#define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI   0x1UL
+	#define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER 0x2UL
+	#define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_LAST NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER
+	u8	unused_0[4];
+	u8	valid;
 };
 
-/* Command specific Error Codes (8 bytes) */
+/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */
 struct hwrm_nvm_install_update_cmd_err {
-	u8 code;
-	#define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN	   0x0UL
-	#define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR	   0x1UL
-	#define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE	   0x2UL
-	u8 unused_0[7];
+	u8	code;
+	#define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN  0x0UL
+	#define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+	#define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
+	#define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST    NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE
+	u8	unused_0[7];
 };
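
The usual install flow stages items first (hwrm_nvm_write /
hwrm_nvm_modify), then asks the firmware to install everything staged, and
finally inspects result, problem_item and reset_required in the response.
A minimal sketch of the install step; in practice callers use a much
longer, flash-appropriate timeout than the generic one shown here:

	static int nvm_install_all(struct bnxt *bp)
	{
		struct hwrm_nvm_install_update_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_INSTALL_UPDATE,
				       -1, -1);
		req.install_type =
			cpu_to_le32(NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL);
		return hwrm_send_message(bp, &req, sizeof(req),
					 HWRM_CMD_TIMEOUT);
	}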
 
-/* hwrm_nvm_get_variable */
-/* Input (40 bytes) */
+/* hwrm_nvm_get_variable_input (size:320b/40B) */
 struct hwrm_nvm_get_variable_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 dest_data_addr;
-	__le16 data_len;
-	__le16 option_num;
-	#define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0		   0x0UL
-	#define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF	   0xffffUL
-	__le16 dimensions;
-	__le16 index_0;
-	__le16 index_1;
-	__le16 index_2;
-	__le16 index_3;
-	u8 flags;
-	#define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT	    0x1UL
-	u8 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le64	dest_data_addr;
+	__le16	data_len;
+	__le16	option_num;
+	#define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0    0x0UL
+	#define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
+	#define NVM_GET_VARIABLE_REQ_OPTION_NUM_LAST     NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
+	__le16	dimensions;
+	__le16	index_0;
+	__le16	index_1;
+	__le16	index_2;
+	__le16	index_3;
+	u8	flags;
+	#define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT     0x1UL
+	u8	unused_0;
 };
 
-/* Output (16 bytes) */
+/* hwrm_nvm_get_variable_output (size:128b/16B) */
 struct hwrm_nvm_get_variable_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 data_len;
-	__le16 option_num;
-	#define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0	   0x0UL
-	#define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF	   0xffffUL
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	__le16	data_len;
+	__le16	option_num;
+	#define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0    0x0UL
+	#define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL
+	#define NVM_GET_VARIABLE_RESP_OPTION_NUM_LAST     NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF
+	u8	unused_0[3];
+	u8	valid;
 };
 
-/* Command specific Error Codes (8 bytes) */
+/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */
 struct hwrm_nvm_get_variable_cmd_err {
-	u8 code;
-	#define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN		   0x0UL
-	#define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST       0x1UL
-	#define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR	   0x2UL
-	#define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT       0x3UL
-	u8 unused_0[7];
+	u8	code;
+	#define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN       0x0UL
+	#define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+	#define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR   0x2UL
+	#define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
+	#define NVM_GET_VARIABLE_CMD_ERR_CODE_LAST         NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT
+	u8	unused_0[7];
 };
 
-/* hwrm_nvm_set_variable */
-/* Input (40 bytes) */
+/* hwrm_nvm_set_variable_input (size:320b/40B) */
 struct hwrm_nvm_set_variable_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 src_data_addr;
-	__le16 data_len;
-	__le16 option_num;
-	#define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0		   0x0UL
-	#define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF	   0xffffUL
-	__le16 dimensions;
-	__le16 index_0;
-	__le16 index_1;
-	__le16 index_2;
-	__le16 index_3;
-	u8 flags;
-	#define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH		    0x1UL
-	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK       0xeUL
-	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT	    1
-	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE      (0x0UL << 1)
-	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
-	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST    NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1
-	u8 unused_0;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le64	src_data_addr;
+	__le16	data_len;
+	__le16	option_num;
+	#define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0    0x0UL
+	#define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
+	#define NVM_SET_VARIABLE_REQ_OPTION_NUM_LAST     NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
+	__le16	dimensions;
+	__le16	index_0;
+	__le16	index_1;
+	__le16	index_2;
+	__le16	index_3;
+	u8	flags;
+	#define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH           0x1UL
+	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK     0xeUL
+	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT      1
+	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE       (0x0UL << 1)
+	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1  (0x1UL << 1)
+	#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST      NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1
+	u8	unused_0;
 };
 
-/* Output (16 bytes) */
+/* hwrm_nvm_set_variable_output (size:128b/16B) */
 struct hwrm_nvm_set_variable_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* Command specific Error Codes (8 bytes) */
+/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */
 struct hwrm_nvm_set_variable_cmd_err {
-	u8 code;
-	#define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN		   0x0UL
-	#define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST       0x1UL
-	#define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR	   0x2UL
-	u8 unused_0[7];
+	u8	code;
+	#define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN       0x0UL
+	#define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+	#define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR   0x2UL
+	#define NVM_SET_VARIABLE_CMD_ERR_CODE_LAST         NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR
+	u8	unused_0[7];
 };
 
-/* hwrm_selftest_qlist */
-/* Input (16 bytes) */
+/* hwrm_selftest_qlist_input (size:128b/16B) */
 struct hwrm_selftest_qlist_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
 };
 
-/* Output (280 bytes) */
+/* hwrm_selftest_qlist_output (size:2240b/280B) */
 struct hwrm_selftest_qlist_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 num_tests;
-	u8 available_tests;
-	#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST       0x1UL
-	#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST      0x2UL
-	#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST  0x4UL
-	#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST    0x8UL
-	#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST 0x10UL
-	#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST 0x20UL
-	u8 offline_tests;
-	#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST	    0x1UL
-	#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST	    0x2UL
-	#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST    0x4UL
-	#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST      0x8UL
-	#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST 0x10UL
-	#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST 0x20UL
-	u8 unused_0;
-	__le16 test_timeout;
-	u8 unused_1;
-	u8 unused_2;
-	char test0_name[32];
-	char test1_name[32];
-	char test2_name[32];
-	char test3_name[32];
-	char test4_name[32];
-	char test5_name[32];
-	char test6_name[32];
-	char test7_name[32];
-	__le32 unused_3;
-	u8 unused_4;
-	u8 unused_5;
-	u8 unused_6;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	num_tests;
+	u8	available_tests;
+	#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST                 0x1UL
+	#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST                0x2UL
+	#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST            0x4UL
+	#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST              0x8UL
+	#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST         0x10UL
+	#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST     0x20UL
+	u8	offline_tests;
+	#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST                 0x1UL
+	#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST                0x2UL
+	#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST            0x4UL
+	#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST              0x8UL
+	#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST         0x10UL
+	#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST     0x20UL
+	u8	unused_0;
+	__le16	test_timeout;
+	u8	unused_1[2];
+	char	test0_name[32];
+	char	test1_name[32];
+	char	test2_name[32];
+	char	test3_name[32];
+	char	test4_name[32];
+	char	test5_name[32];
+	char	test6_name[32];
+	char	test7_name[32];
+	u8	unused_2[7];
+	u8	valid;
 };
 
-/* hwrm_selftest_exec */
-/* Input (24 bytes) */
+/* hwrm_selftest_exec_input (size:192b/24B) */
 struct hwrm_selftest_exec_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	u8 flags;
-	#define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST		    0x1UL
-	#define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST		    0x2UL
-	#define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST		    0x4UL
-	#define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST		    0x8UL
-	#define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST	    0x10UL
-	#define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST       0x20UL
-	u8 unused_0[7];
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	u8	flags;
+	#define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST                 0x1UL
+	#define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST                0x2UL
+	#define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST            0x4UL
+	#define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST              0x8UL
+	#define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST         0x10UL
+	#define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST     0x20UL
+	u8	pcie_lane_num;
+	u8	unused_0[6];
 };
 
-/* Output (16 bytes) */
+/* hwrm_selftest_exec_output (size:128b/16B) */
 struct hwrm_selftest_exec_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	u8 requested_tests;
-	#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST	    0x1UL
-	#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST       0x2UL
-	#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST   0x4UL
-	#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST     0x8UL
-	#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST 0x10UL
-	#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST 0x20UL
-	u8 test_success;
-	#define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST	    0x1UL
-	#define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST	    0x2UL
-	#define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST      0x4UL
-	#define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST	    0x8UL
-	#define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST   0x10UL
-	#define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST 0x20UL
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 unused_4;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	requested_tests;
+	#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST                 0x1UL
+	#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST                0x2UL
+	#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST            0x4UL
+	#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST              0x8UL
+	#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST         0x10UL
+	#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST     0x20UL
+	u8	test_success;
+	#define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST                 0x1UL
+	#define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST                0x2UL
+	#define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST            0x4UL
+	#define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST              0x8UL
+	#define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST         0x10UL
+	#define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST     0x20UL
+	u8	unused_0[5];
+	u8	valid;
 };
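
Note that the request side gains a pcie_lane_num selector for the PCIe
serdes test. On completion, the response echoes the requested test mask and
reports per-test pass/fail in the same bit positions, so success is checked
bit by bit, for example:

	/* Illustrative check for a single test bit. */
	static bool link_test_passed(struct hwrm_selftest_exec_output *resp)
	{
		return resp->test_success &
		       SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST;
	}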
 
-/* hwrm_selftest_irq */
-/* Input (16 bytes) */
+/* hwrm_selftest_irq_input (size:128b/16B) */
 struct hwrm_selftest_irq_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
 };
 
-/* Output (16 bytes) */
+/* hwrm_selftest_irq_output (size:128b/16B) */
 struct hwrm_selftest_irq_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 unused_3;
-	u8 valid;
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
 };
 
-/* hwrm_selftest_retrieve_serdes_data */
-/* Input (32 bytes) */
-struct hwrm_selftest_retrieve_serdes_data_input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-	__le64 resp_data_addr;
-	__le32 resp_data_offset;
-	__le16 data_len;
-	u8 flags;
-	#define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_UNUSED_TEST_MASK 0xfUL
-	#define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_UNUSED_TEST_SFT 0
-	#define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL
-	#define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL
-	u8 unused_0;
-};
-
-/* Output (16 bytes) */
-struct hwrm_selftest_retrieve_serdes_data_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le16 total_data_len;
-	__le16 copied_data_len;
-	u8 unused_0;
-	u8 unused_1;
-	u8 unused_2;
-	u8 valid;
-};
-
-/* Hardware Resource Manager Specification */
-/* Input (16 bytes) */
-struct input {
-	__le16 req_type;
-	__le16 cmpl_ring;
-	__le16 seq_id;
-	__le16 target_id;
-	__le64 resp_addr;
-};
-
-/* Output (8 bytes) */
-struct output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-};
-
-/* Short Command Structure (16 bytes) */
-struct hwrm_short_input {
-	__le16 req_type;
-	__le16 signature;
-	#define SHORT_REQ_SIGNATURE_SHORT_CMD			   0x4321UL
-	__le16 unused_0;
-	__le16 size;
-	__le64 req_addr;
-};
-
-/* Command numbering (8 bytes) */
-struct cmd_nums {
-	__le16 req_type;
-	#define HWRM_VER_GET					   (0x0UL)
-	#define HWRM_FUNC_BUF_UNRGTR				   (0xeUL)
-	#define HWRM_FUNC_VF_CFG				   (0xfUL)
-	#define RESERVED1					   (0x10UL)
-	#define HWRM_FUNC_RESET				   (0x11UL)
-	#define HWRM_FUNC_GETFID				   (0x12UL)
-	#define HWRM_FUNC_VF_ALLOC				   (0x13UL)
-	#define HWRM_FUNC_VF_FREE				   (0x14UL)
-	#define HWRM_FUNC_QCAPS				   (0x15UL)
-	#define HWRM_FUNC_QCFG					   (0x16UL)
-	#define HWRM_FUNC_CFG					   (0x17UL)
-	#define HWRM_FUNC_QSTATS				   (0x18UL)
-	#define HWRM_FUNC_CLR_STATS				   (0x19UL)
-	#define HWRM_FUNC_DRV_UNRGTR				   (0x1aUL)
-	#define HWRM_FUNC_VF_RESC_FREE				   (0x1bUL)
-	#define HWRM_FUNC_VF_VNIC_IDS_QUERY			   (0x1cUL)
-	#define HWRM_FUNC_DRV_RGTR				   (0x1dUL)
-	#define HWRM_FUNC_DRV_QVER				   (0x1eUL)
-	#define HWRM_FUNC_BUF_RGTR				   (0x1fUL)
-	#define HWRM_PORT_PHY_CFG				   (0x20UL)
-	#define HWRM_PORT_MAC_CFG				   (0x21UL)
-	#define HWRM_PORT_TS_QUERY				   (0x22UL)
-	#define HWRM_PORT_QSTATS				   (0x23UL)
-	#define HWRM_PORT_LPBK_QSTATS				   (0x24UL)
-	#define HWRM_PORT_CLR_STATS				   (0x25UL)
-	#define HWRM_PORT_LPBK_CLR_STATS			   (0x26UL)
-	#define HWRM_PORT_PHY_QCFG				   (0x27UL)
-	#define HWRM_PORT_MAC_QCFG				   (0x28UL)
-	#define HWRM_PORT_MAC_PTP_QCFG				   (0x29UL)
-	#define HWRM_PORT_PHY_QCAPS				   (0x2aUL)
-	#define HWRM_PORT_PHY_I2C_WRITE			   (0x2bUL)
-	#define HWRM_PORT_PHY_I2C_READ				   (0x2cUL)
-	#define HWRM_PORT_LED_CFG				   (0x2dUL)
-	#define HWRM_PORT_LED_QCFG				   (0x2eUL)
-	#define HWRM_PORT_LED_QCAPS				   (0x2fUL)
-	#define HWRM_QUEUE_QPORTCFG				   (0x30UL)
-	#define HWRM_QUEUE_QCFG				   (0x31UL)
-	#define HWRM_QUEUE_CFG					   (0x32UL)
-	#define HWRM_FUNC_VLAN_CFG				   (0x33UL)
-	#define HWRM_FUNC_VLAN_QCFG				   (0x34UL)
-	#define HWRM_QUEUE_PFCENABLE_QCFG			   (0x35UL)
-	#define HWRM_QUEUE_PFCENABLE_CFG			   (0x36UL)
-	#define HWRM_QUEUE_PRI2COS_QCFG			   (0x37UL)
-	#define HWRM_QUEUE_PRI2COS_CFG				   (0x38UL)
-	#define HWRM_QUEUE_COS2BW_QCFG				   (0x39UL)
-	#define HWRM_QUEUE_COS2BW_CFG				   (0x3aUL)
-	#define HWRM_QUEUE_DSCP_QCAPS				   (0x3bUL)
-	#define HWRM_QUEUE_DSCP2PRI_QCFG			   (0x3cUL)
-	#define HWRM_QUEUE_DSCP2PRI_CFG			   (0x3dUL)
-	#define HWRM_VNIC_ALLOC				   (0x40UL)
-	#define HWRM_VNIC_FREE					   (0x41UL)
-	#define HWRM_VNIC_CFG					   (0x42UL)
-	#define HWRM_VNIC_QCFG					   (0x43UL)
-	#define HWRM_VNIC_TPA_CFG				   (0x44UL)
-	#define HWRM_VNIC_TPA_QCFG				   (0x45UL)
-	#define HWRM_VNIC_RSS_CFG				   (0x46UL)
-	#define HWRM_VNIC_RSS_QCFG				   (0x47UL)
-	#define HWRM_VNIC_PLCMODES_CFG				   (0x48UL)
-	#define HWRM_VNIC_PLCMODES_QCFG			   (0x49UL)
-	#define HWRM_VNIC_QCAPS				   (0x4aUL)
-	#define HWRM_RING_ALLOC				   (0x50UL)
-	#define HWRM_RING_FREE					   (0x51UL)
-	#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS		   (0x52UL)
-	#define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS		   (0x53UL)
-	#define HWRM_RING_RESET				   (0x5eUL)
-	#define HWRM_RING_GRP_ALLOC				   (0x60UL)
-	#define HWRM_RING_GRP_FREE				   (0x61UL)
-	#define RESERVED5					   (0x64UL)
-	#define RESERVED6					   (0x65UL)
-	#define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC			   (0x70UL)
-	#define HWRM_VNIC_RSS_COS_LB_CTX_FREE			   (0x71UL)
-	#define HWRM_CFA_L2_FILTER_ALLOC			   (0x90UL)
-	#define HWRM_CFA_L2_FILTER_FREE			   (0x91UL)
-	#define HWRM_CFA_L2_FILTER_CFG				   (0x92UL)
-	#define HWRM_CFA_L2_SET_RX_MASK			   (0x93UL)
-	#define HWRM_CFA_VLAN_ANTISPOOF_CFG			   (0x94UL)
-	#define HWRM_CFA_TUNNEL_FILTER_ALLOC			   (0x95UL)
-	#define HWRM_CFA_TUNNEL_FILTER_FREE			   (0x96UL)
-	#define HWRM_CFA_ENCAP_RECORD_ALLOC			   (0x97UL)
-	#define HWRM_CFA_ENCAP_RECORD_FREE			   (0x98UL)
-	#define HWRM_CFA_NTUPLE_FILTER_ALLOC			   (0x99UL)
-	#define HWRM_CFA_NTUPLE_FILTER_FREE			   (0x9aUL)
-	#define HWRM_CFA_NTUPLE_FILTER_CFG			   (0x9bUL)
-	#define HWRM_CFA_EM_FLOW_ALLOC				   (0x9cUL)
-	#define HWRM_CFA_EM_FLOW_FREE				   (0x9dUL)
-	#define HWRM_CFA_EM_FLOW_CFG				   (0x9eUL)
-	#define HWRM_TUNNEL_DST_PORT_QUERY			   (0xa0UL)
-	#define HWRM_TUNNEL_DST_PORT_ALLOC			   (0xa1UL)
-	#define HWRM_TUNNEL_DST_PORT_FREE			   (0xa2UL)
-	#define HWRM_STAT_CTX_ALLOC				   (0xb0UL)
-	#define HWRM_STAT_CTX_FREE				   (0xb1UL)
-	#define HWRM_STAT_CTX_QUERY				   (0xb2UL)
-	#define HWRM_STAT_CTX_CLR_STATS			   (0xb3UL)
-	#define HWRM_FW_RESET					   (0xc0UL)
-	#define HWRM_FW_QSTATUS				   (0xc1UL)
-	#define HWRM_FW_SET_TIME				   (0xc8UL)
-	#define HWRM_FW_GET_TIME				   (0xc9UL)
-	#define HWRM_FW_SET_STRUCTURED_DATA			   (0xcaUL)
-	#define HWRM_FW_GET_STRUCTURED_DATA			   (0xcbUL)
-	#define HWRM_FW_IPC_MAILBOX				   (0xccUL)
-	#define HWRM_EXEC_FWD_RESP				   (0xd0UL)
-	#define HWRM_REJECT_FWD_RESP				   (0xd1UL)
-	#define HWRM_FWD_RESP					   (0xd2UL)
-	#define HWRM_FWD_ASYNC_EVENT_CMPL			   (0xd3UL)
-	#define HWRM_TEMP_MONITOR_QUERY			   (0xe0UL)
-	#define HWRM_WOL_FILTER_ALLOC				   (0xf0UL)
-	#define HWRM_WOL_FILTER_FREE				   (0xf1UL)
-	#define HWRM_WOL_FILTER_QCFG				   (0xf2UL)
-	#define HWRM_WOL_REASON_QCFG				   (0xf3UL)
-	#define HWRM_CFA_METER_PROFILE_ALLOC			   (0xf5UL)
-	#define HWRM_CFA_METER_PROFILE_FREE			   (0xf6UL)
-	#define HWRM_CFA_METER_PROFILE_CFG			   (0xf7UL)
-	#define HWRM_CFA_METER_INSTANCE_ALLOC			   (0xf8UL)
-	#define HWRM_CFA_METER_INSTANCE_FREE			   (0xf9UL)
-	#define HWRM_CFA_VFR_ALLOC				   (0xfdUL)
-	#define HWRM_CFA_VFR_FREE				   (0xfeUL)
-	#define HWRM_CFA_VF_PAIR_ALLOC				   (0x100UL)
-	#define HWRM_CFA_VF_PAIR_FREE				   (0x101UL)
-	#define HWRM_CFA_VF_PAIR_INFO				   (0x102UL)
-	#define HWRM_CFA_FLOW_ALLOC				   (0x103UL)
-	#define HWRM_CFA_FLOW_FREE				   (0x104UL)
-	#define HWRM_CFA_FLOW_FLUSH				   (0x105UL)
-	#define HWRM_CFA_FLOW_STATS				   (0x106UL)
-	#define HWRM_CFA_FLOW_INFO				   (0x107UL)
-	#define HWRM_CFA_DECAP_FILTER_ALLOC			   (0x108UL)
-	#define HWRM_CFA_DECAP_FILTER_FREE			   (0x109UL)
-	#define HWRM_CFA_VLAN_ANTISPOOF_QCFG			   (0x10aUL)
-	#define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC		   (0x10bUL)
-	#define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE		   (0x10cUL)
-	#define HWRM_CFA_PAIR_ALLOC				   (0x10dUL)
-	#define HWRM_CFA_PAIR_FREE				   (0x10eUL)
-	#define HWRM_CFA_PAIR_INFO				   (0x10fUL)
-	#define HWRM_FW_IPC_MSG				   (0x110UL)
-	#define HWRM_SELFTEST_QLIST				   (0x200UL)
-	#define HWRM_SELFTEST_EXEC				   (0x201UL)
-	#define HWRM_SELFTEST_IRQ				   (0x202UL)
-	#define HWRM_SELFTEST_RETRIEVE_SERDES_DATA		   (0x203UL)
-	#define HWRM_DBG_READ_DIRECT				   (0xff10UL)
-	#define HWRM_DBG_READ_INDIRECT				   (0xff11UL)
-	#define HWRM_DBG_WRITE_DIRECT				   (0xff12UL)
-	#define HWRM_DBG_WRITE_INDIRECT			   (0xff13UL)
-	#define HWRM_DBG_DUMP					   (0xff14UL)
-	#define HWRM_DBG_ERASE_NVM				   (0xff15UL)
-	#define HWRM_DBG_CFG					   (0xff16UL)
-	#define HWRM_DBG_COREDUMP_LIST				   (0xff17UL)
-	#define HWRM_DBG_COREDUMP_INITIATE			   (0xff18UL)
-	#define HWRM_DBG_COREDUMP_RETRIEVE			   (0xff19UL)
-	#define HWRM_NVM_FACTORY_DEFAULTS			   (0xffeeUL)
-	#define HWRM_NVM_VALIDATE_OPTION			   (0xffefUL)
-	#define HWRM_NVM_FLUSH					   (0xfff0UL)
-	#define HWRM_NVM_GET_VARIABLE				   (0xfff1UL)
-	#define HWRM_NVM_SET_VARIABLE				   (0xfff2UL)
-	#define HWRM_NVM_INSTALL_UPDATE			   (0xfff3UL)
-	#define HWRM_NVM_MODIFY				   (0xfff4UL)
-	#define HWRM_NVM_VERIFY_UPDATE				   (0xfff5UL)
-	#define HWRM_NVM_GET_DEV_INFO				   (0xfff6UL)
-	#define HWRM_NVM_ERASE_DIR_ENTRY			   (0xfff7UL)
-	#define HWRM_NVM_MOD_DIR_ENTRY				   (0xfff8UL)
-	#define HWRM_NVM_FIND_DIR_ENTRY			   (0xfff9UL)
-	#define HWRM_NVM_GET_DIR_ENTRIES			   (0xfffaUL)
-	#define HWRM_NVM_GET_DIR_INFO				   (0xfffbUL)
-	#define HWRM_NVM_RAW_DUMP				   (0xfffcUL)
-	#define HWRM_NVM_READ					   (0xfffdUL)
-	#define HWRM_NVM_WRITE					   (0xfffeUL)
-	#define HWRM_NVM_RAW_WRITE_BLK				   (0xffffUL)
-	__le16 unused_0[3];
-};
-
-/* Return Codes (8 bytes) */
-struct ret_codes {
-	__le16 error_code;
-	#define HWRM_ERR_CODE_SUCCESS				   (0x0UL)
-	#define HWRM_ERR_CODE_FAIL				   (0x1UL)
-	#define HWRM_ERR_CODE_INVALID_PARAMS			   (0x2UL)
-	#define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED		   (0x3UL)
-	#define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR		   (0x4UL)
-	#define HWRM_ERR_CODE_INVALID_FLAGS			   (0x5UL)
-	#define HWRM_ERR_CODE_INVALID_ENABLES			   (0x6UL)
-	#define HWRM_ERR_CODE_HWRM_ERROR			   (0xfUL)
-	#define HWRM_ERR_CODE_UNKNOWN_ERR			   (0xfffeUL)
-	#define HWRM_ERR_CODE_CMD_NOT_SUPPORTED		   (0xffffUL)
-	__le16 unused_0[3];
-};
-
-/* Output (16 bytes) */
-struct hwrm_err_output {
-	__le16 error_code;
-	__le16 req_type;
-	__le16 seq_id;
-	__le16 resp_len;
-	__le32 opaque_0;
-	__le16 opaque_1;
-	u8 cmd_err;
-	u8 valid;
-};
-
-/* Port Tx Statistics Formats (408 bytes) */
-struct tx_port_stats {
-	__le64 tx_64b_frames;
-	__le64 tx_65b_127b_frames;
-	__le64 tx_128b_255b_frames;
-	__le64 tx_256b_511b_frames;
-	__le64 tx_512b_1023b_frames;
-	__le64 tx_1024b_1518_frames;
-	__le64 tx_good_vlan_frames;
-	__le64 tx_1519b_2047_frames;
-	__le64 tx_2048b_4095b_frames;
-	__le64 tx_4096b_9216b_frames;
-	__le64 tx_9217b_16383b_frames;
-	__le64 tx_good_frames;
-	__le64 tx_total_frames;
-	__le64 tx_ucast_frames;
-	__le64 tx_mcast_frames;
-	__le64 tx_bcast_frames;
-	__le64 tx_pause_frames;
-	__le64 tx_pfc_frames;
-	__le64 tx_jabber_frames;
-	__le64 tx_fcs_err_frames;
-	__le64 tx_control_frames;
-	__le64 tx_oversz_frames;
-	__le64 tx_single_dfrl_frames;
-	__le64 tx_multi_dfrl_frames;
-	__le64 tx_single_coll_frames;
-	__le64 tx_multi_coll_frames;
-	__le64 tx_late_coll_frames;
-	__le64 tx_excessive_coll_frames;
-	__le64 tx_frag_frames;
-	__le64 tx_err;
-	__le64 tx_tagged_frames;
-	__le64 tx_dbl_tagged_frames;
-	__le64 tx_runt_frames;
-	__le64 tx_fifo_underruns;
-	__le64 tx_pfc_ena_frames_pri0;
-	__le64 tx_pfc_ena_frames_pri1;
-	__le64 tx_pfc_ena_frames_pri2;
-	__le64 tx_pfc_ena_frames_pri3;
-	__le64 tx_pfc_ena_frames_pri4;
-	__le64 tx_pfc_ena_frames_pri5;
-	__le64 tx_pfc_ena_frames_pri6;
-	__le64 tx_pfc_ena_frames_pri7;
-	__le64 tx_eee_lpi_events;
-	__le64 tx_eee_lpi_duration;
-	__le64 tx_llfc_logical_msgs;
-	__le64 tx_hcfc_msgs;
-	__le64 tx_total_collisions;
-	__le64 tx_bytes;
-	__le64 tx_xthol_frames;
-	__le64 tx_stat_discard;
-	__le64 tx_stat_error;
-};
-
-/* Port Rx Statistics Formats (528 bytes) */
-struct rx_port_stats {
-	__le64 rx_64b_frames;
-	__le64 rx_65b_127b_frames;
-	__le64 rx_128b_255b_frames;
-	__le64 rx_256b_511b_frames;
-	__le64 rx_512b_1023b_frames;
-	__le64 rx_1024b_1518_frames;
-	__le64 rx_good_vlan_frames;
-	__le64 rx_1519b_2047b_frames;
-	__le64 rx_2048b_4095b_frames;
-	__le64 rx_4096b_9216b_frames;
-	__le64 rx_9217b_16383b_frames;
-	__le64 rx_total_frames;
-	__le64 rx_ucast_frames;
-	__le64 rx_mcast_frames;
-	__le64 rx_bcast_frames;
-	__le64 rx_fcs_err_frames;
-	__le64 rx_ctrl_frames;
-	__le64 rx_pause_frames;
-	__le64 rx_pfc_frames;
-	__le64 rx_unsupported_opcode_frames;
-	__le64 rx_unsupported_da_pausepfc_frames;
-	__le64 rx_wrong_sa_frames;
-	__le64 rx_align_err_frames;
-	__le64 rx_oor_len_frames;
-	__le64 rx_code_err_frames;
-	__le64 rx_false_carrier_frames;
-	__le64 rx_ovrsz_frames;
-	__le64 rx_jbr_frames;
-	__le64 rx_mtu_err_frames;
-	__le64 rx_match_crc_frames;
-	__le64 rx_promiscuous_frames;
-	__le64 rx_tagged_frames;
-	__le64 rx_double_tagged_frames;
-	__le64 rx_trunc_frames;
-	__le64 rx_good_frames;
-	__le64 rx_pfc_xon2xoff_frames_pri0;
-	__le64 rx_pfc_xon2xoff_frames_pri1;
-	__le64 rx_pfc_xon2xoff_frames_pri2;
-	__le64 rx_pfc_xon2xoff_frames_pri3;
-	__le64 rx_pfc_xon2xoff_frames_pri4;
-	__le64 rx_pfc_xon2xoff_frames_pri5;
-	__le64 rx_pfc_xon2xoff_frames_pri6;
-	__le64 rx_pfc_xon2xoff_frames_pri7;
-	__le64 rx_pfc_ena_frames_pri0;
-	__le64 rx_pfc_ena_frames_pri1;
-	__le64 rx_pfc_ena_frames_pri2;
-	__le64 rx_pfc_ena_frames_pri3;
-	__le64 rx_pfc_ena_frames_pri4;
-	__le64 rx_pfc_ena_frames_pri5;
-	__le64 rx_pfc_ena_frames_pri6;
-	__le64 rx_pfc_ena_frames_pri7;
-	__le64 rx_sch_crc_err_frames;
-	__le64 rx_undrsz_frames;
-	__le64 rx_frag_frames;
-	__le64 rx_eee_lpi_events;
-	__le64 rx_eee_lpi_duration;
-	__le64 rx_llfc_physical_msgs;
-	__le64 rx_llfc_logical_msgs;
-	__le64 rx_llfc_msgs_with_crc_err;
-	__le64 rx_hcfc_msgs;
-	__le64 rx_hcfc_msgs_with_crc_err;
-	__le64 rx_bytes;
-	__le64 rx_runt_bytes;
-	__le64 rx_runt_frames;
-	__le64 rx_stat_discard;
-	__le64 rx_stat_err;
-};
-
-/* VXLAN IPv4 encapsulation structure (16 bytes) */
-struct hwrm_vxlan_ipv4_hdr {
-	u8 ver_hlen;
-	#define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK	    0xfUL
-	#define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT	    0
-	#define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK		    0xf0UL
-	#define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT		    4
-	u8 tos;
-	__be16 ip_id;
-	__be16 flags_frag_offset;
-	u8 ttl;
-	u8 protocol;
-	__be32 src_ip_addr;
-	__be32 dest_ip_addr;
-};
-
-/* VXLAN IPv6 encapsulation structure (32 bytes) */
-struct hwrm_vxlan_ipv6_hdr {
-	__be32 ver_tc_flow_label;
-	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT	   0x1cUL
-	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK	   0xf0000000UL
-	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT	   0x14UL
-	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK	   0xff00000UL
-	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT   0x0UL
-	#define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK  0xfffffUL
-	__be16 payload_len;
-	u8 next_hdr;
-	u8 ttl;
-	__be32 src_ip_addr[4];
-	__be32 dest_ip_addr[4];
-};
-
-/* VXLAN encapsulation structure (72 bytes) */
-struct hwrm_cfa_encap_data_vxlan {
-	u8 src_mac_addr[6];
-	__le16 unused_0;
-	u8 dst_mac_addr[6];
-	u8 num_vlan_tags;
-	u8 unused_1;
-	__be16 ovlan_tpid;
-	__be16 ovlan_tci;
-	__be16 ivlan_tpid;
-	__be16 ivlan_tci;
-	__le32 l3[10];
-	#define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK		   0xfUL
-	#define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4		   0x4UL
-	#define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6		   0x6UL
-	__be16 src_port;
-	__be16 dst_port;
-	__be32 vni;
-};
-
-/* Periodic Statistics Context DMA to host (160 bytes) */
-struct ctx_hw_stats {
-	__le64 rx_ucast_pkts;
-	__le64 rx_mcast_pkts;
-	__le64 rx_bcast_pkts;
-	__le64 rx_discard_pkts;
-	__le64 rx_drop_pkts;
-	__le64 rx_ucast_bytes;
-	__le64 rx_mcast_bytes;
-	__le64 rx_bcast_bytes;
-	__le64 tx_ucast_pkts;
-	__le64 tx_mcast_pkts;
-	__le64 tx_bcast_pkts;
-	__le64 tx_discard_pkts;
-	__le64 tx_drop_pkts;
-	__le64 tx_ucast_bytes;
-	__le64 tx_mcast_bytes;
-	__le64 tx_bcast_bytes;
-	__le64 tpa_pkts;
-	__le64 tpa_bytes;
-	__le64 tpa_events;
-	__le64 tpa_aborts;
-};
-
-/* Structure data header (16 bytes) */
-struct hwrm_struct_hdr {
-	__le16 struct_id;
-	#define STRUCT_HDR_STRUCT_ID_LLDP_CFG			   0x41bUL
-	#define STRUCT_HDR_STRUCT_ID_DCBX_ETS			   0x41dUL
-	#define STRUCT_HDR_STRUCT_ID_DCBX_PFC			   0x41fUL
-	#define STRUCT_HDR_STRUCT_ID_DCBX_APP			   0x421UL
-	#define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE	   0x422UL
-	#define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC		   0x424UL
-	#define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE		   0x426UL
-	#define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE		   0x1UL
-	#define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION		   0xaUL
-	#define STRUCT_HDR_STRUCT_ID_RSS_V2			   0x64UL
-	__le16 len;
-	u8 version;
-	u8 count;
-	__le16 subtype;
-	__le16 next_offset;
-	#define STRUCT_HDR_NEXT_OFFSET_LAST			   0x0UL
-	__le16 unused_0[3];
-};
-
-/* DCBX Application configuration structure (1057) (8 bytes) */
-struct hwrm_struct_data_dcbx_app {
-	__be16 protocol_id;
-	u8 protocol_selector;
-	#define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
-	#define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT   0x2UL
-	#define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT   0x3UL
-	#define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
-	u8 priority;
-	u8 valid;
-	u8 unused_0[3];
-};
-
-#endif
+#endif /* _BNXT_HSI_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index c961767..d87faad 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -135,7 +135,10 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id,
 	ivi->vf = vf_id;
 	vf = &bp->pf.vf[vf_id];
 
-	memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
+	if (is_valid_ether_addr(vf->mac_addr))
+		memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
+	else
+		memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
 	ivi->max_tx_rate = vf->max_tx_rate;
 	ivi->min_tx_rate = vf->min_tx_rate;
 	ivi->vlan = vf->vlan;
@@ -416,29 +419,126 @@ static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 }
 
-/* only call by PF to reserve resources for VF */
+/* Only called by the PF to reserve resources for VFs, returns the actual
+ * number of VFs configured, or < 0 on error.
+ */
+static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
+{
+	struct hwrm_func_vf_resource_cfg_input req = {0};
+	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
+	u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
+	struct bnxt_pf_info *pf = &bp->pf;
+	int i, rc = 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
+
+	vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings;
+	vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
+	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
+	else
+		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
+	vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
+	vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
+	vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
+	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
+
+	req.min_rsscos_ctx = cpu_to_le16(1);
+	req.max_rsscos_ctx = cpu_to_le16(1);
+	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL) {
+		req.min_cmpl_rings = cpu_to_le16(1);
+		req.min_tx_rings = cpu_to_le16(1);
+		req.min_rx_rings = cpu_to_le16(1);
+		req.min_l2_ctxs = cpu_to_le16(1);
+		req.min_vnics = cpu_to_le16(1);
+		req.min_stat_ctx = cpu_to_le16(1);
+		req.min_hw_ring_grps = cpu_to_le16(1);
+	} else {
+		vf_cp_rings /= num_vfs;
+		vf_tx_rings /= num_vfs;
+		vf_rx_rings /= num_vfs;
+		vf_vnics /= num_vfs;
+		vf_stat_ctx /= num_vfs;
+		vf_ring_grps /= num_vfs;
+
+		req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
+		req.min_tx_rings = cpu_to_le16(vf_tx_rings);
+		req.min_rx_rings = cpu_to_le16(vf_rx_rings);
+		req.min_l2_ctxs = cpu_to_le16(4);
+		req.min_vnics = cpu_to_le16(vf_vnics);
+		req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
+		req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+	}
+	req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
+	req.max_tx_rings = cpu_to_le16(vf_tx_rings);
+	req.max_rx_rings = cpu_to_le16(vf_rx_rings);
+	req.max_l2_ctxs = cpu_to_le16(4);
+	req.max_vnics = cpu_to_le16(vf_vnics);
+	req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
+	req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	for (i = 0; i < num_vfs; i++) {
+		req.vf_id = cpu_to_le16(pf->first_vf_id + i);
+		rc = _hwrm_send_message(bp, &req, sizeof(req),
+					HWRM_CMD_TIMEOUT);
+		if (rc) {
+			rc = -ENOMEM;
+			break;
+		}
+		pf->active_vfs = i + 1;
+		pf->vf[i].fw_fid = pf->first_vf_id + i;
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	if (pf->active_vfs) {
+		u16 n = 1;
+
+		if (pf->vf_resv_strategy != BNXT_VF_RESV_STRATEGY_MINIMAL)
+			n = pf->active_vfs;
+
+		hw_resc->max_tx_rings -= vf_tx_rings * n;
+		hw_resc->max_rx_rings -= vf_rx_rings * n;
+		hw_resc->max_hw_ring_grps -= vf_ring_grps * n;
+		hw_resc->max_cp_rings -= vf_cp_rings * n;
+		hw_resc->max_rsscos_ctxs -= pf->active_vfs;
+		hw_resc->max_stat_ctxs -= vf_stat_ctx * n;
+		hw_resc->max_vnics -= vf_vnics * n;
+
+		rc = pf->active_vfs;
+	}
+	return rc;
+}
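
To make the even-split strategy concrete: whatever the PF is not using is
divided evenly across the VFs, and VNICs are capped by RX rings since a VNIC
is useless without one.  A minimal user-space sketch of the same arithmetic,
with hypothetical totals standing in for the values firmware would report:

  #include <stdio.h>

  int main(void)
  {
  	/* hypothetical totals; the driver reads these from firmware */
  	int max_tx_rings = 128, max_rx_rings = 128, max_vnics = 96;
  	int pf_tx_rings = 8, pf_rx_rings = 8, pf_vnics = 1;
  	int num_vfs = 8;

  	int vf_tx_rings = (max_tx_rings - pf_tx_rings) / num_vfs;
  	int vf_rx_rings = (max_rx_rings - pf_rx_rings) / num_vfs;
  	int vf_vnics = (max_vnics - pf_vnics) / num_vfs;

  	/* a VNIC needs an RX ring, so cap it */
  	if (vf_vnics > vf_rx_rings)
  		vf_vnics = vf_rx_rings;

  	printf("per VF: %d tx rings, %d rx rings, %d vnics\n",
  	       vf_tx_rings, vf_rx_rings, vf_vnics);
  	return 0;
  }
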
+
+/* Only called by the PF to reserve resources for VFs, returns the actual
+ * number of VFs configured, or < 0 on error.
+ */
 static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
 {
 	u32 rc = 0, mtu, i;
 	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
-	u16 vf_ring_grps;
+	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+	u16 vf_ring_grps, max_stat_ctxs;
 	struct hwrm_func_cfg_input req = {0};
 	struct bnxt_pf_info *pf = &bp->pf;
 	int total_vf_tx_rings = 0;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 
+	max_stat_ctxs = hw_resc->max_stat_ctxs;
+
 	/* Remaining rings are distributed equally amongst VFs for now */
-	vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs;
-	vf_stat_ctx = (pf->max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
+	vf_cp_rings = (hw_resc->max_cp_rings - bp->cp_nr_rings) / num_vfs;
+	vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
-		vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings * 2) /
+		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
 			      num_vfs;
 	else
-		vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs;
-	vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
-	vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs;
-	vf_vnics = (pf->max_vnics - bp->nr_vnics) / num_vfs;
+		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
+			      num_vfs;
+	vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
+	vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
+	vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
 	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
 
 	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
@@ -485,22 +585,34 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
 		total_vf_tx_rings += vf_tx_rsvd;
 	}
 	mutex_unlock(&bp->hwrm_cmd_lock);
-	if (!rc) {
-		pf->max_tx_rings -= total_vf_tx_rings;
-		pf->max_rx_rings -= vf_rx_rings * num_vfs;
-		pf->max_hw_ring_grps -= vf_ring_grps * num_vfs;
-		pf->max_cp_rings -= vf_cp_rings * num_vfs;
-		pf->max_rsscos_ctxs -= num_vfs;
-		pf->max_stat_ctxs -= vf_stat_ctx * num_vfs;
-		pf->max_vnics -= vf_vnics * num_vfs;
+	if (rc)
+		rc = -ENOMEM;
+	if (pf->active_vfs) {
+		hw_resc->max_tx_rings -= total_vf_tx_rings;
+		hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
+		hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
+		hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
+		hw_resc->max_rsscos_ctxs -= num_vfs;
+		hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
+		hw_resc->max_vnics -= vf_vnics * num_vfs;
+		rc = pf->active_vfs;
 	}
 	return rc;
 }
 
+static int bnxt_func_cfg(struct bnxt *bp, int num_vfs)
+{
+	if (bp->flags & BNXT_FLAG_NEW_RM)
+		return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs);
+	else
+		return bnxt_hwrm_func_cfg(bp, num_vfs);
+}
+
 static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
 {
 	int rc = 0, vfs_supported;
 	int min_rx_rings, min_tx_rings, min_rss_ctxs;
+	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 	int tx_ok = 0, rx_ok = 0, rss_ok = 0;
 	int avail_cp, avail_stat;
 
@@ -510,8 +622,8 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
 	 */
 	vfs_supported = *num_vfs;
 
-	avail_cp = bp->pf.max_cp_rings - bp->cp_nr_rings;
-	avail_stat = bp->pf.max_stat_ctxs - bp->num_stat_ctxs;
+	avail_cp = hw_resc->max_cp_rings - bp->cp_nr_rings;
+	avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
 	avail_cp = min_t(int, avail_cp, avail_stat);
 
 	while (vfs_supported) {
@@ -520,23 +632,24 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
 		min_rss_ctxs = vfs_supported;
 
 		if (bp->flags & BNXT_FLAG_AGG_RINGS) {
-			if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >=
+			if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
 			    min_rx_rings)
 				rx_ok = 1;
 		} else {
-			if (bp->pf.max_rx_rings - bp->rx_nr_rings >=
+			if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
 			    min_rx_rings)
 				rx_ok = 1;
 		}
-		if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings ||
+		if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
 		    avail_cp < min_rx_rings)
 			rx_ok = 0;
 
-		if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
+		if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
 		    avail_cp >= min_tx_rings)
 			tx_ok = 1;
 
-		if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
+		if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
+		    min_rss_ctxs)
 			rss_ok = 1;
 
 		if (tx_ok && rx_ok && rss_ok)
@@ -561,9 +674,16 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
 		goto err_out1;
 
 	/* Reserve resources for VFs */
-	rc = bnxt_hwrm_func_cfg(bp, *num_vfs);
-	if (rc)
-		goto err_out2;
+	rc = bnxt_func_cfg(bp, *num_vfs);
+	if (rc != *num_vfs) {
+		if (rc <= 0) {
+			netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
+			*num_vfs = 0;
+			goto err_out2;
+		}
+		netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", rc);
+		*num_vfs = rc;
+	}
 
 	/* Register buffers for VFs */
 	rc = bnxt_hwrm_func_buf_rgtr(bp);
@@ -766,17 +886,51 @@ static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
 	return rc;
 }
 
+static int bnxt_vf_store_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+	u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
+	struct hwrm_func_vf_cfg_input *req =
+		(struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;
+
+	/* Only allow VF to set a valid MAC address if the PF assigned MAC
+	 * address is zero
+	 */
+	if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
+		if (is_valid_ether_addr(req->dflt_mac_addr) &&
+		    !is_valid_ether_addr(vf->mac_addr)) {
+			ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
+			return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
+		}
+		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
+	}
+	return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
+}
+
 static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
 {
 	u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
 	struct hwrm_cfa_l2_filter_alloc_input *req =
 		(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
+	bool mac_ok = false;
 
-	if (!is_valid_ether_addr(vf->mac_addr) ||
-	    ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
+	/* The VF MAC address must match the PF-assigned MAC address if one
+	 * is valid.  Otherwise, it must match the VF-stored MAC address if
+	 * the firmware spec is >= 1.2.2.
+	 */
+	if (is_valid_ether_addr(vf->mac_addr)) {
+		if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
+			mac_ok = true;
+	} else if (is_valid_ether_addr(vf->vf_mac_addr)) {
+		if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
+			mac_ok = true;
+	} else if (bp->hwrm_spec_code < 0x10202) {
+		mac_ok = true;
+	} else {
+		/* Neither the PF nor the VF has a valid MAC assigned and the
+		 * firmware spec is >= 1.2.2, so accept the VF's request.
+		 */
+		mac_ok = true;
+	}
+	if (mac_ok)
 		return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
-	else
-		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
+	return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
 }
 
 static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
@@ -838,6 +992,9 @@ static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
 	u32 req_type = le16_to_cpu(encap_req->req_type);
 
 	switch (req_type) {
+	case HWRM_FUNC_VF_CFG:
+		rc = bnxt_vf_store_mac(bp, vf);
+		break;
 	case HWRM_CFA_L2_FILTER_ALLOC:
 		rc = bnxt_vf_validate_set_mac(bp, vf);
 		break;
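
Both bnxt_vf_store_mac() and bnxt_vf_validate_set_mac() hinge on
is_valid_ether_addr(), i.e. on an address that is non-zero and has the
multicast bit clear.  A small runnable sketch of the store-side policy, with
hypothetical addresses:

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  /* mirrors the kernel's is_valid_ether_addr(): not all-zero and not
   * multicast (bit 0 of the first octet clear)
   */
  static bool valid_ether_addr(const uint8_t a[6])
  {
  	int i, nonzero = 0;

  	for (i = 0; i < 6; i++)
  		nonzero |= a[i];
  	return nonzero && !(a[0] & 0x01);
  }

  int main(void)
  {
  	const uint8_t pf_assigned[6] = { 0 };	/* PF left it unassigned */
  	const uint8_t vf_wanted[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

  	/* the same gate as bnxt_vf_store_mac(): a VF may set its own MAC
  	 * only while the PF has not administratively assigned one
  	 */
  	if (valid_ether_addr(vf_wanted) && !valid_ether_addr(pf_assigned))
  		printf("VF MAC stored and request forwarded\n");
  	else
  		printf("request rejected with an error response\n");
  	return 0;
  }
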
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 1d9b08c..2ece164 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -43,7 +43,7 @@ static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
 	}
 
 	/* Is dev a VF-rep? */
-	if (dev != pf_bp->dev)
+	if (bnxt_dev_is_vf_rep(dev))
 		return bnxt_vf_rep_get_fid(dev);
 
 	bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 69186d18..2ca11be 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -241,6 +241,11 @@ static const struct net_device_ops bnxt_vf_rep_netdev_ops = {
 	.ndo_get_phys_port_name = bnxt_vf_rep_get_phys_port_name
 };
 
+bool bnxt_dev_is_vf_rep(struct net_device *dev)
+{
+	return dev->netdev_ops == &bnxt_vf_rep_netdev_ops;
+}
+
 /* Called when the parent PF interface is closed:
  * As the mode transition from SWITCHDEV to LEGACY
  * happens under the rtnl_lock() this routine is safe
@@ -376,6 +381,26 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
 	ether_addr_copy(dev->dev_addr, dev->perm_addr);
 }
 
+static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
+{
+	struct pci_dev *pdev = bp->pdev;
+	int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
+	u32 dw;
+
+	if (!pos) {
+		netdev_info(bp->dev, "Unable to read adapter's DSN\n");
+		return -EOPNOTSUPP;
+	}
+
+	/* DSN (two dw) is at an offset of 4 from the cap pos */
+	pos += 4;
+	pci_read_config_dword(pdev, pos, &dw);
+	put_unaligned_le32(dw, &dsn[0]);
+	pci_read_config_dword(pdev, pos + 4, &dw);
+	put_unaligned_le32(dw, &dsn[4]);
+	return 0;
+}
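
bnxt_pcie_dsn_get() leans on pci_find_ext_capability(), which walks the PCIe
extended capability list starting at config offset 0x100; each 32-bit header
holds the capability ID in its low 16 bits and the offset of the next
capability in its top 12 bits.  A user-space sketch of the same walk over a
config-space image -- the DSN here is fabricated and a little-endian host is
assumed:

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  #define PCI_EXT_CAP_ID_DSN	0x03

  static int find_ext_cap(const uint8_t *cfg, int cap_id)
  {
  	int pos = 0x100;	/* extended capabilities start here */

  	while (pos) {
  		uint32_t hdr;

  		memcpy(&hdr, cfg + pos, 4);
  		if ((hdr & 0xffff) == cap_id)
  			return pos;
  		pos = (hdr >> 20) & 0xffc;	/* next pointer, dword aligned */
  	}
  	return 0;
  }

  int main(void)
  {
  	static uint8_t cfg[4096];
  	uint32_t hdr = PCI_EXT_CAP_ID_DSN | (1 << 16);	/* v1, no next cap */
  	uint64_t dsn = 0x1122334455667788ull;		/* fabricated */
  	int pos;

  	memcpy(cfg + 0x100, &hdr, 4);
  	memcpy(cfg + 0x104, &dsn, 8);	/* DSN follows its header */

  	pos = find_ext_cap(cfg, PCI_EXT_CAP_ID_DSN);
  	if (pos) {
  		memcpy(&dsn, cfg + pos + 4, 8);
  		printf("DSN at 0x%03x: %016llx\n", pos,
  		       (unsigned long long)dsn);
  	}
  	return 0;
  }
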
+
 static int bnxt_vf_reps_create(struct bnxt *bp)
 {
 	u16 *cfa_code_map = NULL, num_vfs = pci_num_vf(bp->pdev);
@@ -440,6 +465,11 @@ static int bnxt_vf_reps_create(struct bnxt *bp)
 		}
 	}
 
+	/* Read the adapter's DSN to use as the eswitch switch_id */
+	rc = bnxt_pcie_dsn_get(bp, bp->switch_id);
+	if (rc)
+		goto err;
+
 	/* publish cfa_code_map only after all VF-reps have been initialized */
 	bp->cfa_code_map = cfa_code_map;
 	bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
index fb06bbe..38b9a75 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
@@ -28,6 +28,7 @@ static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev)
 	return bp->pf.vf[vf_rep->vf_idx].fw_fid;
 }
 
+bool bnxt_dev_is_vf_rep(struct net_device *dev);
 int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode);
 int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode);
 
@@ -54,5 +55,10 @@ static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev)
 {
 	return 0;
 }
+
+static inline bool bnxt_dev_is_vf_rep(struct net_device *dev)
+{
+	return false;
+}
 #endif /* CONFIG_BNXT_SRIOV */
 #endif /* BNXT_VFR_H */
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 63be75e..043e3c1 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -27,6 +27,7 @@
 
 config THUNDER_NIC_VF
 	tristate "Thunder Virtual function driver"
+	imply CAVIUM_PTP
 	depends on 64BIT
 	---help---
 	  This driver supports Thunder's NIC virtual function
@@ -50,6 +51,18 @@
 	  This driver supports configuring XCV block of RGX interface
 	  present on CN81XX chip.
 
+config CAVIUM_PTP
+	tristate "Cavium PTP coprocessor as PTP clock"
+	depends on 64BIT
+	imply PTP_1588_CLOCK
+	default y
+	---help---
+	  This driver adds support for the Precision Time Protocol Clocks and
+	  Timestamping coprocessor (PTP) found on Cavium processors.
+	  PTP provides a timestamping mechanism that is suitable for use with
+	  the IEEE 1588 Precision Time Protocol or for other purposes.
+	  Timestamps can be used in BGX, TNS, GTI, and NIC blocks.
+
 config LIQUIDIO
 	tristate "Cavium LiquidIO support"
 	depends on 64BIT
diff --git a/drivers/net/ethernet/cavium/Makefile b/drivers/net/ethernet/cavium/Makefile
index 872da9f..946bba8 100644
--- a/drivers/net/ethernet/cavium/Makefile
+++ b/drivers/net/ethernet/cavium/Makefile
@@ -1,6 +1,7 @@
 #
 # Makefile for the Cavium ethernet device drivers.
 #
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += common/
 obj-$(CONFIG_NET_VENDOR_CAVIUM) += thunder/
 obj-$(CONFIG_NET_VENDOR_CAVIUM) += liquidio/
 obj-$(CONFIG_NET_VENDOR_CAVIUM) += octeon/
diff --git a/drivers/net/ethernet/cavium/common/Makefile b/drivers/net/ethernet/cavium/common/Makefile
new file mode 100644
index 0000000..dd8561b
--- /dev/null
+++ b/drivers/net/ethernet/cavium/common/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CAVIUM_PTP) += cavium_ptp.o
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
new file mode 100644
index 0000000..c87c9c6
--- /dev/null
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0
+/* cavium_ptp.c - PTP 1588 clock on Cavium hardware
+ * Copyright (c) 2003-2015, 2017 Cavium, Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/timecounter.h>
+#include <linux/pci.h>
+
+#include "cavium_ptp.h"
+
+#define DRV_NAME	"Cavium PTP Driver"
+
+#define PCI_DEVICE_ID_CAVIUM_PTP	0xA00C
+#define PCI_DEVICE_ID_CAVIUM_RST	0xA00E
+
+#define PCI_PTP_BAR_NO	0
+#define PCI_RST_BAR_NO	0
+
+#define PTP_CLOCK_CFG		0xF00ULL
+#define  PTP_CLOCK_CFG_PTP_EN	BIT(0)
+#define PTP_CLOCK_LO		0xF08ULL
+#define PTP_CLOCK_HI		0xF10ULL
+#define PTP_CLOCK_COMP		0xF18ULL
+
+#define RST_BOOT	0x1600ULL
+#define CLOCK_BASE_RATE	50000000ULL
+
+static u64 ptp_cavium_clock_get(void)
+{
+	struct pci_dev *pdev;
+	void __iomem *base;
+	u64 ret = CLOCK_BASE_RATE * 16;
+
+	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+			      PCI_DEVICE_ID_CAVIUM_RST, NULL);
+	if (!pdev)
+		goto error;
+
+	base = pci_ioremap_bar(pdev, PCI_RST_BAR_NO);
+	if (!base)
+		goto error_put_pdev;
+
+	ret = CLOCK_BASE_RATE * ((readq(base + RST_BOOT) >> 33) & 0x3f);
+
+	iounmap(base);
+
+error_put_pdev:
+	pci_dev_put(pdev);
+
+error:
+	return ret;
+}
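
The multiplier read back here lives in RST_BOOT bits 38:33 and scales the
50 MHz reference, so the `* 16` fallback above corresponds to 800 MHz.  A
quick check of the arithmetic with a fabricated register value:

  #include <stdint.h>
  #include <stdio.h>

  #define CLOCK_BASE_RATE	50000000ULL

  int main(void)
  {
  	uint64_t rst_boot = 16ULL << 33;	/* fabricated: multiplier 16 */
  	uint64_t hz = CLOCK_BASE_RATE * ((rst_boot >> 33) & 0x3f);

  	printf("coprocessor clock: %llu Hz\n", (unsigned long long)hz);
  	return 0;
  }
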
+
+struct cavium_ptp *cavium_ptp_get(void)
+{
+	struct cavium_ptp *ptp;
+	struct pci_dev *pdev;
+
+	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+			      PCI_DEVICE_ID_CAVIUM_PTP, NULL);
+	if (!pdev)
+		return ERR_PTR(-ENODEV);
+
+	ptp = pci_get_drvdata(pdev);
+	if (!ptp)
+		ptp = ERR_PTR(-EPROBE_DEFER);
+	if (IS_ERR(ptp))
+		pci_dev_put(pdev);
+
+	return ptp;
+}
+EXPORT_SYMBOL(cavium_ptp_get);
+
+void cavium_ptp_put(struct cavium_ptp *ptp)
+{
+	pci_dev_put(ptp->pdev);
+}
+EXPORT_SYMBOL(cavium_ptp_put);
+
+/**
+ * cavium_ptp_adjfine() - Adjust ptp frequency
+ * @ptp: PTP clock info
+ * @scaled_ppm: how much to adjust by, in parts per million, but with a
+ *              16 bit binary fractional field
+ */
+static int cavium_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
+{
+	struct cavium_ptp *clock =
+		container_of(ptp_info, struct cavium_ptp, ptp_info);
+	unsigned long flags;
+	u64 comp;
+	u64 adj;
+	bool neg_adj = false;
+
+	if (scaled_ppm < 0) {
+		neg_adj = true;
+		scaled_ppm = -scaled_ppm;
+	}
+
+	/* The hardware adds the clock compensation value to the PTP clock
+	 * on every coprocessor clock cycle, so by convention it represents
+	 * the number of nanoseconds between cycles.  The compensation value
+	 * is kept in 64-bit fixed-point representation: the upper 32 bits
+	 * are whole nanoseconds and the lower 32 bits fractions of a
+	 * nanosecond.
+	 * scaled_ppm is the requested frequency offset in parts per million,
+	 * with a 16-bit binary fractional field.  The new compensation value
+	 * is therefore computed with 64-bit fixed-point arithmetic as
+	 * comp = tbase + tbase * scaled_ppm / (1M * 2^16)
+	 * where tbase is the base compensation value programmed in
+	 * cavium_ptp_probe(), i.e. tbase = 1s / clock_rate.
+	 */
+	comp = ((u64)1000000000ull << 32) / clock->clock_rate;
+	adj = comp * scaled_ppm;
+	adj >>= 16;
+	adj = div_u64(adj, 1000000ull);
+	comp = neg_adj ? comp - adj : comp + adj;
+
+	spin_lock_irqsave(&clock->spin_lock, flags);
+	writeq(comp, clock->reg_base + PTP_CLOCK_COMP);
+	spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+	return 0;
+}
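
A worked example of the fixed-point arithmetic above, runnable in user space.
The 800 MHz clock rate is an assumption, and scaled_ppm = 65536 requests
exactly +1 ppm (scaled_ppm carries a 16-bit binary fraction; negative values
subtract the adjustment instead, as in the driver):

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
  	uint64_t clock_rate = 800000000ull;	/* assumed coprocessor clock */
  	uint64_t scaled_ppm = 65536;		/* exactly +1 ppm */

  	/* base compensation: nanoseconds per cycle in 32.32 fixed point */
  	uint64_t comp = ((uint64_t)1000000000ull << 32) / clock_rate;
  	uint64_t adj = (comp * scaled_ppm) >> 16;

  	adj /= 1000000ull;
  	comp += adj;	/* a positive scaled_ppm speeds the clock up */

  	printf("comp = 0x%016llx (%.9f ns/cycle)\n",
  	       (unsigned long long)comp, comp / 4294967296.0);
  	return 0;
  }
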
+
+/**
+ * cavium_ptp_adjtime() - Adjust ptp time
+ * @ptp:   PTP clock info
+ * @delta: how much to adjust by, in nanosecs
+ */
+static int cavium_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+{
+	struct cavium_ptp *clock =
+		container_of(ptp_info, struct cavium_ptp, ptp_info);
+	unsigned long flags;
+
+	spin_lock_irqsave(&clock->spin_lock, flags);
+	timecounter_adjtime(&clock->time_counter, delta);
+	spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+	/* Sync so that the network driver sees the latest value */
+	smp_mb();
+
+	return 0;
+}
+
+/**
+ * cavium_ptp_gettime() - Get hardware clock time with adjustment
+ * @ptp: PTP clock info
+ * @ts:  timespec
+ */
+static int cavium_ptp_gettime(struct ptp_clock_info *ptp_info,
+			      struct timespec64 *ts)
+{
+	struct cavium_ptp *clock =
+		container_of(ptp_info, struct cavium_ptp, ptp_info);
+	unsigned long flags;
+	u64 nsec;
+
+	spin_lock_irqsave(&clock->spin_lock, flags);
+	nsec = timecounter_read(&clock->time_counter);
+	spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+	*ts = ns_to_timespec64(nsec);
+
+	return 0;
+}
+
+/**
+ * cavium_ptp_settime() - Set hardware clock time. Reset adjustment
+ * @ptp: PTP clock info
+ * @ts:  timespec
+ */
+static int cavium_ptp_settime(struct ptp_clock_info *ptp_info,
+			      const struct timespec64 *ts)
+{
+	struct cavium_ptp *clock =
+		container_of(ptp_info, struct cavium_ptp, ptp_info);
+	unsigned long flags;
+	u64 nsec;
+
+	nsec = timespec64_to_ns(ts);
+
+	spin_lock_irqsave(&clock->spin_lock, flags);
+	timecounter_init(&clock->time_counter, &clock->cycle_counter, nsec);
+	spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+	return 0;
+}
+
+/**
+ * cavium_ptp_enable() - Request to enable or disable an ancillary feature.
+ * @ptp: PTP clock info
+ * @rq:  request
+ * @on:  is it on
+ */
+static int cavium_ptp_enable(struct ptp_clock_info *ptp_info,
+			     struct ptp_clock_request *rq, int on)
+{
+	return -EOPNOTSUPP;
+}
+
+static u64 cavium_ptp_cc_read(const struct cyclecounter *cc)
+{
+	struct cavium_ptp *clock =
+		container_of(cc, struct cavium_ptp, cycle_counter);
+
+	return readq(clock->reg_base + PTP_CLOCK_HI);
+}
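
The counter read above feeds a timecounter registered with mult = 1 and
shift = 0 (see the probe routine below), so PTP_CLOCK_HI already ticks in
nanoseconds -- the hardware adds PTP_CLOCK_COMP every cycle -- and the
conversion reduces to an offset from the last synchronization point.  A
simplified sketch; the real timecounter_cyc2time() also masks the delta to
handle counter wraparound:

  #include <stdint.h>
  #include <stdio.h>

  struct timecounter {
  	uint64_t cycle_last;	/* counter value at the last read/init */
  	uint64_t nsec;		/* accumulated nanoseconds at cycle_last */
  };

  static uint64_t cyc2time(const struct timecounter *tc, uint64_t cycle)
  {
  	return tc->nsec + (cycle - tc->cycle_last);	/* mult = 1, shift = 0 */
  }

  int main(void)
  {
  	struct timecounter tc = { .cycle_last = 1000, .nsec = 5000000000ull };

  	printf("%llu ns\n", (unsigned long long)cyc2time(&tc, 1500));
  	return 0;
  }
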
+
+static int cavium_ptp_probe(struct pci_dev *pdev,
+			    const struct pci_device_id *ent)
+{
+	struct device *dev = &pdev->dev;
+	struct cavium_ptp *clock;
+	struct cyclecounter *cc;
+	u64 clock_cfg;
+	u64 clock_comp;
+	int err;
+
+	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
+	if (!clock) {
+		err = -ENOMEM;
+		goto error;
+	}
+
+	clock->pdev = pdev;
+
+	err = pcim_enable_device(pdev);
+	if (err)
+		goto error_free;
+
+	err = pcim_iomap_regions(pdev, 1 << PCI_PTP_BAR_NO, pci_name(pdev));
+	if (err)
+		goto error_free;
+
+	clock->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];
+
+	spin_lock_init(&clock->spin_lock);
+
+	cc = &clock->cycle_counter;
+	cc->read = cavium_ptp_cc_read;
+	cc->mask = CYCLECOUNTER_MASK(64);
+	cc->mult = 1;
+	cc->shift = 0;
+
+	timecounter_init(&clock->time_counter, &clock->cycle_counter,
+			 ktime_to_ns(ktime_get_real()));
+
+	clock->clock_rate = ptp_cavium_clock_get();
+
+	clock->ptp_info = (struct ptp_clock_info) {
+		.owner		= THIS_MODULE,
+		.name		= "ThunderX PTP",
+		.max_adj	= 1000000000ull,
+		.n_ext_ts	= 0,
+		.n_pins		= 0,
+		.pps		= 0,
+		.adjfine	= cavium_ptp_adjfine,
+		.adjtime	= cavium_ptp_adjtime,
+		.gettime64	= cavium_ptp_gettime,
+		.settime64	= cavium_ptp_settime,
+		.enable		= cavium_ptp_enable,
+	};
+
+	clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
+	clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
+	writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
+
+	clock_comp = ((u64)1000000000ull << 32) / clock->clock_rate;
+	writeq(clock_comp, clock->reg_base + PTP_CLOCK_COMP);
+
+	clock->ptp_clock = ptp_clock_register(&clock->ptp_info, dev);
+	if (!clock->ptp_clock) {
+		err = -ENODEV;
+		goto error_stop;
+	}
+	if (IS_ERR(clock->ptp_clock)) {
+		err = PTR_ERR(clock->ptp_clock);
+		goto error_stop;
+	}
+
+	pci_set_drvdata(pdev, clock);
+	return 0;
+
+error_stop:
+	clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
+	clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
+	writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
+	pcim_iounmap_regions(pdev, 1 << PCI_PTP_BAR_NO);
+
+error_free:
+	devm_kfree(dev, clock);
+
+error:
+	/* For `cavium_ptp_get()` we need to differentiate between the case
+	 * when the core has not tried to probe this device and the case when
+	 * the probe failed.  In the latter case we pretend that the
+	 * initialization was successful and keep the error in
+	 * `dev->driver_data`.
+	 */
+	pci_set_drvdata(pdev, ERR_PTR(err));
+	return 0;
+}
+
+static void cavium_ptp_remove(struct pci_dev *pdev)
+{
+	struct cavium_ptp *clock = pci_get_drvdata(pdev);
+	u64 clock_cfg;
+
+	if (IS_ERR_OR_NULL(clock))
+		return;
+
+	ptp_clock_unregister(clock->ptp_clock);
+
+	clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
+	clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
+	writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
+}
+
+static const struct pci_device_id cavium_ptp_id_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_PTP) },
+	{ 0, }
+};
+
+static struct pci_driver cavium_ptp_driver = {
+	.name = DRV_NAME,
+	.id_table = cavium_ptp_id_table,
+	.probe = cavium_ptp_probe,
+	.remove = cavium_ptp_remove,
+};
+
+static int __init cavium_ptp_init_module(void)
+{
+	return pci_register_driver(&cavium_ptp_driver);
+}
+
+static void __exit cavium_ptp_cleanup_module(void)
+{
+	pci_unregister_driver(&cavium_ptp_driver);
+}
+
+module_init(cavium_ptp_init_module);
+module_exit(cavium_ptp_cleanup_module);
+
+MODULE_DESCRIPTION(DRV_NAME);
+MODULE_AUTHOR("Cavium Networks <support@cavium.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, cavium_ptp_id_table);
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.h b/drivers/net/ethernet/cavium/common/cavium_ptp.h
new file mode 100644
index 0000000..be2bafc
--- /dev/null
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.h
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/* cavium_ptp.h - PTP 1588 clock on Cavium hardware
+ * Copyright (c) 2003-2015, 2017 Cavium, Inc.
+ */
+
+#ifndef CAVIUM_PTP_H
+#define CAVIUM_PTP_H
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+
+struct cavium_ptp {
+	struct pci_dev *pdev;
+
+	/* Serialize access to cycle_counter, time_counter and hw_registers */
+	spinlock_t spin_lock;
+	struct cyclecounter cycle_counter;
+	struct timecounter time_counter;
+	void __iomem *reg_base;
+
+	u32 clock_rate;
+
+	struct ptp_clock_info ptp_info;
+	struct ptp_clock *ptp_clock;
+};
+
+#if IS_ENABLED(CONFIG_CAVIUM_PTP)
+
+struct cavium_ptp *cavium_ptp_get(void);
+void cavium_ptp_put(struct cavium_ptp *ptp);
+
+static inline u64 cavium_ptp_tstamp2time(struct cavium_ptp *ptp, u64 tstamp)
+{
+	unsigned long flags;
+	u64 ret;
+
+	spin_lock_irqsave(&ptp->spin_lock, flags);
+	ret = timecounter_cyc2time(&ptp->time_counter, tstamp);
+	spin_unlock_irqrestore(&ptp->spin_lock, flags);
+
+	return ret;
+}
+
+static inline int cavium_ptp_clock_index(struct cavium_ptp *clock)
+{
+	return ptp_clock_index(clock->ptp_clock);
+}
+
+#else
+
+static inline struct cavium_ptp *cavium_ptp_get(void)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline void cavium_ptp_put(struct cavium_ptp *ptp) {}
+
+static inline u64 cavium_ptp_tstamp2time(struct cavium_ptp *ptp, u64 tstamp)
+{
+	return 0;
+}
+
+static inline int cavium_ptp_clock_index(struct cavium_ptp *clock)
+{
+	return -1;
+}
+
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 4a02e61..4cacce5 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -263,6 +263,8 @@ struct nicvf_drv_stats {
 	struct u64_stats_sync   syncp;
 };
 
+struct cavium_ptp;
+
 struct nicvf {
 	struct nicvf		*pnicvf;
 	struct net_device	*netdev;
@@ -312,6 +314,33 @@ struct nicvf {
 	struct tasklet_struct	qs_err_task;
 	struct work_struct	reset_task;
 
+	/* PTP timestamp */
+	struct cavium_ptp	*ptp_clock;
+	/* Inbound timestamping is on */
+	bool			hw_rx_tstamp;
+	/* When a packet that requires timestamping is sent, the hardware
+	 * inserts two entries into the completion queue.  The first is the
+	 * regular CQE_TYPE_SEND entry that signals that the packet was sent.
+	 * The second is a CQE_TYPE_SEND_PTP entry that carries the actual
+	 * timestamp for that packet.
+	 * `ptp_skb` holds the pointer to the packet between the two
+	 * completions: it is set by the CQE_TYPE_SEND handler and consumed
+	 * and cleared by the CQE_TYPE_SEND_PTP handler.
+	 */
+	struct sk_buff		*ptp_skb;
+	/* `tx_ptp_skbs` is set while the hardware is sending a packet that
+	 * requires a timestamp.  The hardware cannot process more than one
+	 * such packet at a time, so the flag is set each time the driver
+	 * submits such a packet to the send queue and cleared when the
+	 * corresponding completion queue entry arrives.
+	 * This prevents the driver from having more than one packet that
+	 * requires timestamping outstanding in the hardware.
+	 */
+	atomic_t		tx_ptp_skbs;
+
 	/* Interrupt coalescing settings */
 	u32			cq_coalesce_usecs;
 	u32			msg_enable;
@@ -371,6 +400,7 @@ struct nicvf {
 #define	NIC_MBOX_MSG_LOOPBACK		0x16	/* Set interface in loopback */
 #define	NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17	/* Reset statistics counters */
 #define	NIC_MBOX_MSG_PFC		0x18	/* Pause frame control */
+#define	NIC_MBOX_MSG_PTP_CFG		0x19	/* HW packet timestamp */
 #define	NIC_MBOX_MSG_CFG_DONE		0xF0	/* VF configuration done */
 #define	NIC_MBOX_MSG_SHUTDOWN		0xF1	/* VF is being shutdown */
 
@@ -521,6 +551,11 @@ struct pfc {
 	u8    fc_tx;
 };
 
+struct set_ptp {
+	u8    msg;
+	bool  enable;
+};
+
 /* 128 bit shared memory between PF and each VF */
 union nic_mbx {
 	struct { u8 msg; }	msg;
@@ -540,6 +575,7 @@ union nic_mbx {
 	struct set_loopback	lbk;
 	struct reset_stat_cfg	reset_stat;
 	struct pfc		pfc;
+	struct set_ptp		ptp;
 };
 
 #define NIC_NODE_ID_MASK	0x03
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 8f1dd55..8325577 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -426,13 +426,22 @@ static void nic_init_hw(struct nicpf *nic)
 	/* Enable backpressure */
 	nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);
 
-	/* TNS and TNS bypass modes are present only on 88xx */
+	/* TNS and TNS bypass modes are present only on 88xx.
+	 * Also, the offset of this CSR changed on 81xx and 83xx.
+	 */
 	if (nic->pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF) {
 		/* Disable TNS mode on both interfaces */
 		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
-			      (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
+			      (NIC_TNS_BYPASS_MODE << 7) |
+			      BGX0_BLOCK | (1ULL << 16));
 		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
-			      (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
+			      (NIC_TNS_BYPASS_MODE << 7) |
+			      BGX1_BLOCK | (1ULL << 16));
+	} else {
+		/* Configure timestamp generation timeout to 10us */
+		for (i = 0; i < nic->hw->bgx_cnt; i++)
+			nic_reg_write(nic, NIC_PF_INTFX_SEND_CFG | (i << 3),
+				      (1ULL << 16));
 	}
 
 	nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
@@ -880,6 +889,44 @@ static void nic_pause_frame(struct nicpf *nic, int vf, struct pfc *cfg)
 	}
 }
 
+/* Enable or disable HW timestamping by BGX for pkts received on a LMAC */
+static void nic_config_timestamp(struct nicpf *nic, int vf, struct set_ptp *ptp)
+{
+	struct pkind_cfg *pkind;
+	u8 lmac, bgx_idx;
+	u64 pkind_val, pkind_idx;
+
+	if (vf >= nic->num_vf_en)
+		return;
+
+	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+	pkind_idx = lmac + bgx_idx * MAX_LMAC_PER_BGX;
+	pkind_val = nic_reg_read(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3));
+	pkind = (struct pkind_cfg *)&pkind_val;
+
+	if (ptp->enable && !pkind->hdr_sl) {
+		/* Set the skip length to exclude the 8-byte timestamp while
+		 * parsing the packet (hdr_sl is in 2-byte units, so 4 skips
+		 * 8 bytes).  If not configured, this results in L2 errors.
+		 */
+		pkind->hdr_sl = 4;
+		/* Adjust max packet length allowed */
+		pkind->maxlen += (pkind->hdr_sl * 2);
+		bgx_config_timestamping(nic->node, bgx_idx, lmac, true);
+		nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7 | (1 << 3),
+			      (ETYPE_ALG_ENDPARSE << 16) | ETH_P_1588);
+	} else if (!ptp->enable && pkind->hdr_sl) {
+		pkind->maxlen -= (pkind->hdr_sl * 2);
+		pkind->hdr_sl = 0;
+		bgx_config_timestamping(nic->node, bgx_idx, lmac, false);
+		nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7 | (1 << 3),
+			      (ETYPE_ALG_SKIP << 16) | ETH_P_8021Q);
+	}
+
+	nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3), pkind_val);
+}
+
 /* Interrupt handler to handle mailbox messages from VFs */
 static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 {
@@ -1022,6 +1069,9 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 	case NIC_MBOX_MSG_PFC:
 		nic_pause_frame(nic, vf, &mbx.pfc);
 		goto unlock;
+	case NIC_MBOX_MSG_PTP_CFG:
+		nic_config_timestamp(nic, vf, &mbx.ptp);
+		break;
 	default:
 		dev_err(&nic->pdev->dev,
 			"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
index 80d4633..a16c48a 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -99,6 +99,7 @@
 #define   NIC_PF_ECC3_DBE_INT_W1S		(0x2708)
 #define   NIC_PF_ECC3_DBE_ENA_W1C		(0x2710)
 #define   NIC_PF_ECC3_DBE_ENA_W1S		(0x2718)
+#define   NIC_PF_INTFX_SEND_CFG			(0x4000)
 #define   NIC_PF_MCAM_0_191_ENA			(0x100000)
 #define   NIC_PF_MCAM_0_191_M_0_5_DATA		(0x110000)
 #define   NIC_PF_MCAM_CTRL			(0x120000)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index b9ece9c..ed9f10b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -9,12 +9,14 @@
 /* ETHTOOL Support for VNIC_VF Device*/
 
 #include <linux/pci.h>
+#include <linux/net_tstamp.h>
 
 #include "nic_reg.h"
 #include "nic.h"
 #include "nicvf_queues.h"
 #include "q_struct.h"
 #include "thunder_bgx.h"
+#include "../common/cavium_ptp.h"
 
 #define DRV_NAME	"thunder-nicvf"
 #define DRV_VERSION     "1.0"
@@ -824,6 +826,31 @@ static int nicvf_set_pauseparam(struct net_device *dev,
 	return 0;
 }
 
+static int nicvf_get_ts_info(struct net_device *netdev,
+			     struct ethtool_ts_info *info)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+
+	if (!nic->ptp_clock)
+		return ethtool_op_get_ts_info(netdev, info);
+
+	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+				SOF_TIMESTAMPING_RX_SOFTWARE |
+				SOF_TIMESTAMPING_SOFTWARE |
+				SOF_TIMESTAMPING_TX_HARDWARE |
+				SOF_TIMESTAMPING_RX_HARDWARE |
+				SOF_TIMESTAMPING_RAW_HARDWARE;
+
+	info->phc_index = cavium_ptp_clock_index(nic->ptp_clock);
+
+	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+			   (1 << HWTSTAMP_FILTER_ALL);
+
+	return 0;
+}
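
User space discovers the clock advertised here through the ETHTOOL_GET_TS_INFO
ioctl; the returned phc_index names the /dev/ptpN device that tools such as
the linuxptp suite then operate on.  A minimal sketch, with eth0 as a
placeholder interface name:

  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <sys/socket.h>
  #include <net/if.h>
  #include <linux/ethtool.h>
  #include <linux/sockios.h>

  int main(void)
  {
  	struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
  	struct ifreq ifr;
  	int fd = socket(AF_INET, SOCK_DGRAM, 0);

  	if (fd < 0) {
  		perror("socket");
  		return 1;
  	}

  	memset(&ifr, 0, sizeof(ifr));
  	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder */
  	ifr.ifr_data = (char *)&info;

  	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
  		printf("phc_index: %d\n", info.phc_index);
  	else
  		perror("SIOCETHTOOL");

  	close(fd);
  	return 0;
  }
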
+
 static const struct ethtool_ops nicvf_ethtool_ops = {
 	.get_link		= nicvf_get_link,
 	.get_drvinfo		= nicvf_get_drvinfo,
@@ -847,7 +874,7 @@ static const struct ethtool_ops nicvf_ethtool_ops = {
 	.set_channels		= nicvf_set_channels,
 	.get_pauseparam         = nicvf_get_pauseparam,
 	.set_pauseparam         = nicvf_set_pauseparam,
-	.get_ts_info		= ethtool_op_get_ts_info,
+	.get_ts_info		= nicvf_get_ts_info,
 	.get_link_ksettings	= nicvf_get_link_ksettings,
 };
 
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 21618d0..881af8a 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -20,11 +20,13 @@
 #include <linux/bpf.h>
 #include <linux/bpf_trace.h>
 #include <linux/filter.h>
+#include <linux/net_tstamp.h>
 
 #include "nic_reg.h"
 #include "nic.h"
 #include "nicvf_queues.h"
 #include "thunder_bgx.h"
+#include "../common/cavium_ptp.h"
 
 #define DRV_NAME	"thunder-nicvf"
 #define DRV_VERSION	"1.0"
@@ -602,6 +604,44 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
 	return false;
 }
 
+static void nicvf_snd_ptp_handler(struct net_device *netdev,
+				  struct cqe_send_t *cqe_tx)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+	struct skb_shared_hwtstamps ts;
+	u64 ns;
+
+	nic = nic->pnicvf;
+
+	/* Sync for 'ptp_skb' */
+	smp_rmb();
+
+	/* New timestamp request can be queued now */
+	atomic_set(&nic->tx_ptp_skbs, 0);
+
+	/* Check for timestamp requested skb */
+	if (!nic->ptp_skb)
+		return;
+
+	/* Check if timestamp generation timed out; the timeout is set to 10us */
+	if (cqe_tx->send_status == CQ_TX_ERROP_TSTMP_TIMEOUT ||
+	    cqe_tx->send_status == CQ_TX_ERROP_TSTMP_CONFLICT)
+		goto no_tstamp;
+
+	/* Get the timestamp */
+	memset(&ts, 0, sizeof(ts));
+	ns = cavium_ptp_tstamp2time(nic->ptp_clock, cqe_tx->ptp_timestamp);
+	ts.hwtstamp = ns_to_ktime(ns);
+	skb_tstamp_tx(nic->ptp_skb, &ts);
+
+no_tstamp:
+	/* Free the original skb */
+	dev_kfree_skb_any(nic->ptp_skb);
+	nic->ptp_skb = NULL;
+	/* Sync 'ptp_skb' */
+	smp_wmb();
+}
+
 static void nicvf_snd_pkt_handler(struct net_device *netdev,
 				  struct cqe_send_t *cqe_tx,
 				  int budget, int *subdesc_cnt,
@@ -658,7 +698,12 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
 		prefetch(skb);
 		(*tx_pkts)++;
 		*tx_bytes += skb->len;
-		napi_consume_skb(skb, budget);
+		/* If timestamp is requested for this skb, don't free it */
+		if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
+		    !nic->pnicvf->ptp_skb)
+			nic->pnicvf->ptp_skb = skb;
+		else
+			napi_consume_skb(skb, budget);
 		sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
 	} else {
 		/* In case of SW TSO on 88xx, only last segment will have
@@ -697,6 +742,21 @@ static inline void nicvf_set_rxhash(struct net_device *netdev,
 	skb_set_hash(skb, hash, hash_type);
 }
 
+static inline void nicvf_set_rxtstamp(struct nicvf *nic, struct sk_buff *skb)
+{
+	u64 ns;
+
+	if (!nic->ptp_clock || !nic->hw_rx_tstamp)
+		return;
+
+	/* The first 8 bytes are the timestamp */
+	ns = cavium_ptp_tstamp2time(nic->ptp_clock,
+				    be64_to_cpu(*(__be64 *)skb->data));
+	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
+
+	__skb_pull(skb, 8);
+}
+
 static void nicvf_rcv_pkt_handler(struct net_device *netdev,
 				  struct napi_struct *napi,
 				  struct cqe_rx_t *cqe_rx,
@@ -748,6 +808,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
 		return;
 	}
 
+	nicvf_set_rxtstamp(nic, skb);
 	nicvf_set_rxhash(netdev, cqe_rx, skb);
 
 	skb_record_rx_queue(skb, rq_idx);
@@ -823,10 +884,12 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
 					      &tx_pkts, &tx_bytes);
 			tx_done++;
 		break;
+		case CQE_TYPE_SEND_PTP:
+			nicvf_snd_ptp_handler(netdev, (void *)cq_desc);
+		break;
 		case CQE_TYPE_INVALID:
 		case CQE_TYPE_RX_SPLIT:
 		case CQE_TYPE_RX_TCP:
-		case CQE_TYPE_SEND_PTP:
 			/* Ignore for now */
 		break;
 		}
@@ -1322,12 +1385,28 @@ int nicvf_stop(struct net_device *netdev)
 
 	nicvf_free_cq_poll(nic);
 
+	/* Free any pending SKB saved to receive timestamp */
+	if (nic->ptp_skb) {
+		dev_kfree_skb_any(nic->ptp_skb);
+		nic->ptp_skb = NULL;
+	}
+
 	/* Clear multiqset info */
 	nic->pnicvf = nic;
 
 	return 0;
 }
 
+static int nicvf_config_hw_rx_tstamp(struct nicvf *nic, bool enable)
+{
+	union nic_mbx mbx = {};
+
+	mbx.ptp.msg = NIC_MBOX_MSG_PTP_CFG;
+	mbx.ptp.enable = enable;
+
+	return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
 static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
 {
 	union nic_mbx mbx = {};
@@ -1397,6 +1476,12 @@ int nicvf_open(struct net_device *netdev)
 	if (nic->sqs_mode)
 		nicvf_get_primary_vf_struct(nic);
 
+	/* Configure PTP timestamp */
+	if (nic->ptp_clock)
+		nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp);
+	atomic_set(&nic->tx_ptp_skbs, 0);
+	nic->ptp_skb = NULL;
+
 	/* Configure receive side scaling and MTU */
 	if (!nic->sqs_mode) {
 		nicvf_rss_init(nic);
@@ -1823,6 +1908,73 @@ static void nicvf_xdp_flush(struct net_device *dev)
 	return;
 }
 
+static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
+{
+	struct hwtstamp_config config;
+	struct nicvf *nic = netdev_priv(netdev);
+
+	if (!nic->ptp_clock)
+		return -ENODEV;
+
+	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+		return -EFAULT;
+
+	/* reserved for future extensions */
+	if (config.flags)
+		return -EINVAL;
+
+	switch (config.tx_type) {
+	case HWTSTAMP_TX_OFF:
+	case HWTSTAMP_TX_ON:
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	switch (config.rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+		nic->hw_rx_tstamp = false;
+		break;
+	case HWTSTAMP_FILTER_ALL:
+	case HWTSTAMP_FILTER_SOME:
+	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+		nic->hw_rx_tstamp = true;
+		config.rx_filter = HWTSTAMP_FILTER_ALL;
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	if (netif_running(netdev))
+		nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp);
+
+	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int nicvf_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+{
+	switch (cmd) {
+	case SIOCSHWTSTAMP:
+		return nicvf_config_hwtstamp(netdev, req);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
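
The handler above is reached through the standard SIOCSHWTSTAMP ioctl.  A
minimal user-space sketch that turns on TX timestamps and all-packet RX
timestamps; the interface name is a placeholder and CAP_NET_ADMIN is
required:

  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <sys/socket.h>
  #include <net/if.h>
  #include <linux/net_tstamp.h>
  #include <linux/sockios.h>

  int main(void)
  {
  	struct hwtstamp_config cfg = {
  		.tx_type   = HWTSTAMP_TX_ON,
  		.rx_filter = HWTSTAMP_FILTER_ALL,	/* flags stays 0 */
  	};
  	struct ifreq ifr;
  	int fd = socket(AF_INET, SOCK_DGRAM, 0);

  	if (fd < 0) {
  		perror("socket");
  		return 1;
  	}

  	memset(&ifr, 0, sizeof(ifr));
  	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder */
  	ifr.ifr_data = (char *)&cfg;

  	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) == 0)
  		printf("granted rx_filter: %d\n", cfg.rx_filter);
  	else
  		perror("SIOCSHWTSTAMP");

  	close(fd);
  	return 0;
  }
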
+
 static const struct net_device_ops nicvf_netdev_ops = {
 	.ndo_open		= nicvf_open,
 	.ndo_stop		= nicvf_stop,
@@ -1836,6 +1988,7 @@ static const struct net_device_ops nicvf_netdev_ops = {
 	.ndo_bpf		= nicvf_xdp,
 	.ndo_xdp_xmit		= nicvf_xdp_xmit,
 	.ndo_xdp_flush          = nicvf_xdp_flush,
+	.ndo_do_ioctl           = nicvf_ioctl,
 };
 
 static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -1845,6 +1998,16 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct nicvf *nic;
 	int    err, qcount;
 	u16    sdevid;
+	struct cavium_ptp *ptp_clock;
+
+	ptp_clock = cavium_ptp_get();
+	if (IS_ERR(ptp_clock)) {
+		if (PTR_ERR(ptp_clock) == -ENODEV)
+			/* In a virtualized environment we proceed without PTP */
+			ptp_clock = NULL;
+		else
+			return PTR_ERR(ptp_clock);
+	}
 
 	err = pci_enable_device(pdev);
 	if (err) {
@@ -1899,6 +2062,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 */
 	if (!nic->t88)
 		nic->max_queues *= 2;
+	nic->ptp_clock = ptp_clock;
 
 	/* MAP VF's configuration registers */
 	nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
@@ -2012,6 +2176,7 @@ static void nicvf_remove(struct pci_dev *pdev)
 	pci_set_drvdata(pdev, NULL);
 	if (nic->drv_stats)
 		free_percpu(nic->drv_stats);
+	cavium_ptp_put(nic->ptp_clock);
 	free_netdev(netdev);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 14e62c6..3eae9ff 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -982,6 +982,9 @@ void nicvf_qset_config(struct nicvf *nic, bool enable)
 		qs_cfg->be = 1;
 #endif
 		qs_cfg->vnic = qs->vnic_id;
+		/* Enable Tx timestamping capability */
+		if (nic->ptp_clock)
+			qs_cfg->send_tstmp_ena = 1;
 	}
 	nicvf_send_msg_to_pf(nic, &mbx);
 }
@@ -1389,6 +1392,29 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
 		hdr->inner_l3_offset = skb_network_offset(skb) - 2;
 		this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
 	}
+
+	/* Check if timestamp is requested */
+	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+		skb_tx_timestamp(skb);
+		return;
+	}
+
+	/* Tx timestamping not supported along with TSO, so ignore request */
+	if (skb_shinfo(skb)->gso_size)
+		return;
+
+	/* HW supports only a single outstanding packet to timestamp */
+	if (!atomic_add_unless(&nic->pnicvf->tx_ptp_skbs, 1, 1))
+		return;
+
+	/* Mark the SKB for later reference */
+	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+	/* Finally enable timestamp generation.
+	 * Since 'post_cqe' is also set, two CQEs will be posted
+	 * for this packet, i.e. CQE_TYPE_SEND and CQE_TYPE_SEND_PTP.
+	 */
+	hdr->tstmp = 1;
 }
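
The atomic_add_unless(&nic->pnicvf->tx_ptp_skbs, 1, 1) call above claims the
single outstanding-timestamp slot: it increments only if the counter is not
already 1 and reports whether it did.  A user-space analogue of the
claim/release protocol using C11 atomics:

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdio.h>

  static atomic_int tx_ptp_skbs;

  /* like atomic_add_unless(&v, 1, 1): take the slot only if it is free */
  static bool claim_tstamp_slot(void)
  {
  	int expected = 0;

  	return atomic_compare_exchange_strong(&tx_ptp_skbs, &expected, 1);
  }

  /* what the CQE_TYPE_SEND_PTP handler does with atomic_set() */
  static void release_tstamp_slot(void)
  {
  	atomic_store(&tx_ptp_skbs, 0);
  }

  int main(void)
  {
  	printf("first claim:   %s\n", claim_tstamp_slot() ? "ok" : "busy");
  	printf("second claim:  %s\n", claim_tstamp_slot() ? "ok" : "busy");
  	release_tstamp_slot();
  	printf("after release: %s\n", claim_tstamp_slot() ? "ok" : "busy");
  	return 0;
  }
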
 
 /* SQ GATHER subdescriptor
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 5e5c4d7..0f23999 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -245,6 +245,35 @@ void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
 }
 EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
 
+/* Enables or disables timestamp insertion by BGX for Rx packets */
+void bgx_config_timestamping(int node, int bgx_idx, int lmacid, bool enable)
+{
+	struct bgx *bgx = get_bgx(node, bgx_idx);
+	struct lmac *lmac;
+	u64 csr_offset, cfg;
+
+	if (!bgx)
+		return;
+
+	lmac = &bgx->lmac[lmacid];
+
+	if (lmac->lmac_type == BGX_MODE_SGMII ||
+	    lmac->lmac_type == BGX_MODE_QSGMII ||
+	    lmac->lmac_type == BGX_MODE_RGMII)
+		csr_offset = BGX_GMP_GMI_RXX_FRM_CTL;
+	else
+		csr_offset = BGX_SMUX_RX_FRM_CTL;
+
+	cfg = bgx_reg_read(bgx, lmacid, csr_offset);
+
+	if (enable)
+		cfg |= BGX_PKT_RX_PTP_EN;
+	else
+		cfg &= ~BGX_PKT_RX_PTP_EN;
+	bgx_reg_write(bgx, lmacid, csr_offset, cfg);
+}
+EXPORT_SYMBOL(bgx_config_timestamping);
+
 void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause)
 {
 	struct pfc *pfc = (struct pfc *)pause;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 23acdc5..5a7567d 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -122,6 +122,8 @@
 #define  SPU_DBG_CTL_AN_NONCE_MCT_DIS		BIT_ULL(29)
 
 #define BGX_SMUX_RX_INT			0x20000
+#define BGX_SMUX_RX_FRM_CTL		0x20020
+#define  BGX_PKT_RX_PTP_EN			BIT_ULL(12)
 #define BGX_SMUX_RX_JABBER		0x20030
 #define BGX_SMUX_RX_CTL			0x20048
 #define  SMU_RX_CTL_STATUS			(3ull << 0)
@@ -172,6 +174,7 @@
 #define  GMI_PORT_CFG_SPEED_MSB			BIT_ULL(8)
 #define  GMI_PORT_CFG_RX_IDLE			BIT_ULL(12)
 #define  GMI_PORT_CFG_TX_IDLE			BIT_ULL(13)
+#define BGX_GMP_GMI_RXX_FRM_CTL		0x38028
 #define BGX_GMP_GMI_RXX_JABBER		0x38038
 #define BGX_GMP_GMI_TXX_THRESH		0x38210
 #define BGX_GMP_GMI_TXX_APPEND		0x38218
@@ -223,6 +226,7 @@ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
 void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status);
 void bgx_lmac_internal_loopback(int node, int bgx_idx,
 				int lmac_idx, bool enable);
+void bgx_config_timestamping(int node, int bgx_idx, int lmacid, bool enable);
 void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause);
 void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause);
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index baa67d3..f05b58f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -312,6 +312,7 @@ struct vpd_params {
 };
 
 struct pci_params {
+	unsigned int vpd_cap_addr;
 	unsigned char speed;
 	unsigned char width;
 };
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 3293980..11fe596 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -102,7 +102,9 @@ const char cxgb4_driver_version[] = DRV_VERSION;
  */
 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
 	static const struct pci_device_id cxgb4_pci_tbl[] = {
-#define CH_PCI_DEVICE_ID_FUNCTION 0x4
+#define CXGB4_UNIFIED_PF 0x4
+
+#define CH_PCI_DEVICE_ID_FUNCTION CXGB4_UNIFIED_PF
 
 /* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
  * called for both.
@@ -110,7 +112,7 @@ const char cxgb4_driver_version[] = DRV_VERSION;
 #define CH_PCI_DEVICE_ID_FUNCTION2 0x0
 
 #define CH_PCI_ID_TABLE_ENTRY(devid) \
-		{PCI_VDEVICE(CHELSIO, (devid)), 4}
+		{PCI_VDEVICE(CHELSIO, (devid)), CXGB4_UNIFIED_PF}
 
 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
 		{ 0, } \
@@ -2605,7 +2607,7 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
 }
 
 #ifdef CONFIG_PCI_IOV
-static int dummy_open(struct net_device *dev)
+static int cxgb4_mgmt_open(struct net_device *dev)
 {
 	/* Turn carrier off since we don't have to transmit anything on this
 	 * interface.
@@ -2615,39 +2617,44 @@ static int dummy_open(struct net_device *dev)
 }
 
 /* Fill MAC address that will be assigned by the FW */
-static void fill_vf_station_mac_addr(struct adapter *adap)
+static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
 {
-	unsigned int i;
 	u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
+	unsigned int i, vf, nvfs;
+	u16 a, b;
 	int err;
 	u8 *na;
-	u16 a, b;
 
+	adap->params.pci.vpd_cap_addr = pci_find_capability(adap->pdev,
+							    PCI_CAP_ID_VPD);
 	err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
-	if (!err) {
-		na = adap->params.vpd.na;
-		for (i = 0; i < ETH_ALEN; i++)
-			hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
-				      hex2val(na[2 * i + 1]));
-		a = (hw_addr[0] << 8) | hw_addr[1];
-		b = (hw_addr[1] << 8) | hw_addr[2];
-		a ^= b;
-		a |= 0x0200;    /* locally assigned Ethernet MAC address */
-		a &= ~0x0100;   /* not a multicast Ethernet MAC address */
-		macaddr[0] = a >> 8;
-		macaddr[1] = a & 0xff;
+	if (err)
+		return;
 
-		for (i = 2; i < 5; i++)
-			macaddr[i] = hw_addr[i + 1];
+	na = adap->params.vpd.na;
+	for (i = 0; i < ETH_ALEN; i++)
+		hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
+			      hex2val(na[2 * i + 1]));
 
-		for (i = 0; i < adap->num_vfs; i++) {
-			macaddr[5] = adap->pf * 16 + i;
-			ether_addr_copy(adap->vfinfo[i].vf_mac_addr, macaddr);
-		}
+	a = (hw_addr[0] << 8) | hw_addr[1];
+	b = (hw_addr[1] << 8) | hw_addr[2];
+	a ^= b;
+	a |= 0x0200;    /* locally assigned Ethernet MAC address */
+	a &= ~0x0100;   /* not a multicast Ethernet MAC address */
+	macaddr[0] = a >> 8;
+	macaddr[1] = a & 0xff;
+
+	for (i = 2; i < 5; i++)
+		macaddr[i] = hw_addr[i + 1];
+
+	for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev);
+		vf < nvfs; vf++) {
+		macaddr[5] = adap->pf * 16 + vf;
+		ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr);
 	}
 }
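
The derivation above is easy to verify in isolation: fold the first three
octets of the VPD network address into 16 bits, force the locally-administered
bit, clear the multicast bit, and number the VFs in the last octet.  A
runnable sketch with a fabricated VPD string (hex2val() mirrors the driver's
helper):

  #include <stdint.h>
  #include <stdio.h>

  static int hex2val(char c)
  {
  	if (c >= '0' && c <= '9')
  		return c - '0';
  	if (c >= 'a' && c <= 'f')
  		return c - 'a' + 10;
  	return c - 'A' + 10;
  }

  int main(void)
  {
  	const char *na = "0007430A1B2C";	/* fabricated VPD address */
  	uint8_t hw[6], mac[6];
  	unsigned int i, vf;
  	uint16_t a, b;

  	for (i = 0; i < 6; i++)
  		hw[i] = hex2val(na[2 * i]) * 16 + hex2val(na[2 * i + 1]);

  	a = (hw[0] << 8) | hw[1];
  	b = (hw[1] << 8) | hw[2];
  	a ^= b;
  	a |= 0x0200;	/* locally administered */
  	a &= ~0x0100;	/* bit 0x0100 of a is the multicast bit of octet 0 */
  	mac[0] = a >> 8;
  	mac[1] = a & 0xff;
  	for (i = 2; i < 5; i++)
  		mac[i] = hw[i + 1];

  	for (vf = 0; vf < 4; vf++) {	/* PF 4, first four VFs */
  		mac[5] = 4 * 16 + vf;
  		printf("VF%u: %02x:%02x:%02x:%02x:%02x:%02x\n", vf,
  		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
  	}
  	return 0;
  }
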
 
-static int cxgb_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
 {
 	struct port_info *pi = netdev_priv(dev);
 	struct adapter *adap = pi->adapter;
@@ -2669,8 +2676,8 @@ static int cxgb_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
 	return ret;
 }
 
-static int cxgb_get_vf_config(struct net_device *dev,
-			      int vf, struct ifla_vf_info *ivi)
+static int cxgb4_mgmt_get_vf_config(struct net_device *dev,
+				    int vf, struct ifla_vf_info *ivi)
 {
 	struct port_info *pi = netdev_priv(dev);
 	struct adapter *adap = pi->adapter;
@@ -2684,8 +2691,8 @@ static int cxgb_get_vf_config(struct net_device *dev,
 	return 0;
 }
 
-static int cxgb_get_phys_port_id(struct net_device *dev,
-				 struct netdev_phys_item_id *ppid)
+static int cxgb4_mgmt_get_phys_port_id(struct net_device *dev,
+				       struct netdev_phys_item_id *ppid)
 {
 	struct port_info *pi = netdev_priv(dev);
 	unsigned int phy_port_id;
@@ -2696,8 +2703,8 @@ static int cxgb_get_phys_port_id(struct net_device *dev,
 	return 0;
 }
 
-static int cxgb_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
-			    int max_tx_rate)
+static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
+				  int min_tx_rate, int max_tx_rate)
 {
 	struct port_info *pi = netdev_priv(dev);
 	struct adapter *adap = pi->adapter;
@@ -3172,15 +3179,16 @@ static const struct net_device_ops cxgb4_netdev_ops = {
 
 #ifdef CONFIG_PCI_IOV
 static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
-	.ndo_open             = dummy_open,
-	.ndo_set_vf_mac       = cxgb_set_vf_mac,
-	.ndo_get_vf_config    = cxgb_get_vf_config,
-	.ndo_set_vf_rate      = cxgb_set_vf_rate,
-	.ndo_get_phys_port_id = cxgb_get_phys_port_id,
+	.ndo_open             = cxgb4_mgmt_open,
+	.ndo_set_vf_mac       = cxgb4_mgmt_set_vf_mac,
+	.ndo_get_vf_config    = cxgb4_mgmt_get_vf_config,
+	.ndo_set_vf_rate      = cxgb4_mgmt_set_vf_rate,
+	.ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id,
 };
 #endif
 
-static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
+				   struct ethtool_drvinfo *info)
 {
 	struct adapter *adapter = netdev2adap(dev);
 
@@ -3192,7 +3200,7 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 }
 
 static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
-	.get_drvinfo       = get_drvinfo,
+	.get_drvinfo       = cxgb4_mgmt_get_drvinfo,
 };
 
 void t4_fatal_err(struct adapter *adap)
@@ -4908,7 +4916,7 @@ static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
 }
 
 #ifdef CONFIG_PCI_IOV
-static void dummy_setup(struct net_device *dev)
+static void cxgb4_mgmt_setup(struct net_device *dev)
 {
 	dev->type = ARPHRD_NONE;
 	dev->mtu = 0;
@@ -4924,38 +4932,6 @@ static void dummy_setup(struct net_device *dev)
 	dev->needs_free_netdev = true;
 }
 
-static int config_mgmt_dev(struct pci_dev *pdev)
-{
-	struct adapter *adap = pci_get_drvdata(pdev);
-	struct net_device *netdev;
-	struct port_info *pi;
-	char name[IFNAMSIZ];
-	int err;
-
-	snprintf(name, IFNAMSIZ, "mgmtpf%d%d", adap->adap_idx, adap->pf);
-	netdev = alloc_netdev(sizeof(struct port_info), name, NET_NAME_UNKNOWN,
-			      dummy_setup);
-	if (!netdev)
-		return -ENOMEM;
-
-	pi = netdev_priv(netdev);
-	pi->adapter = adap;
-	pi->tx_chan = adap->pf % adap->params.nports;
-	SET_NETDEV_DEV(netdev, &pdev->dev);
-
-	adap->port[0] = netdev;
-	pi->port_id = 0;
-
-	err = register_netdev(adap->port[0]);
-	if (err) {
-		pr_info("Unable to register VF mgmt netdev %s\n", name);
-		free_netdev(adap->port[0]);
-		adap->port[0] = NULL;
-		return err;
-	}
-	return 0;
-}
-
 static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
 {
 	struct adapter *adap = pci_get_drvdata(pdev);
@@ -4967,7 +4943,7 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
 	/* Check if cxgb4 is the MASTER and fw is initialized */
 	if (!(pcie_fw & PCIE_FW_INIT_F) ||
 	    !(pcie_fw & PCIE_FW_MASTER_VLD_F) ||
-	    PCIE_FW_MASTER_G(pcie_fw) != 4) {
+	    PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF) {
 		dev_warn(&pdev->dev,
 			 "cxgb4 driver needs to be MASTER to support SRIOV\n");
 		return -EOPNOTSUPP;
@@ -4979,46 +4955,132 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
 	if (current_vfs && pci_vfs_assigned(pdev)) {
 		dev_err(&pdev->dev,
 			"Cannot modify SR-IOV while VFs are assigned\n");
-		num_vfs = current_vfs;
-		return num_vfs;
+		return current_vfs;
 	}
-
-	/* Disable SRIOV when zero is passed.
-	 * One needs to disable SRIOV before modifying it, else
-	 * stack throws the below warning:
-	 * " 'n' VFs already enabled. Disable before enabling 'm' VFs."
+	/* Note that the upper-level code ensures that we're never called with
+	 * a non-zero "num_vfs" when we already have VFs instantiated.  But
+	 * it never hurts to code defensively.
 	 */
+	if (num_vfs != 0 && current_vfs != 0)
+		return -EBUSY;
+
+	/* Nothing to do for no change. */
+	if (num_vfs == current_vfs)
+		return num_vfs;
+
+	/* Disable SRIOV when zero is passed. */
 	if (!num_vfs) {
 		pci_disable_sriov(pdev);
-		if (adap->port[0]) {
-			unregister_netdev(adap->port[0]);
-			adap->port[0] = NULL;
-		}
+		/* free VF Management Interface */
+		unregister_netdev(adap->port[0]);
+		free_netdev(adap->port[0]);
+		adap->port[0] = NULL;
+
 		/* free VF resources */
+		adap->num_vfs = 0;
 		kfree(adap->vfinfo);
 		adap->vfinfo = NULL;
-		adap->num_vfs = 0;
-		return num_vfs;
+		return 0;
 	}
 
-	if (num_vfs != current_vfs) {
-		err = pci_enable_sriov(pdev, num_vfs);
-		if (err)
-			return err;
+	if (!current_vfs) {
+		struct fw_pfvf_cmd port_cmd, port_rpl;
+		struct net_device *netdev;
+		unsigned int pmask, port;
+		struct pci_dev *pbridge;
+		struct port_info *pi;
+		char name[IFNAMSIZ];
+		u32 devcap2;
+		u16 flags;
+		int pos;
 
-		adap->num_vfs = num_vfs;
-		err = config_mgmt_dev(pdev);
+		/* If we want to instantiate Virtual Functions, then our
+		 * parent bridge's PCI-E needs to support Alternative Routing
+		 * ID (ARI) because our VFs will show up at function offset 8
+		 * and above.
+		 */
+		pbridge = pdev->bus->self;
+		pos = pci_find_capability(pbridge, PCI_CAP_ID_EXP);
+		pci_read_config_word(pbridge, pos + PCI_EXP_FLAGS, &flags);
+		pci_read_config_dword(pbridge, pos + PCI_EXP_DEVCAP2, &devcap2);
+
+		if ((flags & PCI_EXP_FLAGS_VERS) < 2 ||
+		    !(devcap2 & PCI_EXP_DEVCAP2_ARI)) {
+			/* Our parent bridge does not support ARI so issue a
+			 * warning and skip instantiating the VFs.  They
+			 * won't be reachable.
+			 */
+			dev_warn(&pdev->dev, "Parent bridge %02x:%02x.%x doesn't support ARI; can't instantiate Virtual Functions\n",
+				 pbridge->bus->number, PCI_SLOT(pbridge->devfn),
+				 PCI_FUNC(pbridge->devfn));
+			return -EOPNOTSUPP;
+		}
+		memset(&port_cmd, 0, sizeof(port_cmd));
+		port_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
+						 FW_CMD_REQUEST_F |
+						 FW_CMD_READ_F |
+						 FW_PFVF_CMD_PFN_V(adap->pf) |
+						 FW_PFVF_CMD_VFN_V(0));
+		port_cmd.retval_len16 = cpu_to_be32(FW_LEN16(port_cmd));
+		err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd),
+				 &port_rpl);
 		if (err)
 			return err;
+		pmask = FW_PFVF_CMD_PMASK_G(be32_to_cpu(port_rpl.type_to_neq));
+		port = ffs(pmask) - 1;
+		/* Allocate VF Management Interface. */
+		snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx,
+			 adap->pf);
+		netdev = alloc_netdev(sizeof(struct port_info),
+				      name, NET_NAME_UNKNOWN, cxgb4_mgmt_setup);
+		if (!netdev)
+			return -ENOMEM;
+
+		pi = netdev_priv(netdev);
+		pi->adapter = adap;
+		pi->lport = port;
+		pi->tx_chan = port;
+		SET_NETDEV_DEV(netdev, &pdev->dev);
+
+		adap->port[0] = netdev;
+		pi->port_id = 0;
+
+		err = register_netdev(adap->port[0]);
+		if (err) {
+			pr_info("Unable to register VF mgmt netdev %s\n", name);
+			free_netdev(adap->port[0]);
+			adap->port[0] = NULL;
+			return err;
+		}
+		/* Allocate and set up VF Information. */
+		adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev),
+				       sizeof(struct vf_info), GFP_KERNEL);
+		if (!adap->vfinfo) {
+			unregister_netdev(adap->port[0]);
+			free_netdev(adap->port[0]);
+			adap->port[0] = NULL;
+			return -ENOMEM;
+		}
+		cxgb4_mgmt_fill_vf_station_mac_addr(adap);
+	}
+	/* Instantiate the requested number of VFs. */
+	err = pci_enable_sriov(pdev, num_vfs);
+	if (err) {
+		pr_info("Unable to instantiate %d VFs\n", num_vfs);
+		if (!current_vfs) {
+			unregister_netdev(adap->port[0]);
+			free_netdev(adap->port[0]);
+			adap->port[0] = NULL;
+			kfree(adap->vfinfo);
+			adap->vfinfo = NULL;
+		}
+		return err;
 	}
 
-	adap->vfinfo = kcalloc(adap->num_vfs,
-			       sizeof(struct vf_info), GFP_KERNEL);
-	if (adap->vfinfo)
-		fill_vf_station_mac_addr(adap);
+	adap->num_vfs = num_vfs;
 	return num_vfs;
 }
-#endif
+#endif /* CONFIG_PCI_IOV */
 
 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
@@ -5031,9 +5093,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	u32 whoami, pl_rev;
 	enum chip_type chip;
 	static int adap_idx = 1;
-#ifdef CONFIG_PCI_IOV
-	u32 v, port_vec;
-#endif
 
 	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
 
@@ -5057,6 +5116,13 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto out_disable_device;
 	}
 
+	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+	if (!adapter) {
+		err = -ENOMEM;
+		goto out_unmap_bar0;
+	}
+
+	adapter->regs = regs;
 	err = t4_wait_dev_ready(regs);
 	if (err < 0)
 		goto out_unmap_bar0;
@@ -5067,48 +5133,13 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	chip = get_chip_type(pdev, pl_rev);
 	func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
 		SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
-	if (func != ent->driver_data) {
-#ifndef CONFIG_PCI_IOV
-		iounmap(regs);
-#endif
-		pci_disable_device(pdev);
-		pci_save_state(pdev);        /* to restore SR-IOV later */
-		goto sriov;
-	}
 
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-		highdma = true;
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-		if (err) {
-			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
-				"coherent allocations\n");
-			goto out_unmap_bar0;
-		}
-	} else {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (err) {
-			dev_err(&pdev->dev, "no usable DMA configuration\n");
-			goto out_unmap_bar0;
-		}
-	}
-
-	pci_enable_pcie_error_reporting(pdev);
-	pci_set_master(pdev);
-	pci_save_state(pdev);
-
-	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
-	if (!adapter) {
-		err = -ENOMEM;
-		goto out_unmap_bar0;
-	}
-	adap_idx++;
-
-	adapter->workq = create_singlethread_workqueue("cxgb4");
-	if (!adapter->workq) {
-		err = -ENOMEM;
-		goto out_free_adapter;
-	}
-
+	adapter->pdev = pdev;
+	adapter->pdev_dev = &pdev->dev;
+	adapter->name = pci_name(pdev);
+	adapter->mbox = func;
+	adapter->pf = func;
+	adapter->msg_enable = DFLT_MSG_ENABLE;
 	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
 				    (sizeof(struct mbox_cmd) *
 				     T4_OS_LOG_MBOX_CMDS),
@@ -5117,18 +5148,46 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		err = -ENOMEM;
 		goto out_free_adapter;
 	}
+	spin_lock_init(&adapter->mbox_lock);
+	INIT_LIST_HEAD(&adapter->mlist.list);
+	pci_set_drvdata(pdev, adapter);
+
+	if (func != ent->driver_data) {
+		pci_disable_device(pdev);
+		pci_save_state(pdev);        /* to restore SR-IOV later */
+		return 0;
+	}
+
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		highdma = true;
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		if (err) {
+			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
+				"coherent allocations\n");
+			goto out_free_adapter;
+		}
+	} else {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev, "no usable DMA configuration\n");
+			goto out_free_adapter;
+		}
+	}
+
+	pci_enable_pcie_error_reporting(pdev);
+	pci_set_master(pdev);
+	pci_save_state(pdev);
+	adap_idx++;
+	adapter->workq = create_singlethread_workqueue("cxgb4");
+	if (!adapter->workq) {
+		err = -ENOMEM;
+		goto out_free_adapter;
+	}
+
 	adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
 
 	/* PCI device has been enabled */
 	adapter->flags |= DEV_ENABLED;
-
-	adapter->regs = regs;
-	adapter->pdev = pdev;
-	adapter->pdev_dev = &pdev->dev;
-	adapter->name = pci_name(pdev);
-	adapter->mbox = func;
-	adapter->pf = func;
-	adapter->msg_enable = DFLT_MSG_ENABLE;
 	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
 
 	/* If possible, we use PCIe Relaxed Ordering Attribute to deliver
@@ -5151,9 +5210,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	spin_lock_init(&adapter->stats_lock);
 	spin_lock_init(&adapter->tid_release_lock);
 	spin_lock_init(&adapter->win0_lock);
-	spin_lock_init(&adapter->mbox_lock);
-
-	INIT_LIST_HEAD(&adapter->mlist.list);
 
 	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
 	INIT_WORK(&adapter->db_full_task, process_db_full);
@@ -5426,58 +5482,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	setup_fw_sge_queues(adapter);
 	return 0;
 
-sriov:
-#ifdef CONFIG_PCI_IOV
-	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
-	if (!adapter) {
-		err = -ENOMEM;
-		goto free_pci_region;
-	}
-
-	adapter->pdev = pdev;
-	adapter->pdev_dev = &pdev->dev;
-	adapter->name = pci_name(pdev);
-	adapter->mbox = func;
-	adapter->pf = func;
-	adapter->regs = regs;
-	adapter->adap_idx = adap_idx;
-	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
-				    (sizeof(struct mbox_cmd) *
-				     T4_OS_LOG_MBOX_CMDS),
-				    GFP_KERNEL);
-	if (!adapter->mbox_log) {
-		err = -ENOMEM;
-		goto free_adapter;
-	}
-	spin_lock_init(&adapter->mbox_lock);
-	INIT_LIST_HEAD(&adapter->mlist.list);
-
-	v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
-	    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
-	err = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 1,
-			      &v, &port_vec);
-	if (err < 0) {
-		dev_err(adapter->pdev_dev, "Could not fetch port params\n");
-		goto free_mbox_log;
-	}
-
-	adapter->params.nports = hweight32(port_vec);
-	pci_set_drvdata(pdev, adapter);
-	return 0;
-
-free_mbox_log:
-	kfree(adapter->mbox_log);
- free_adapter:
-	kfree(adapter);
- free_pci_region:
-	iounmap(regs);
-	pci_disable_sriov(pdev);
-	pci_release_regions(pdev);
-	return err;
-#else
-	return 0;
-#endif
-
  out_free_dev:
 	free_some_resources(adapter);
 	if (adapter->flags & USING_MSIX)
@@ -5569,14 +5573,7 @@ static void remove_one(struct pci_dev *pdev)
 	}
 #ifdef CONFIG_PCI_IOV
 	else {
-		if (adapter->port[0])
-			unregister_netdev(adapter->port[0]);
-		iounmap(adapter->regs);
-		kfree(adapter->vfinfo);
-		kfree(adapter->mbox_log);
-		kfree(adapter);
-		pci_disable_sriov(pdev);
-		pci_release_regions(pdev);
+		cxgb4_iov_configure(adapter->pdev, 0);
 	}
 #endif
 }
@@ -5620,18 +5617,6 @@ static void shutdown_one(struct pci_dev *pdev)
 		if (adapter->flags & FW_OK)
 			t4_fw_bye(adapter, adapter->mbox);
 	}
-#ifdef CONFIG_PCI_IOV
-	else {
-		if (adapter->port[0])
-			unregister_netdev(adapter->port[0]);
-		iounmap(adapter->regs);
-		kfree(adapter->vfinfo);
-		kfree(adapter->mbox_log);
-		kfree(adapter);
-		pci_disable_sriov(pdev);
-		pci_release_regions(pdev);
-	}
-#endif
 }
 
 static struct pci_driver cxgb4_driver = {
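For reference, a minimal sketch of the ->sriov_configure() contract that the reworked cxgb4_iov_configure() above implements: return the enabled VF count on success, 0 after disabling, and refuse a resize while VFs are already enabled. Only pci_num_vf(), pci_enable_sriov() and pci_disable_sriov() below are real kernel APIs; the my_* names are placeholders.

	#include <linux/pci.h>

	static int my_sriov_configure(struct pci_dev *pdev, int num_vfs)
	{
		int current_vfs = pci_num_vf(pdev);

		if (num_vfs == current_vfs)		/* nothing to do */
			return num_vfs;

		if (!num_vfs) {				/* disable SR-IOV */
			pci_disable_sriov(pdev);
			return 0;
		}

		if (current_vfs)			/* disable before resizing */
			return -EBUSY;

		/* returns 0 on success; report the enabled count upward */
		return pci_enable_sriov(pdev, num_vfs) ?: num_vfs;
	}
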
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 410a0a9..b3e7faf 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1913,3 +1913,7 @@ static struct platform_driver cs89x0_driver = {
 module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe);
 
 #endif /* CONFIG_CS89x0_PLATFORM */
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Crystal Semiconductor (Now Cirrus Logic) CS89[02]0 network driver");
+MODULE_AUTHOR("Russell Nelson <nelson@crynwr.com>");
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 461014b..736df59 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -757,6 +757,12 @@ static int ibmvnic_login(struct net_device *netdev)
 		}
 	} while (adapter->renegotiate);
 
+	/* handle pending MAC address changes after successful login */
+	if (adapter->mac_change_pending) {
+		__ibmvnic_set_mac(netdev, &adapter->desired.mac);
+		adapter->mac_change_pending = false;
+	}
+
 	return 0;
 }
 
@@ -994,11 +1000,6 @@ static int ibmvnic_open(struct net_device *netdev)
 
 	mutex_lock(&adapter->reset_lock);
 
-	if (adapter->mac_change_pending) {
-		__ibmvnic_set_mac(netdev, &adapter->desired.mac);
-		adapter->mac_change_pending = false;
-	}
-
 	if (adapter->state != VNIC_CLOSED) {
 		rc = ibmvnic_login(netdev);
 		if (rc) {
@@ -1532,7 +1533,7 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 	struct sockaddr *addr = p;
 
-	if (adapter->state != VNIC_OPEN) {
+	if (adapter->state == VNIC_PROBED) {
 		memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
 		adapter->mac_change_pending = true;
 		return 0;
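The two ibmvnic hunks above move the deferred MAC handling from open to login. A hedged sketch of the underlying defer-until-ready pattern, with an illustrative adapter layout (field and helper names here are not the driver's):

	enum example_state { STATE_PROBED, STATE_OPEN };

	struct example_adapter {
		enum example_state state;
		struct sockaddr desired_mac;	/* remembered request */
		bool mac_change_pending;
	};

	static int example_set_mac(struct net_device *netdev, void *p)
	{
		struct example_adapter *adapter = netdev_priv(netdev);
		struct sockaddr *addr = p;

		if (adapter->state == STATE_PROBED) {
			/* too early to talk to firmware; replay after login */
			memcpy(&adapter->desired_mac, addr, sizeof(*addr));
			adapter->mac_change_pending = true;
			return 0;
		}
		return example_apply_mac(netdev, addr);	/* hypothetical */
	}
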
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index d629da2..7b98859 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -865,7 +865,7 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
 			   u16 vid);
 void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
 void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
-void mlx5e_timestamp_set(struct mlx5e_priv *priv);
+void mlx5e_timestamp_init(struct mlx5e_priv *priv);
 
 struct mlx5e_redirect_rqt_param {
 	bool is_rss;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 9bcf38f..3d46ef4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -922,8 +922,9 @@ static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
 
 static void mlx5e_ets_init(struct mlx5e_priv *priv)
 {
-	int i;
 	struct ieee_ets ets;
+	int err;
+	int i;
 
 	if (!MLX5_CAP_GEN(priv->mdev, ets))
 		return;
@@ -936,11 +937,16 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
 		ets.prio_tc[i] = i;
 	}
 
-	/* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
-	ets.prio_tc[0] = 1;
-	ets.prio_tc[1] = 0;
+	if (ets.ets_cap > 1) {
+		/* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
+		ets.prio_tc[0] = 1;
+		ets.prio_tc[1] = 0;
+	}
 
-	mlx5e_dcbnl_ieee_setets_core(priv, &ets);
+	err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
+	if (err)
+		netdev_err(priv->netdev,
+			   "%s, Failed to init ETS: %d\n", __func__, err);
 }
 
 enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index bd5af7f..2d13950 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -207,8 +207,7 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
 		return;
 
 	mutex_lock(&priv->state_lock);
-	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-		mlx5e_update_stats(priv, true);
+	mlx5e_update_stats(priv, true);
 	mutex_unlock(&priv->state_lock);
 
 	for (i = 0; i < mlx5e_num_stats_grps; i++)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index bbbdb5c..466a4e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2685,7 +2685,7 @@ void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
 		netif_carrier_on(netdev);
 }
 
-void mlx5e_timestamp_set(struct mlx5e_priv *priv)
+void mlx5e_timestamp_init(struct mlx5e_priv *priv)
 {
 	priv->tstamp.tx_type   = HWTSTAMP_TX_OFF;
 	priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
@@ -2706,7 +2706,6 @@ int mlx5e_open_locked(struct net_device *netdev)
 	mlx5e_activate_priv_channels(priv);
 	if (priv->profile->update_carrier)
 		priv->profile->update_carrier(priv);
-	mlx5e_timestamp_set(priv);
 
 	if (priv->profile->update_stats)
 		queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
@@ -3238,12 +3237,12 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
 	return 0;
 }
 
-#define MLX5E_SET_FEATURE(netdev, feature, enable)	\
+#define MLX5E_SET_FEATURE(features, feature, enable)	\
 	do {						\
 		if (enable)				\
-			netdev->features |= feature;	\
+			*features |= feature;		\
 		else					\
-			netdev->features &= ~feature;	\
+			*features &= ~feature;		\
 	} while (0)
 
 typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
@@ -3366,6 +3365,7 @@ static int set_feature_arfs(struct net_device *netdev, bool enable)
 #endif
 
 static int mlx5e_handle_feature(struct net_device *netdev,
+				netdev_features_t *features,
 				netdev_features_t wanted_features,
 				netdev_features_t feature,
 				mlx5e_feature_handler feature_handler)
@@ -3384,34 +3384,40 @@ static int mlx5e_handle_feature(struct net_device *netdev,
 		return err;
 	}
 
-	MLX5E_SET_FEATURE(netdev, feature, enable);
+	MLX5E_SET_FEATURE(features, feature, enable);
 	return 0;
 }
 
 static int mlx5e_set_features(struct net_device *netdev,
 			      netdev_features_t features)
 {
+	netdev_features_t oper_features = netdev->features;
 	int err;
 
-	err  = mlx5e_handle_feature(netdev, features, NETIF_F_LRO,
-				    set_feature_lro);
-	err |= mlx5e_handle_feature(netdev, features,
+	err  = mlx5e_handle_feature(netdev, &oper_features, features,
+				    NETIF_F_LRO, set_feature_lro);
+	err |= mlx5e_handle_feature(netdev, &oper_features, features,
 				    NETIF_F_HW_VLAN_CTAG_FILTER,
 				    set_feature_cvlan_filter);
-	err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
-				    set_feature_tc_num_filters);
-	err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
-				    set_feature_rx_all);
-	err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXFCS,
-				    set_feature_rx_fcs);
-	err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
-				    set_feature_rx_vlan);
+	err |= mlx5e_handle_feature(netdev, &oper_features, features,
+				    NETIF_F_HW_TC, set_feature_tc_num_filters);
+	err |= mlx5e_handle_feature(netdev, &oper_features, features,
+				    NETIF_F_RXALL, set_feature_rx_all);
+	err |= mlx5e_handle_feature(netdev, &oper_features, features,
+				    NETIF_F_RXFCS, set_feature_rx_fcs);
+	err |= mlx5e_handle_feature(netdev, &oper_features, features,
+				    NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
 #ifdef CONFIG_RFS_ACCEL
-	err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE,
-				    set_feature_arfs);
+	err |= mlx5e_handle_feature(netdev, &oper_features, features,
+				    NETIF_F_NTUPLE, set_feature_arfs);
 #endif
 
-	return err ? -EINVAL : 0;
+	if (err) {
+		netdev->features = oper_features;
+		return -EINVAL;
+	}
+
+	return 0;
 }
 
 static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
@@ -4167,6 +4173,8 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
 	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
 	INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
 	INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+
+	mlx5e_timestamp_init(priv);
 }
 
 static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
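The mlx5e_set_features() rework above is a commit-on-copy pattern: each handler flips bits in a local copy of the feature word, and on failure that copy, which holds only the toggles that actually took effect, is written back. A minimal sketch; handle_feature(), set_lro() and set_fcs() stand in for the mlx5e helpers:

	static int example_set_features(struct net_device *netdev,
					netdev_features_t wanted)
	{
		netdev_features_t oper_features = netdev->features;
		int err = 0;

		err |= handle_feature(netdev, &oper_features, wanted,
				      NETIF_F_LRO, set_lro);
		err |= handle_feature(netdev, &oper_features, wanted,
				      NETIF_F_RXFCS, set_fcs);

		if (err) {
			/* keep only what actually succeeded */
			netdev->features = oper_features;
			return -EINVAL;
		}
		return 0;
	}
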
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 4d1b0ff..10fa6a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -934,6 +934,8 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
 
 	mlx5e_build_rep_params(mdev, &priv->channels.params);
 	mlx5e_build_rep_netdev(netdev);
+
+	mlx5e_timestamp_init(priv);
 }
 
 static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 1f1f8af..5a46082 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -238,15 +238,19 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
 	int err = 0;
 
 	/* Temporarily enable local_lb */
-	if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) {
-		mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb);
-		if (!lbtp->local_lb)
-			mlx5_nic_vport_update_local_lb(priv->mdev, true);
+	err = mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb);
+	if (err)
+		return err;
+
+	if (!lbtp->local_lb) {
+		err = mlx5_nic_vport_update_local_lb(priv->mdev, true);
+		if (err)
+			return err;
 	}
 
 	err = mlx5e_refresh_tirs(priv, true);
 	if (err)
-		return err;
+		goto out;
 
 	lbtp->loopback_ok = false;
 	init_completion(&lbtp->comp);
@@ -256,16 +260,21 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
 	lbtp->pt.dev = priv->netdev;
 	lbtp->pt.af_packet_priv = lbtp;
 	dev_add_pack(&lbtp->pt);
+
+	return 0;
+
+out:
+	if (!lbtp->local_lb)
+		mlx5_nic_vport_update_local_lb(priv->mdev, false);
+
 	return err;
 }
 
 static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv,
 					struct mlx5e_lbt_priv *lbtp)
 {
-	if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) {
-		if (!lbtp->local_lb)
-			mlx5_nic_vport_update_local_lb(priv->mdev, false);
-	}
+	if (!lbtp->local_lb)
+		mlx5_nic_vport_update_local_lb(priv->mdev, false);
 
 	dev_remove_pack(&lbtp->pt);
 	mlx5e_refresh_tirs(priv, false);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 3b2363e..ef1e787 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -85,6 +85,8 @@ void mlx5i_init(struct mlx5_core_dev *mdev,
 	mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
 	mlx5i_build_nic_params(mdev, &priv->channels.params);
 
+	mlx5e_timestamp_init(priv);
+
 	/* netdev init */
 	netdev->hw_features    |= NETIF_F_SG;
 	netdev->hw_features    |= NETIF_F_IP_CSUM;
@@ -449,7 +451,6 @@ static int mlx5i_open(struct net_device *netdev)
 
 	mlx5e_refresh_tirs(epriv, false);
 	mlx5e_activate_priv_channels(epriv);
-	mlx5e_timestamp_set(epriv);
 
 	mutex_unlock(&epriv->state_lock);
 	return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index fa8aed6..5701f12 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -423,9 +423,13 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
 
 	switch (clock->ptp_info.pin_config[pin].func) {
 	case PTP_PF_EXTTS:
+		ptp_event.index = pin;
+		ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
+					be64_to_cpu(eqe->data.pps.time_stamp));
 		if (clock->pps_info.enabled) {
 			ptp_event.type = PTP_CLOCK_PPSUSR;
-			ptp_event.pps_times.ts_real = ns_to_timespec64(eqe->data.pps.time_stamp);
+			ptp_event.pps_times.ts_real =
+					ns_to_timespec64(ptp_event.timestamp);
 		} else {
 			ptp_event.type = PTP_CLOCK_EXTTS;
 		}
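The clock.c fix above is a units correction: the event queue entry carries a raw big-endian cycle count, which only becomes a nanosecond value after running through the timecounter. Condensed, using the same mlx5 fields as the hunk:

	u64 cycles = be64_to_cpu(eqe->data.pps.time_stamp);
	u64 ns = timecounter_cyc2time(&clock->tc, cycles);

	ptp_event.timestamp = ns;
	ptp_event.pps_times.ts_real = ns_to_timespec64(ns);
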
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 8a89c7e..0f88fd3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -319,6 +319,7 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
 	struct mlx5_eq_table *table = &priv->eq_table;
 	int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
 	int nvec;
+	int err;
 
 	nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
 	       MLX5_EQ_VEC_COMP_BASE;
@@ -328,21 +329,23 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
 
 	priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL);
 	if (!priv->irq_info)
-		goto err_free_msix;
+		return -ENOMEM;
 
 	nvec = pci_alloc_irq_vectors(dev->pdev,
 			MLX5_EQ_VEC_COMP_BASE + 1, nvec,
 			PCI_IRQ_MSIX);
-	if (nvec < 0)
-		return nvec;
+	if (nvec < 0) {
+		err = nvec;
+		goto err_free_irq_info;
+	}
 
 	table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
 
 	return 0;
 
-err_free_msix:
+err_free_irq_info:
 	kfree(priv->irq_info);
-	return -ENOMEM;
+	return err;
 }
 
 static void mlx5_free_irq_vectors(struct mlx5_core_dev *dev)
@@ -578,8 +581,7 @@ static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev)
 	int ret = 0;
 
 	/* Disable local_lb by default */
-	if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
-	    MLX5_CAP_GEN(dev, disable_local_lb))
+	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
 		ret = mlx5_nic_vport_update_local_lb(dev, false);
 
 	return ret;
@@ -1121,9 +1123,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 		goto err_stop_poll;
 	}
 
-	if (boot && mlx5_init_once(dev, priv)) {
-		dev_err(&pdev->dev, "sw objs init failed\n");
-		goto err_stop_poll;
+	if (boot) {
+		err = mlx5_init_once(dev, priv);
+		if (err) {
+			dev_err(&pdev->dev, "sw objs init failed\n");
+			goto err_stop_poll;
+		}
 	}
 
 	err = mlx5_alloc_irq_vectors(dev);
@@ -1133,8 +1138,9 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 	}
 
 	dev->priv.uar = mlx5_get_uars_page(dev);
-	if (!dev->priv.uar) {
+	if (IS_ERR(dev->priv.uar)) {
 		dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
+		err = PTR_ERR(dev->priv.uar);
 		goto err_disable_msix;
 	}
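The mlx5_alloc_irq_vectors() hunk above restores the usual kernel unwind idiom: each failure jumps to a label that releases exactly what was set up before it, and the real error code is propagated instead of a hard-coded -ENOMEM. Schematically (my_* names are placeholders):

	static int my_setup(struct my_dev *dev, int nvec)
	{
		int err;

		dev->irq_info = kcalloc(nvec, sizeof(*dev->irq_info),
					GFP_KERNEL);
		if (!dev->irq_info)
			return -ENOMEM;		/* nothing to unwind yet */

		err = my_alloc_vectors(dev);	/* hypothetical, <0 on error */
		if (err < 0)
			goto err_free_irq_info;	/* propagate err */

		return 0;

	err_free_irq_info:
		kfree(dev->irq_info);
		return err;
	}
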
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 222b259..8b97066 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -168,18 +168,16 @@ struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev)
 	struct mlx5_uars_page *ret;
 
 	mutex_lock(&mdev->priv.bfregs.reg_head.lock);
-	if (list_empty(&mdev->priv.bfregs.reg_head.list)) {
-		ret = alloc_uars_page(mdev, false);
-		if (IS_ERR(ret)) {
-			ret = NULL;
-			goto out;
-		}
-		list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
-	} else {
+	if (!list_empty(&mdev->priv.bfregs.reg_head.list)) {
 		ret = list_first_entry(&mdev->priv.bfregs.reg_head.list,
 				       struct mlx5_uars_page, list);
 		kref_get(&ret->ref_count);
+		goto out;
 	}
+	ret = alloc_uars_page(mdev, false);
+	if (IS_ERR(ret))
+		goto out;
+	list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
 out:
 	mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
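With this change mlx5_get_uars_page() reports failure through the ERR_PTR convention instead of returning NULL, which is why the mlx5_load_one() hunk earlier switches its caller to IS_ERR()/PTR_ERR(). The canonical caller shape:

	struct mlx5_uars_page *up = mlx5_get_uars_page(mdev);

	if (IS_ERR(up))
		return PTR_ERR(up);	/* propagate the encoded errno */
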
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index d653b00..a1296a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -908,23 +908,33 @@ int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
 	void *in;
 	int err;
 
-	mlx5_core_dbg(mdev, "%s local_lb\n", enable ? "enable" : "disable");
+	if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) &&
+	    !MLX5_CAP_GEN(mdev, disable_local_lb_uc))
+		return 0;
+
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
 
 	MLX5_SET(modify_nic_vport_context_in, in,
-		 field_select.disable_mc_local_lb, 1);
-	MLX5_SET(modify_nic_vport_context_in, in,
 		 nic_vport_context.disable_mc_local_lb, !enable);
-
-	MLX5_SET(modify_nic_vport_context_in, in,
-		 field_select.disable_uc_local_lb, 1);
 	MLX5_SET(modify_nic_vport_context_in, in,
 		 nic_vport_context.disable_uc_local_lb, !enable);
 
+	if (MLX5_CAP_GEN(mdev, disable_local_lb_mc))
+		MLX5_SET(modify_nic_vport_context_in, in,
+			 field_select.disable_mc_local_lb, 1);
+
+	if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))
+		MLX5_SET(modify_nic_vport_context_in, in,
+			 field_select.disable_uc_local_lb, 1);
+
 	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
 
+	if (!err)
+		mlx5_core_dbg(mdev, "%s local_lb\n",
+			      enable ? "enable" : "disable");
+
 	kvfree(in);
 	return err;
 }
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index f3315bc..3529b545 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -113,6 +113,7 @@ struct mlxsw_core {
 	struct mlxsw_thermal *thermal;
 	struct mlxsw_core_port *ports;
 	unsigned int max_ports;
+	bool reload_fail;
 	unsigned long driver_priv[0];
 	/* driver_priv has to be always the last item */
 };
@@ -962,7 +963,28 @@ mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
 						     pool_type, p_cur, p_max);
 }
 
+static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink)
+{
+	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+	const struct mlxsw_bus *mlxsw_bus = mlxsw_core->bus;
+	int err;
+
+	if (!mlxsw_bus->reset)
+		return -EOPNOTSUPP;
+
+	mlxsw_core_bus_device_unregister(mlxsw_core, true);
+	mlxsw_bus->reset(mlxsw_core->bus_priv);
+	err = mlxsw_core_bus_device_register(mlxsw_core->bus_info,
+					     mlxsw_core->bus,
+					     mlxsw_core->bus_priv, true,
+					     devlink);
+	if (err)
+		mlxsw_core->reload_fail = true;
+	return err;
+}
+
 static const struct devlink_ops mlxsw_devlink_ops = {
+	.reload				= mlxsw_devlink_core_bus_device_reload,
 	.port_type_set			= mlxsw_devlink_port_type_set,
 	.port_split			= mlxsw_devlink_port_split,
 	.port_unsplit			= mlxsw_devlink_port_unsplit,
@@ -980,23 +1002,26 @@ static const struct devlink_ops mlxsw_devlink_ops = {
 
 int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
 				   const struct mlxsw_bus *mlxsw_bus,
-				   void *bus_priv)
+				   void *bus_priv, bool reload,
+				   struct devlink *devlink)
 {
 	const char *device_kind = mlxsw_bus_info->device_kind;
 	struct mlxsw_core *mlxsw_core;
 	struct mlxsw_driver *mlxsw_driver;
-	struct devlink *devlink;
 	size_t alloc_size;
 	int err;
 
 	mlxsw_driver = mlxsw_core_driver_get(device_kind);
 	if (!mlxsw_driver)
 		return -EINVAL;
-	alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
-	devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
-	if (!devlink) {
-		err = -ENOMEM;
-		goto err_devlink_alloc;
+
+	if (!reload) {
+		alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
+		devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
+		if (!devlink) {
+			err = -ENOMEM;
+			goto err_devlink_alloc;
+		}
 	}
 
 	mlxsw_core = devlink_priv(devlink);
@@ -1012,6 +1037,12 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
 	if (err)
 		goto err_bus_init;
 
+	if (mlxsw_driver->resources_register && !reload) {
+		err = mlxsw_driver->resources_register(mlxsw_core);
+		if (err)
+			goto err_register_resources;
+	}
+
 	err = mlxsw_ports_init(mlxsw_core);
 	if (err)
 		goto err_ports_init;
@@ -1032,9 +1063,11 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
 	if (err)
 		goto err_emad_init;
 
-	err = devlink_register(devlink, mlxsw_bus_info->dev);
-	if (err)
-		goto err_devlink_register;
+	if (!reload) {
+		err = devlink_register(devlink, mlxsw_bus_info->dev);
+		if (err)
+			goto err_devlink_register;
+	}
 
 	err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
 	if (err)
@@ -1057,7 +1090,8 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
 	mlxsw_thermal_fini(mlxsw_core->thermal);
 err_thermal_init:
 err_hwmon_init:
-	devlink_unregister(devlink);
+	if (!reload)
+		devlink_unregister(devlink);
 err_devlink_register:
 	mlxsw_emad_fini(mlxsw_core);
 err_emad_init:
@@ -1067,26 +1101,40 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
 err_ports_init:
 	mlxsw_bus->fini(bus_priv);
 err_bus_init:
-	devlink_free(devlink);
+	if (!reload)
+		devlink_resources_unregister(devlink, NULL);
+err_register_resources:
+	if (!reload)
+		devlink_free(devlink);
 err_devlink_alloc:
 	mlxsw_core_driver_put(device_kind);
 	return err;
 }
 EXPORT_SYMBOL(mlxsw_core_bus_device_register);
 
-void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
+				      bool reload)
 {
 	const char *device_kind = mlxsw_core->bus_info->device_kind;
 	struct devlink *devlink = priv_to_devlink(mlxsw_core);
 
+	if (mlxsw_core->reload_fail)
+		goto reload_fail;
+
 	if (mlxsw_core->driver->fini)
 		mlxsw_core->driver->fini(mlxsw_core);
 	mlxsw_thermal_fini(mlxsw_core->thermal);
-	devlink_unregister(devlink);
+	if (!reload)
+		devlink_unregister(devlink);
 	mlxsw_emad_fini(mlxsw_core);
 	kfree(mlxsw_core->lag.mapping);
 	mlxsw_ports_fini(mlxsw_core);
+	if (!reload)
+		devlink_resources_unregister(devlink, NULL);
 	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
+	if (reload)
+		return;
+reload_fail:
 	devlink_free(devlink);
 	mlxsw_core_driver_put(device_kind);
 }
@@ -1791,6 +1839,22 @@ void mlxsw_core_flush_owq(void)
 }
 EXPORT_SYMBOL(mlxsw_core_flush_owq);
 
+int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
+			     const struct mlxsw_config_profile *profile,
+			     u64 *p_single_size, u64 *p_double_size,
+			     u64 *p_linear_size)
+{
+	struct mlxsw_driver *driver = mlxsw_core->driver;
+
+	if (!driver->kvd_sizes_get)
+		return -EINVAL;
+
+	return driver->kvd_sizes_get(mlxsw_core, profile,
+				     p_single_size, p_double_size,
+				     p_linear_size);
+}
+EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
+
 static int __init mlxsw_core_module_init(void)
 {
 	int err;
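A hedged sketch of the devlink reload wiring introduced above: the op tears the device down with reload=true so the devlink instance (and any registered resources) survives, resets the bus, and registers again on the same instance. The my_* names are placeholders for the mlxsw_core_* helpers:

	static int my_devlink_reload(struct devlink *devlink)
	{
		struct my_core *core = devlink_priv(devlink);

		my_unregister(core, true);	/* reload: keep devlink */
		my_bus_reset(core->bus_priv);
		return my_register(core->bus_info, core->bus,
				   core->bus_priv, true, devlink);
	}

	static const struct devlink_ops my_devlink_ops = {
		.reload = my_devlink_reload,
	};
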
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 6e966af..5ddafd7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -66,8 +66,9 @@ void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
 
 int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
 				   const struct mlxsw_bus *mlxsw_bus,
-				   void *bus_priv);
-void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core);
+				   void *bus_priv, bool reload,
+				   struct devlink *devlink);
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, bool reload);
 
 struct mlxsw_tx_info {
 	u8 local_port;
@@ -308,10 +309,20 @@ struct mlxsw_driver {
 				       u32 *p_cur, u32 *p_max);
 	void (*txhdr_construct)(struct sk_buff *skb,
 				const struct mlxsw_tx_info *tx_info);
+	int (*resources_register)(struct mlxsw_core *mlxsw_core);
+	int (*kvd_sizes_get)(struct mlxsw_core *mlxsw_core,
+			     const struct mlxsw_config_profile *profile,
+			     u64 *p_single_size, u64 *p_double_size,
+			     u64 *p_linear_size);
 	u8 txhdr_len;
 	const struct mlxsw_config_profile *profile;
 };
 
+int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
+			     const struct mlxsw_config_profile *profile,
+			     u64 *p_single_size, u64 *p_double_size,
+			     u64 *p_linear_size);
+
 bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
 			  enum mlxsw_res_id res_id);
 
@@ -332,6 +343,7 @@ struct mlxsw_bus {
 		    const struct mlxsw_config_profile *profile,
 		    struct mlxsw_res *res);
 	void (*fini)(void *bus_priv);
+	void (*reset)(void *bus_priv);
 	bool (*skb_transmit_busy)(void *bus_priv,
 				  const struct mlxsw_tx_info *tx_info);
 	int (*skb_transmit)(void *bus_priv, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index c0dcfa0..25f9915 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -539,7 +539,8 @@ static int mlxsw_i2c_probe(struct i2c_client *client,
 	mlxsw_i2c->dev = &client->dev;
 
 	err = mlxsw_core_bus_device_register(&mlxsw_i2c->bus_info,
-					     &mlxsw_i2c_bus, mlxsw_i2c);
+					     &mlxsw_i2c_bus, mlxsw_i2c, false,
+					     NULL);
 	if (err) {
 		dev_err(&client->dev, "Fail to register core bus\n");
 		return err;
@@ -557,7 +558,7 @@ static int mlxsw_i2c_remove(struct i2c_client *client)
 {
 	struct mlxsw_i2c *mlxsw_i2c = i2c_get_clientdata(client);
 
-	mlxsw_core_bus_device_unregister(mlxsw_i2c->core);
+	mlxsw_core_bus_device_unregister(mlxsw_i2c->core, false);
 	mutex_destroy(&mlxsw_i2c->cmd.lock);
 
 	return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 6ef20e5..85faa87 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -154,6 +154,7 @@ struct mlxsw_pci {
 		} comp;
 	} cmd;
 	struct mlxsw_bus_info bus_info;
+	const struct pci_device_id *id;
 };
 
 static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
@@ -1052,38 +1053,18 @@ static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
 }
 
 static int
-mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_config_profile *profile,
+mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_pci *mlxsw_pci,
+				const struct mlxsw_config_profile *profile,
 				struct mlxsw_res *res)
 {
-	u32 single_size, double_size, linear_size;
+	u64 single_size, double_size, linear_size;
+	int err;
 
-	if (!MLXSW_RES_VALID(res, KVD_SINGLE_MIN_SIZE) ||
-	    !MLXSW_RES_VALID(res, KVD_DOUBLE_MIN_SIZE) ||
-	    !profile->used_kvd_split_data)
-		return -EIO;
-
-	linear_size = profile->kvd_linear_size;
-
-	/* The hash part is what left of the kvd without the
-	 * linear part. It is split to the single size and
-	 * double size by the parts ratio from the profile.
-	 * Both sizes must be a multiplications of the
-	 * granularity from the profile.
-	 */
-	double_size = MLXSW_RES_GET(res, KVD_SIZE) - linear_size;
-	double_size *= profile->kvd_hash_double_parts;
-	double_size /= profile->kvd_hash_double_parts +
-		       profile->kvd_hash_single_parts;
-	double_size /= profile->kvd_hash_granularity;
-	double_size *= profile->kvd_hash_granularity;
-	single_size = MLXSW_RES_GET(res, KVD_SIZE) - double_size -
-		      linear_size;
-
-	/* Check results are legal. */
-	if (single_size < MLXSW_RES_GET(res, KVD_SINGLE_MIN_SIZE) ||
-	    double_size < MLXSW_RES_GET(res, KVD_DOUBLE_MIN_SIZE) ||
-	    MLXSW_RES_GET(res, KVD_SIZE) < linear_size)
-		return -EIO;
+	err = mlxsw_core_kvd_sizes_get(mlxsw_pci->core, profile,
+				       &single_size, &double_size,
+				       &linear_size);
+	if (err)
+		return err;
 
 	MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size);
 	MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size);
@@ -1184,7 +1165,7 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
 			mbox, profile->adaptive_routing_group_cap);
 	}
 	if (MLXSW_RES_VALID(res, KVD_SIZE)) {
-		err = mlxsw_pci_profile_get_kvd_sizes(profile, res);
+		err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
 		if (err)
 			return err;
 
@@ -1622,16 +1603,6 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
 	return err;
 }
 
-static const struct mlxsw_bus mlxsw_pci_bus = {
-	.kind			= "pci",
-	.init			= mlxsw_pci_init,
-	.fini			= mlxsw_pci_fini,
-	.skb_transmit_busy	= mlxsw_pci_skb_transmit_busy,
-	.skb_transmit		= mlxsw_pci_skb_transmit,
-	.cmd_exec		= mlxsw_pci_cmd_exec,
-	.features		= MLXSW_BUS_F_TXRX,
-};
-
 static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
 			      const struct pci_device_id *id)
 {
@@ -1660,6 +1631,41 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
 	return 0;
 }
 
+static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
+{
+	pci_free_irq_vectors(mlxsw_pci->pdev);
+}
+
+static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
+{
+	int err;
+
+	err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX);
+	if (err < 0)
+		dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n");
+	return err;
+}
+
+static void mlxsw_pci_reset(void *bus_priv)
+{
+	struct mlxsw_pci *mlxsw_pci = bus_priv;
+
+	mlxsw_pci_free_irq_vectors(mlxsw_pci);
+	mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
+	mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
+}
+
+static const struct mlxsw_bus mlxsw_pci_bus = {
+	.kind			= "pci",
+	.init			= mlxsw_pci_init,
+	.fini			= mlxsw_pci_fini,
+	.skb_transmit_busy	= mlxsw_pci_skb_transmit_busy,
+	.skb_transmit		= mlxsw_pci_skb_transmit,
+	.cmd_exec		= mlxsw_pci_cmd_exec,
+	.features		= MLXSW_BUS_F_TXRX,
+	.reset			= mlxsw_pci_reset,
+};
+
 static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	const char *driver_name = pdev->driver->name;
@@ -1721,7 +1727,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto err_sw_reset;
 	}
 
-	err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
+	err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
 	if (err < 0) {
 		dev_err(&pdev->dev, "MSI-X init failed\n");
 		goto err_msix_init;
@@ -1730,9 +1736,11 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	mlxsw_pci->bus_info.device_kind = driver_name;
 	mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
 	mlxsw_pci->bus_info.dev = &pdev->dev;
+	mlxsw_pci->id = id;
 
 	err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
-					     &mlxsw_pci_bus, mlxsw_pci);
+					     &mlxsw_pci_bus, mlxsw_pci, false,
+					     NULL);
 	if (err) {
 		dev_err(&pdev->dev, "cannot register bus device\n");
 		goto err_bus_device_register;
@@ -1741,7 +1749,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	return 0;
 
 err_bus_device_register:
-	pci_free_irq_vectors(mlxsw_pci->pdev);
+	mlxsw_pci_free_irq_vectors(mlxsw_pci);
 err_msix_init:
 err_sw_reset:
 	iounmap(mlxsw_pci->hw_addr);
@@ -1760,8 +1768,8 @@ static void mlxsw_pci_remove(struct pci_dev *pdev)
 {
 	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
 
-	mlxsw_core_bus_device_unregister(mlxsw_pci->core);
-	pci_free_irq_vectors(mlxsw_pci->pdev);
+	mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
+	mlxsw_pci_free_irq_vectors(mlxsw_pci);
 	iounmap(mlxsw_pci->hw_addr);
 	pci_release_regions(mlxsw_pci->pdev);
 	pci_disable_device(mlxsw_pci->pdev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index f78bfe3..bbe4891 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1747,72 +1747,186 @@ static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
 }
 
 static int
-mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port,
-			     struct tc_cls_flower_offload *f,
-			     bool ingress)
+mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
+			     struct tc_cls_flower_offload *f)
 {
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);
+
 	switch (f->command) {
 	case TC_CLSFLOWER_REPLACE:
-		return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, f);
+		return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
 	case TC_CLSFLOWER_DESTROY:
-		mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, f);
+		mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
 		return 0;
 	case TC_CLSFLOWER_STATS:
-		return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress, f);
+		return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
 	default:
 		return -EOPNOTSUPP;
 	}
 }
 
-static int mlxsw_sp_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
-				      void *cb_priv, bool ingress)
+static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
+					       void *type_data,
+					       void *cb_priv, bool ingress)
 {
 	struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;
 
-	if (!tc_can_offload(mlxsw_sp_port->dev))
-		return -EOPNOTSUPP;
-
 	switch (type) {
 	case TC_SETUP_CLSMATCHALL:
+		if (!tc_can_offload(mlxsw_sp_port->dev))
+			return -EOPNOTSUPP;
+
 		return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
 						      ingress);
 	case TC_SETUP_CLSFLOWER:
-		return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data,
-						    ingress);
+		return 0;
 	default:
 		return -EOPNOTSUPP;
 	}
 }
 
-static int mlxsw_sp_setup_tc_block_cb_ig(enum tc_setup_type type,
-					 void *type_data, void *cb_priv)
+static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
+						  void *type_data,
+						  void *cb_priv)
 {
-	return mlxsw_sp_setup_tc_block_cb(type, type_data, cb_priv, true);
+	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
+						   cb_priv, true);
 }
 
-static int mlxsw_sp_setup_tc_block_cb_eg(enum tc_setup_type type,
-					 void *type_data, void *cb_priv)
+static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
+						  void *type_data,
+						  void *cb_priv)
 {
-	return mlxsw_sp_setup_tc_block_cb(type, type_data, cb_priv, false);
+	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
+						   cb_priv, false);
+}
+
+static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
+					     void *type_data, void *cb_priv)
+{
+	struct mlxsw_sp_acl_block *acl_block = cb_priv;
+
+	switch (type) {
+	case TC_SETUP_CLSMATCHALL:
+		return 0;
+	case TC_SETUP_CLSFLOWER:
+		if (mlxsw_sp_acl_block_disabled(acl_block))
+			return -EOPNOTSUPP;
+
+		return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int
+mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
+				    struct tcf_block *block, bool ingress)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	struct mlxsw_sp_acl_block *acl_block;
+	struct tcf_block_cb *block_cb;
+	int err;
+
+	block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
+				       mlxsw_sp);
+	if (!block_cb) {
+		acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, block->net);
+		if (!acl_block)
+			return -ENOMEM;
+		block_cb = __tcf_block_cb_register(block,
+						   mlxsw_sp_setup_tc_block_cb_flower,
+						   mlxsw_sp, acl_block);
+		if (IS_ERR(block_cb)) {
+			err = PTR_ERR(block_cb);
+			goto err_cb_register;
+		}
+	} else {
+		acl_block = tcf_block_cb_priv(block_cb);
+	}
+	tcf_block_cb_incref(block_cb);
+	err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
+				      mlxsw_sp_port, ingress);
+	if (err)
+		goto err_block_bind;
+
+	if (ingress)
+		mlxsw_sp_port->ing_acl_block = acl_block;
+	else
+		mlxsw_sp_port->eg_acl_block = acl_block;
+
+	return 0;
+
+err_block_bind:
+	if (!tcf_block_cb_decref(block_cb)) {
+		__tcf_block_cb_unregister(block_cb);
+err_cb_register:
+		mlxsw_sp_acl_block_destroy(acl_block);
+	}
+	return err;
+}
+
+static void
+mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
+				      struct tcf_block *block, bool ingress)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	struct mlxsw_sp_acl_block *acl_block;
+	struct tcf_block_cb *block_cb;
+	int err;
+
+	block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
+				       mlxsw_sp);
+	if (!block_cb)
+		return;
+
+	if (ingress)
+		mlxsw_sp_port->ing_acl_block = NULL;
+	else
+		mlxsw_sp_port->eg_acl_block = NULL;
+
+	acl_block = tcf_block_cb_priv(block_cb);
+	err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
+					mlxsw_sp_port, ingress);
+	if (!err && !tcf_block_cb_decref(block_cb)) {
+		__tcf_block_cb_unregister(block_cb);
+		mlxsw_sp_acl_block_destroy(acl_block);
+	}
 }
 
 static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
 				   struct tc_block_offload *f)
 {
 	tc_setup_cb_t *cb;
+	bool ingress;
+	int err;
 
-	if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
-		cb = mlxsw_sp_setup_tc_block_cb_ig;
-	else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
-		cb = mlxsw_sp_setup_tc_block_cb_eg;
-	else
+	if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
+		cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
+		ingress = true;
+	} else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
+		cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
+		ingress = false;
+	} else {
 		return -EOPNOTSUPP;
+	}
 
 	switch (f->command) {
 	case TC_BLOCK_BIND:
-		return tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
-					     mlxsw_sp_port);
+		err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
+					    mlxsw_sp_port);
+		if (err)
+			return err;
+		err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port,
+							  f->block, ingress);
+		if (err) {
+			tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
+			return err;
+		}
+		return 0;
 	case TC_BLOCK_UNBIND:
+		mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
+						      f->block, ingress);
 		tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
 		return 0;
 	default:
@@ -1842,10 +1956,18 @@ static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
 {
 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 
-	if (!enable && (mlxsw_sp_port->acl_rule_count ||
-			!list_empty(&mlxsw_sp_port->mall_tc_list))) {
-		netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
-		return -EINVAL;
+	if (!enable) {
+		if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
+		    mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
+		    !list_empty(&mlxsw_sp_port->mall_tc_list)) {
+			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
+			return -EINVAL;
+		}
+		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
+		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
+	} else {
+		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
+		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
 	}
 	return 0;
 }
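The flower bind/unbind pair above follows the shared-block idiom: several ports may be bound to one tcf_block, so the per-block ACL state is looked up first, created only for the first user, and refcounted so it is destroyed only when the last binding goes away. Condensed, with cb, create_priv() and destroy_priv() as stand-ins:

	block_cb = tcf_block_cb_lookup(block, cb, mlxsw_sp);
	if (!block_cb) {
		priv = create_priv();		/* first user of this block */
		block_cb = __tcf_block_cb_register(block, cb, mlxsw_sp, priv);
	}
	tcf_block_cb_incref(block_cb);

	/* ... and on unbind: */
	if (!tcf_block_cb_decref(block_cb)) {	/* last user gone */
		__tcf_block_cb_unregister(block_cb);
		destroy_priv(priv);
	}
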
@@ -3991,6 +4113,253 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
 	.resource_query_enable		= 1,
 };
 
+static bool
+mlxsw_sp_resource_kvd_granularity_validate(struct netlink_ext_ack *extack,
+					   u64 size)
+{
+	const struct mlxsw_config_profile *profile;
+
+	profile = &mlxsw_sp_config_profile;
+	if (size % profile->kvd_hash_granularity) {
+		NL_SET_ERR_MSG_MOD(extack, "resource set with wrong granularity");
+		return false;
+	}
+	return true;
+}
+
+static int
+mlxsw_sp_resource_kvd_size_validate(struct devlink *devlink, u64 size,
+				    struct netlink_ext_ack *extack)
+{
+	NL_SET_ERR_MSG_MOD(extack, "kvd size cannot be changed");
+	return -EINVAL;
+}
+
+static int
+mlxsw_sp_resource_kvd_linear_size_validate(struct devlink *devlink, u64 size,
+					   struct netlink_ext_ack *extack)
+{
+	if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+mlxsw_sp_resource_kvd_hash_single_size_validate(struct devlink *devlink, u64 size,
+						struct netlink_ext_ack *extack)
+{
+	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+
+	if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
+		return -EINVAL;
+
+	if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE)) {
+		NL_SET_ERR_MSG_MOD(extack, "hash single size is smaller than minimum");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int
+mlxsw_sp_resource_kvd_hash_double_size_validate(struct devlink *devlink, u64 size,
+						struct netlink_ext_ack *extack)
+{
+	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+
+	if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
+		return -EINVAL;
+
+	if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) {
+		NL_SET_ERR_MSG_MOD(extack, "hash double size is smaller than minimum");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static u64 mlxsw_sp_resource_kvd_linear_occ_get(struct devlink *devlink)
+{
+	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+	return mlxsw_sp_kvdl_occ_get(mlxsw_sp);
+}
+
+static struct devlink_resource_ops mlxsw_sp_resource_kvd_ops = {
+	.size_validate = mlxsw_sp_resource_kvd_size_validate,
+};
+
+static struct devlink_resource_ops mlxsw_sp_resource_kvd_linear_ops = {
+	.size_validate = mlxsw_sp_resource_kvd_linear_size_validate,
+	.occ_get = mlxsw_sp_resource_kvd_linear_occ_get,
+};
+
+static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_single_ops = {
+	.size_validate = mlxsw_sp_resource_kvd_hash_single_size_validate,
+};
+
+static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_double_ops = {
+	.size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate,
+};
+
+static struct devlink_resource_size_params mlxsw_sp_kvd_size_params;
+static struct devlink_resource_size_params mlxsw_sp_linear_size_params;
+static struct devlink_resource_size_params mlxsw_sp_hash_single_size_params;
+static struct devlink_resource_size_params mlxsw_sp_hash_double_size_params;
+
+static void
+mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core)
+{
+	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
+						 KVD_SINGLE_MIN_SIZE);
+	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
+						 KVD_DOUBLE_MIN_SIZE);
+	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
+	u32 linear_size_min = 0;
+
+	/* KVD top resource */
+	mlxsw_sp_kvd_size_params.size_min = kvd_size;
+	mlxsw_sp_kvd_size_params.size_max = kvd_size;
+	mlxsw_sp_kvd_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
+	mlxsw_sp_kvd_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+
+	/* Linear part init */
+	mlxsw_sp_linear_size_params.size_min = linear_size_min;
+	mlxsw_sp_linear_size_params.size_max = kvd_size - single_size_min -
+					       double_size_min;
+	mlxsw_sp_linear_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
+	mlxsw_sp_linear_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+
+	/* Hash double part init */
+	mlxsw_sp_hash_double_size_params.size_min = double_size_min;
+	mlxsw_sp_hash_double_size_params.size_max = kvd_size - single_size_min -
+						    linear_size_min;
+	mlxsw_sp_hash_double_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
+	mlxsw_sp_hash_double_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+
+	/* Hash single part init */
+	mlxsw_sp_hash_single_size_params.size_min = single_size_min;
+	mlxsw_sp_hash_single_size_params.size_max = kvd_size - double_size_min -
+						    linear_size_min;
+	mlxsw_sp_hash_single_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
+	mlxsw_sp_hash_single_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+}
+
+static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
+{
+	struct devlink *devlink = priv_to_devlink(mlxsw_core);
+	u32 kvd_size, single_size, double_size, linear_size;
+	const struct mlxsw_config_profile *profile;
+	int err;
+
+	profile = &mlxsw_sp_config_profile;
+	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
+		return -EIO;
+
+	mlxsw_sp_resource_size_params_prepare(mlxsw_core);
+	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
+	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
+					true, kvd_size,
+					MLXSW_SP_RESOURCE_KVD,
+					DEVLINK_RESOURCE_ID_PARENT_TOP,
+					&mlxsw_sp_kvd_size_params,
+					&mlxsw_sp_resource_kvd_ops);
+	if (err)
+		return err;
+
+	linear_size = profile->kvd_linear_size;
+	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
+					false, linear_size,
+					MLXSW_SP_RESOURCE_KVD_LINEAR,
+					MLXSW_SP_RESOURCE_KVD,
+					&mlxsw_sp_linear_size_params,
+					&mlxsw_sp_resource_kvd_linear_ops);
+	if (err)
+		return err;
+
+	double_size = kvd_size - linear_size;
+	double_size *= profile->kvd_hash_double_parts;
+	double_size /= profile->kvd_hash_double_parts +
+		       profile->kvd_hash_single_parts;
+	double_size = rounddown(double_size, profile->kvd_hash_granularity);
+	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
+					false, double_size,
+					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+					MLXSW_SP_RESOURCE_KVD,
+					&mlxsw_sp_hash_double_size_params,
+					&mlxsw_sp_resource_kvd_hash_double_ops);
+	if (err)
+		return err;
+
+	single_size = kvd_size - double_size - linear_size;
+	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
+					false, single_size,
+					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+					MLXSW_SP_RESOURCE_KVD,
+					&mlxsw_sp_hash_single_size_params,
+					&mlxsw_sp_resource_kvd_hash_single_ops);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
+				  const struct mlxsw_config_profile *profile,
+				  u64 *p_single_size, u64 *p_double_size,
+				  u64 *p_linear_size)
+{
+	struct devlink *devlink = priv_to_devlink(mlxsw_core);
+	u32 double_size;
+	int err;
+
+	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
+	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
+	    !profile->used_kvd_split_data)
+		return -EIO;
+
+	/* The hash part is what is left of the kvd without the
+	 * linear part. It is split into the single size and
+	 * double size by the parts ratio from the profile.
+	 * Both sizes must be multiples of the granularity
+	 * from the profile. In case the user provided the
+	 * sizes, they are obtained via devlink.
+	 */
+	err = devlink_resource_size_get(devlink,
+					MLXSW_SP_RESOURCE_KVD_LINEAR,
+					p_linear_size);
+	if (err)
+		*p_linear_size = profile->kvd_linear_size;
+
+	err = devlink_resource_size_get(devlink,
+					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+					p_double_size);
+	if (err) {
+		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
+			      *p_linear_size;
+		double_size *= profile->kvd_hash_double_parts;
+		double_size /= profile->kvd_hash_double_parts +
+			       profile->kvd_hash_single_parts;
+		*p_double_size = rounddown(double_size,
+					   profile->kvd_hash_granularity);
+	}
+
+	err = devlink_resource_size_get(devlink,
+					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+					p_single_size);
+	if (err)
+		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
+				 *p_double_size - *p_linear_size;
+
+	/* Check that the results are valid. */
+	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
+	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
+	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
+		return -EIO;
+
+	return 0;
+}
+
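For illustration, the split arithmetic above can be reproduced in isolation. A minimal sketch, assuming a hypothetical total KVD size and parts ratio (only the granularity and linear size mirror values used in this patch):

#include <stdint.h>
#include <stdio.h>

#define GRANULARITY 128	/* mirrors MLXSW_SP_KVD_GRANULARITY */

int main(void)
{
	/* Hypothetical inputs; only granularity and linear size follow
	 * values seen in this patch.
	 */
	uint32_t kvd_size = 245760;
	uint32_t linear_size = 98304;
	uint32_t double_parts = 2, single_parts = 4;

	/* Hash space is what remains after the linear part; split it by
	 * the parts ratio and round down to the granularity.
	 */
	uint32_t double_size = (kvd_size - linear_size) * double_parts /
			       (double_parts + single_parts);
	double_size -= double_size % GRANULARITY;

	uint32_t single_size = kvd_size - double_size - linear_size;

	printf("double=%u single=%u\n", double_size, single_size);
	return 0;
}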
 static struct mlxsw_driver mlxsw_sp_driver = {
 	.kind				= mlxsw_sp_driver_name,
 	.priv_size			= sizeof(struct mlxsw_sp),
@@ -4010,6 +4379,8 @@ static struct mlxsw_driver mlxsw_sp_driver = {
 	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
 	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
 	.txhdr_construct		= mlxsw_sp_txhdr_construct,
+	.resources_register		= mlxsw_sp_resources_register,
+	.kvd_sizes_get			= mlxsw_sp_kvd_sizes_get,
 	.txhdr_len			= MLXSW_TXHDR_LEN,
 	.profile			= &mlxsw_sp_config_profile,
 };
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 16f8fbd..58ff792 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -66,6 +66,18 @@
 #define MLXSW_SP_KVD_LINEAR_SIZE 98304 /* entries */
 #define MLXSW_SP_KVD_GRANULARITY 128
 
+#define MLXSW_SP_RESOURCE_NAME_KVD "kvd"
+#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR "linear"
+#define MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE "hash_single"
+#define MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE "hash_double"
+
+enum mlxsw_sp_resource_id {
+	MLXSW_SP_RESOURCE_KVD,
+	MLXSW_SP_RESOURCE_KVD_LINEAR,
+	MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+	MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+};
+
 struct mlxsw_sp_port;
 struct mlxsw_sp_rif;
 
@@ -248,6 +260,8 @@ struct mlxsw_sp_port {
 	struct list_head vlans_list;
 	struct mlxsw_sp_qdisc *root_qdisc;
 	unsigned acl_rule_count;
+	struct mlxsw_sp_acl_block *ing_acl_block;
+	struct mlxsw_sp_acl_block *eg_acl_block;
 };
 
 static inline bool
@@ -436,6 +450,7 @@ void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
 int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
 				   unsigned int entry_count,
 				   unsigned int *p_alloc_size);
+u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp);
 
 struct mlxsw_sp_acl_rule_info {
 	unsigned int priority;
@@ -455,8 +470,11 @@ struct mlxsw_sp_acl_profile_ops {
 			   void *priv, void *ruleset_priv);
 	void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
 	int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
-			    struct net_device *dev, bool ingress);
-	void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
+			    struct mlxsw_sp_port *mlxsw_sp_port,
+			    bool ingress);
+	void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
+			       struct mlxsw_sp_port *mlxsw_sp_port,
+			       bool ingress);
 	u16 (*ruleset_group_id)(void *ruleset_priv);
 	size_t rule_priv_size;
 	int (*rule_add)(struct mlxsw_sp *mlxsw_sp,
@@ -476,17 +494,34 @@ struct mlxsw_sp_acl_ops {
 				       enum mlxsw_sp_acl_profile profile);
 };
 
+struct mlxsw_sp_acl_block;
 struct mlxsw_sp_acl_ruleset;
 
 /* spectrum_acl.c */
 struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
+struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block);
+unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block);
+void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block);
+void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block);
+bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block);
+struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
+						     struct net *net);
+void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block);
+int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
+			    struct mlxsw_sp_acl_block *block,
+			    struct mlxsw_sp_port *mlxsw_sp_port,
+			    bool ingress);
+int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
+			      struct mlxsw_sp_acl_block *block,
+			      struct mlxsw_sp_port *mlxsw_sp_port,
+			      bool ingress);
 struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
-			    bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
+			    struct mlxsw_sp_acl_block *block, u32 chain_index,
 			    enum mlxsw_sp_acl_profile profile);
 struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
-			 bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
+			 struct mlxsw_sp_acl_block *block, u32 chain_index,
 			 enum mlxsw_sp_acl_profile profile);
 void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
 			      struct mlxsw_sp_acl_ruleset *ruleset);
@@ -553,11 +588,14 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
 extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops;
 
 /* spectrum_flower.c */
-int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
+			    struct mlxsw_sp_acl_block *block,
 			    struct tc_cls_flower_offload *f);
-void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
+			     struct mlxsw_sp_acl_block *block,
 			     struct tc_cls_flower_offload *f);
-int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
+			  struct mlxsw_sp_acl_block *block,
 			  struct tc_cls_flower_offload *f);
 
 /* spectrum_qdisc.c */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 93dcd31..9439bfa 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -39,6 +39,7 @@
 #include <linux/string.h>
 #include <linux/rhashtable.h>
 #include <linux/netdevice.h>
+#include <net/net_namespace.h>
 #include <net/tc_act/tc_vlan.h>
 
 #include "reg.h"
@@ -70,9 +71,23 @@ struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
 	return acl->afk;
 }
 
-struct mlxsw_sp_acl_ruleset_ht_key {
-	struct net_device *dev; /* dev this ruleset is bound to */
+struct mlxsw_sp_acl_block_binding {
+	struct list_head list;
+	struct net_device *dev;
+	struct mlxsw_sp_port *mlxsw_sp_port;
 	bool ingress;
+};
+
+struct mlxsw_sp_acl_block {
+	struct list_head binding_list;
+	struct mlxsw_sp_acl_ruleset *ruleset_zero;
+	struct mlxsw_sp *mlxsw_sp;
+	unsigned int rule_count;
+	unsigned int disable_count;
+};
+
+struct mlxsw_sp_acl_ruleset_ht_key {
+	struct mlxsw_sp_acl_block *block;
 	u32 chain_index;
 	const struct mlxsw_sp_acl_profile_ops *ops;
 };
@@ -118,8 +133,185 @@ struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
 	return mlxsw_sp->acl->dummy_fid;
 }
 
+struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block)
+{
+	return block->mlxsw_sp;
+}
+
+unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block)
+{
+	return block ? block->rule_count : 0;
+}
+
+void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block)
+{
+	if (block)
+		block->disable_count++;
+}
+
+void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block)
+{
+	if (block)
+		block->disable_count--;
+}
+
+bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block)
+{
+	return block->disable_count;
+}
+
+static int
+mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
+			  struct mlxsw_sp_acl_block *block,
+			  struct mlxsw_sp_acl_block_binding *binding)
+{
+	struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
+	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+
+	return ops->ruleset_bind(mlxsw_sp, ruleset->priv,
+				 binding->mlxsw_sp_port, binding->ingress);
+}
+
+static void
+mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
+			    struct mlxsw_sp_acl_block *block,
+			    struct mlxsw_sp_acl_block_binding *binding)
+{
+	struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
+	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+
+	ops->ruleset_unbind(mlxsw_sp, ruleset->priv,
+			    binding->mlxsw_sp_port, binding->ingress);
+}
+
+static bool mlxsw_sp_acl_ruleset_block_bound(struct mlxsw_sp_acl_block *block)
+{
+	return block->ruleset_zero;
+}
+
+static int
+mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp *mlxsw_sp,
+				struct mlxsw_sp_acl_ruleset *ruleset,
+				struct mlxsw_sp_acl_block *block)
+{
+	struct mlxsw_sp_acl_block_binding *binding;
+	int err;
+
+	block->ruleset_zero = ruleset;
+	list_for_each_entry(binding, &block->binding_list, list) {
+		err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
+		if (err)
+			goto rollback;
+	}
+	return 0;
+
+rollback:
+	list_for_each_entry_continue_reverse(binding, &block->binding_list,
+					     list)
+		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
+	block->ruleset_zero = NULL;
+
+	return err;
+}
+
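The rollback above walks the binding list backwards from the entry that failed, undoing only what was already bound. The same all-or-nothing idiom, sketched standalone with an array standing in for the list (the helpers are hypothetical, not driver code):

#include <stdio.h>

static int bind_one(int i)
{
	return i == 2 ? -1 : 0;	/* pretend binding #2 fails */
}

static void unbind_one(int i)
{
	printf("unbound %d\n", i);
}

int main(void)
{
	int i, err = 0;

	for (i = 0; i < 4; i++) {
		err = bind_one(i);
		if (err)
			break;
	}
	if (err)
		while (--i >= 0)	/* unwind only what succeeded */
			unbind_one(i);
	return err ? 1 : 0;
}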
+static void
+mlxsw_sp_acl_ruleset_block_unbind(struct mlxsw_sp *mlxsw_sp,
+				  struct mlxsw_sp_acl_ruleset *ruleset,
+				  struct mlxsw_sp_acl_block *block)
+{
+	struct mlxsw_sp_acl_block_binding *binding;
+
+	list_for_each_entry(binding, &block->binding_list, list)
+		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
+	block->ruleset_zero = NULL;
+}
+
+struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
+						     struct net *net)
+{
+	struct mlxsw_sp_acl_block *block;
+
+	block = kzalloc(sizeof(*block), GFP_KERNEL);
+	if (!block)
+		return NULL;
+	INIT_LIST_HEAD(&block->binding_list);
+	block->mlxsw_sp = mlxsw_sp;
+	return block;
+}
+
+void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block)
+{
+	WARN_ON(!list_empty(&block->binding_list));
+	kfree(block);
+}
+
+static struct mlxsw_sp_acl_block_binding *
+mlxsw_sp_acl_block_lookup(struct mlxsw_sp_acl_block *block,
+			  struct mlxsw_sp_port *mlxsw_sp_port, bool ingress)
+{
+	struct mlxsw_sp_acl_block_binding *binding;
+
+	list_for_each_entry(binding, &block->binding_list, list)
+		if (binding->mlxsw_sp_port == mlxsw_sp_port &&
+		    binding->ingress == ingress)
+			return binding;
+	return NULL;
+}
+
+int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
+			    struct mlxsw_sp_acl_block *block,
+			    struct mlxsw_sp_port *mlxsw_sp_port,
+			    bool ingress)
+{
+	struct mlxsw_sp_acl_block_binding *binding;
+	int err;
+
+	if (WARN_ON(mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress)))
+		return -EEXIST;
+
+	binding = kzalloc(sizeof(*binding), GFP_KERNEL);
+	if (!binding)
+		return -ENOMEM;
+	binding->mlxsw_sp_port = mlxsw_sp_port;
+	binding->ingress = ingress;
+
+	if (mlxsw_sp_acl_ruleset_block_bound(block)) {
+		err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
+		if (err)
+			goto err_ruleset_bind;
+	}
+
+	list_add(&binding->list, &block->binding_list);
+	return 0;
+
+err_ruleset_bind:
+	kfree(binding);
+	return err;
+}
+
+int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
+			      struct mlxsw_sp_acl_block *block,
+			      struct mlxsw_sp_port *mlxsw_sp_port,
+			      bool ingress)
+{
+	struct mlxsw_sp_acl_block_binding *binding;
+
+	binding = mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress);
+	if (!binding)
+		return -ENOENT;
+
+	list_del(&binding->list);
+
+	if (mlxsw_sp_acl_ruleset_block_bound(block))
+		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
+
+	kfree(binding);
+	return 0;
+}
+
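A minimal usage sketch of the shared-block API introduced here; mlxsw_sp, the two ports and net are assumed to be initialized driver objects, and this is not a verbatim call site, just how a caller inside the driver might share one block between two bindings:

static int example_share_block(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_port *port1,
			       struct mlxsw_sp_port *port2,
			       struct net *net)
{
	struct mlxsw_sp_acl_block *block;
	int err;

	block = mlxsw_sp_acl_block_create(mlxsw_sp, net);
	if (!block)
		return -ENOMEM;

	/* Bind the same block to the ingress of two ports; rules added
	 * to the block later apply to both bindings.
	 */
	err = mlxsw_sp_acl_block_bind(mlxsw_sp, block, port1, true);
	if (err)
		goto err_destroy;
	err = mlxsw_sp_acl_block_bind(mlxsw_sp, block, port2, true);
	if (err)
		goto err_unbind1;
	return 0;

err_unbind1:
	mlxsw_sp_acl_block_unbind(mlxsw_sp, block, port1, true);
err_destroy:
	mlxsw_sp_acl_block_destroy(block);
	return err;
}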
 static struct mlxsw_sp_acl_ruleset *
 mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
+			    struct mlxsw_sp_acl_block *block, u32 chain_index,
 			    const struct mlxsw_sp_acl_profile_ops *ops)
 {
 	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
@@ -132,6 +324,8 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
 	if (!ruleset)
 		return ERR_PTR(-ENOMEM);
 	ruleset->ref_count = 1;
+	ruleset->ht_key.block = block;
+	ruleset->ht_key.chain_index = chain_index;
 	ruleset->ht_key.ops = ops;
 
 	err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
@@ -142,8 +336,28 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
 	if (err)
 		goto err_ops_ruleset_add;
 
+	err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
+				     mlxsw_sp_acl_ruleset_ht_params);
+	if (err)
+		goto err_ht_insert;
+
+	if (!chain_index) {
+		/* Only the ruleset with chain index 0, the implicit one,
+		 * needs to be directly bound to the device. The rest of
+		 * the rulesets are reached via the goto action.
+		 */
+		err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block);
+		if (err)
+			goto err_ruleset_bind;
+	}
+
 	return ruleset;
 
+err_ruleset_bind:
+	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
+			       mlxsw_sp_acl_ruleset_ht_params);
+err_ht_insert:
+	ops->ruleset_del(mlxsw_sp, ruleset->priv);
 err_ops_ruleset_add:
 	rhashtable_destroy(&ruleset->rule_ht);
 err_rhashtable_init:
@@ -155,57 +369,19 @@ static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
 					 struct mlxsw_sp_acl_ruleset *ruleset)
 {
 	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+	struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
+	u32 chain_index = ruleset->ht_key.chain_index;
+	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
 
+	if (!chain_index)
+		mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block);
+	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
+			       mlxsw_sp_acl_ruleset_ht_params);
 	ops->ruleset_del(mlxsw_sp, ruleset->priv);
 	rhashtable_destroy(&ruleset->rule_ht);
 	kfree(ruleset);
 }
 
-static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
-				     struct mlxsw_sp_acl_ruleset *ruleset,
-				     struct net_device *dev, bool ingress,
-				     u32 chain_index)
-{
-	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
-	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
-	int err;
-
-	ruleset->ht_key.dev = dev;
-	ruleset->ht_key.ingress = ingress;
-	ruleset->ht_key.chain_index = chain_index;
-	err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
-				     mlxsw_sp_acl_ruleset_ht_params);
-	if (err)
-		return err;
-	if (!ruleset->ht_key.chain_index) {
-		/* We only need ruleset with chain index 0, the implicit one,
-		 * to be directly bound to device. The rest of the rulesets
-		 * are bound by "Goto action set".
-		 */
-		err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress);
-		if (err)
-			goto err_ops_ruleset_bind;
-	}
-	return 0;
-
-err_ops_ruleset_bind:
-	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
-			       mlxsw_sp_acl_ruleset_ht_params);
-	return err;
-}
-
-static void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
-					struct mlxsw_sp_acl_ruleset *ruleset)
-{
-	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
-	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
-
-	if (!ruleset->ht_key.chain_index)
-		ops->ruleset_unbind(mlxsw_sp, ruleset->priv);
-	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
-			       mlxsw_sp_acl_ruleset_ht_params);
-}
-
 static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
 {
 	ruleset->ref_count++;
@@ -216,20 +392,18 @@ static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
 {
 	if (--ruleset->ref_count)
 		return;
-	mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, ruleset);
 	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
 }
 
 static struct mlxsw_sp_acl_ruleset *
-__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl, struct net_device *dev,
-			      bool ingress, u32 chain_index,
+__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl,
+			      struct mlxsw_sp_acl_block *block, u32 chain_index,
 			      const struct mlxsw_sp_acl_profile_ops *ops)
 {
 	struct mlxsw_sp_acl_ruleset_ht_key ht_key;
 
 	memset(&ht_key, 0, sizeof(ht_key));
-	ht_key.dev = dev;
-	ht_key.ingress = ingress;
+	ht_key.block = block;
 	ht_key.chain_index = chain_index;
 	ht_key.ops = ops;
 	return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
@@ -237,8 +411,8 @@ __mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl, struct net_device *dev,
 }
 
 struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
-			    bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
+			    struct mlxsw_sp_acl_block *block, u32 chain_index,
 			    enum mlxsw_sp_acl_profile profile)
 {
 	const struct mlxsw_sp_acl_profile_ops *ops;
@@ -248,45 +422,31 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
 	ops = acl->ops->profile_ops(mlxsw_sp, profile);
 	if (!ops)
 		return ERR_PTR(-EINVAL);
-	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, dev, ingress,
-						chain_index, ops);
+	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
 	if (!ruleset)
 		return ERR_PTR(-ENOENT);
 	return ruleset;
 }
 
 struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
-			 bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
+			 struct mlxsw_sp_acl_block *block, u32 chain_index,
 			 enum mlxsw_sp_acl_profile profile)
 {
 	const struct mlxsw_sp_acl_profile_ops *ops;
 	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
 	struct mlxsw_sp_acl_ruleset *ruleset;
-	int err;
 
 	ops = acl->ops->profile_ops(mlxsw_sp, profile);
 	if (!ops)
 		return ERR_PTR(-EINVAL);
 
-	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, dev, ingress,
-						chain_index, ops);
+	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
 	if (ruleset) {
 		mlxsw_sp_acl_ruleset_ref_inc(ruleset);
 		return ruleset;
 	}
-	ruleset = mlxsw_sp_acl_ruleset_create(mlxsw_sp, ops);
-	if (IS_ERR(ruleset))
-		return ruleset;
-	err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev,
-					ingress, chain_index);
-	if (err)
-		goto err_ruleset_bind;
-	return ruleset;
-
-err_ruleset_bind:
-	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
-	return ERR_PTR(err);
+	return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops);
 }
 
 void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
@@ -535,6 +695,7 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
 		goto err_rhashtable_insert;
 
 	list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
+	ruleset->ht_key.block->rule_count++;
 	return 0;
 
 err_rhashtable_insert:
@@ -548,6 +709,7 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
 	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
 	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
 
+	ruleset->ht_key.block->rule_count--;
 	list_del(&rule->list);
 	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
 			       mlxsw_sp_acl_rule_ht_params);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 7e8284b..c6e180c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -154,10 +154,6 @@ struct mlxsw_sp_acl_tcam_group {
 	struct list_head region_list;
 	unsigned int region_count;
 	struct rhashtable chunk_ht;
-	struct {
-		u16 local_port;
-		bool ingress;
-	} bound;
 	struct mlxsw_sp_acl_tcam_group_ops *ops;
 	const struct mlxsw_sp_acl_tcam_pattern *patterns;
 	unsigned int patterns_count;
@@ -262,35 +258,29 @@ static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp,
 static int
 mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
 			     struct mlxsw_sp_acl_tcam_group *group,
-			     struct net_device *dev, bool ingress)
+			     struct mlxsw_sp_port *mlxsw_sp_port,
+			     bool ingress)
 {
-	struct mlxsw_sp_port *mlxsw_sp_port;
 	char ppbt_pl[MLXSW_REG_PPBT_LEN];
 
-	if (!mlxsw_sp_port_dev_check(dev))
-		return -EINVAL;
-
-	mlxsw_sp_port = netdev_priv(dev);
-	group->bound.local_port = mlxsw_sp_port->local_port;
-	group->bound.ingress = ingress;
-	mlxsw_reg_ppbt_pack(ppbt_pl,
-			    group->bound.ingress ? MLXSW_REG_PXBT_E_IACL :
-						   MLXSW_REG_PXBT_E_EACL,
-			    MLXSW_REG_PXBT_OP_BIND, group->bound.local_port,
+	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
+					       MLXSW_REG_PXBT_E_EACL,
+			    MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
 			    group->id);
 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
 }
 
 static void
 mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
-			       struct mlxsw_sp_acl_tcam_group *group)
+			       struct mlxsw_sp_acl_tcam_group *group,
+			       struct mlxsw_sp_port *mlxsw_sp_port,
+			       bool ingress)
 {
 	char ppbt_pl[MLXSW_REG_PPBT_LEN];
 
-	mlxsw_reg_ppbt_pack(ppbt_pl,
-			    group->bound.ingress ? MLXSW_REG_PXBT_E_IACL :
-						   MLXSW_REG_PXBT_E_EACL,
-			    MLXSW_REG_PXBT_OP_UNBIND, group->bound.local_port,
+	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
+					       MLXSW_REG_PXBT_E_EACL,
+			    MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
 			    group->id);
 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
 }
@@ -1056,21 +1046,25 @@ mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
 static int
 mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
 				      void *ruleset_priv,
-				      struct net_device *dev, bool ingress)
+				      struct mlxsw_sp_port *mlxsw_sp_port,
+				      bool ingress)
 {
 	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
 
 	return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group,
-					    dev, ingress);
+					    mlxsw_sp_port, ingress);
 }
 
 static void
 mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
-					void *ruleset_priv)
+					void *ruleset_priv,
+					struct mlxsw_sp_port *mlxsw_sp_port,
+					bool ingress)
 {
 	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
 
-	mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group);
+	mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group,
+				       mlxsw_sp_port, ingress);
 }
 
 static u16
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index 96fdba7..f56fa18 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -771,14 +771,33 @@ static struct devlink_dpipe_table_ops mlxsw_sp_host4_ops = {
 	.size_get = mlxsw_sp_dpipe_table_host4_size_get,
 };
 
+#define MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST4 1
+
 static int mlxsw_sp_dpipe_host4_table_init(struct mlxsw_sp *mlxsw_sp)
 {
 	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+	int err;
 
-	return devlink_dpipe_table_register(devlink,
-					    MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
-					    &mlxsw_sp_host4_ops,
-					    mlxsw_sp, false);
+	err = devlink_dpipe_table_register(devlink,
+					   MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
+					   &mlxsw_sp_host4_ops,
+					   mlxsw_sp, false);
+	if (err)
+		return err;
+
+	err = devlink_dpipe_table_resource_set(devlink,
+					       MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
+					       MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+					       MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST4);
+	if (err)
+		goto err_resource_set;
+
+	return 0;
+
+err_resource_set:
+	devlink_dpipe_table_unregister(devlink,
+				       MLXSW_SP_DPIPE_TABLE_NAME_HOST4);
+	return err;
 }
 
 static void mlxsw_sp_dpipe_host4_table_fini(struct mlxsw_sp *mlxsw_sp)
@@ -829,14 +848,33 @@ static struct devlink_dpipe_table_ops mlxsw_sp_host6_ops = {
 	.size_get = mlxsw_sp_dpipe_table_host6_size_get,
 };
 
+#define MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST6 2
+
 static int mlxsw_sp_dpipe_host6_table_init(struct mlxsw_sp *mlxsw_sp)
 {
 	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+	int err;
 
-	return devlink_dpipe_table_register(devlink,
-					    MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
-					    &mlxsw_sp_host6_ops,
-					    mlxsw_sp, false);
+	err = devlink_dpipe_table_register(devlink,
+					   MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
+					   &mlxsw_sp_host6_ops,
+					   mlxsw_sp, false);
+	if (err)
+		return err;
+
+	err = devlink_dpipe_table_resource_set(devlink,
+					       MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
+					       MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+					       MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST6);
+	if (err)
+		goto err_resource_set;
+
+	return 0;
+
+err_resource_set:
+	devlink_dpipe_table_unregister(devlink,
+				       MLXSW_SP_DPIPE_TABLE_NAME_HOST6);
+	return err;
 }
 
 static void mlxsw_sp_dpipe_host6_table_fini(struct mlxsw_sp *mlxsw_sp)
@@ -1213,14 +1251,33 @@ static struct devlink_dpipe_table_ops mlxsw_sp_dpipe_table_adj_ops = {
 	.size_get = mlxsw_sp_dpipe_table_adj_size_get,
 };
 
+#define MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_ADJ 1
+
 static int mlxsw_sp_dpipe_adj_table_init(struct mlxsw_sp *mlxsw_sp)
 {
 	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+	int err;
 
-	return devlink_dpipe_table_register(devlink,
-					    MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
-					    &mlxsw_sp_dpipe_table_adj_ops,
-					    mlxsw_sp, false);
+	err = devlink_dpipe_table_register(devlink,
+					   MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
+					   &mlxsw_sp_dpipe_table_adj_ops,
+					   mlxsw_sp, false);
+	if (err)
+		return err;
+
+	err = devlink_dpipe_table_resource_set(devlink,
+					       MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
+					       MLXSW_SP_RESOURCE_KVD_LINEAR,
+					       MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_ADJ);
+	if (err)
+		goto err_resource_set;
+
+	return 0;
+
+err_resource_set:
+	devlink_dpipe_table_unregister(devlink,
+				       MLXSW_SP_DPIPE_TABLE_NAME_ADJ);
+	return err;
 }
 
 static void mlxsw_sp_dpipe_adj_table_fini(struct mlxsw_sp *mlxsw_sp)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 42e8a36..cf7b97d4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -35,6 +35,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/netdevice.h>
+#include <net/net_namespace.h>
 #include <net/flow_dissector.h>
 #include <net/pkt_cls.h>
 #include <net/tc_act/tc_gact.h>
@@ -45,7 +46,7 @@
 #include "core_acl_flex_keys.h"
 
 static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
-					 struct net_device *dev, bool ingress,
+					 struct mlxsw_sp_acl_block *block,
 					 struct mlxsw_sp_acl_rule_info *rulei,
 					 struct tcf_exts *exts)
 {
@@ -80,8 +81,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
 			struct mlxsw_sp_acl_ruleset *ruleset;
 			u16 group_id;
 
-			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, dev,
-							      ingress,
+			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
 							      chain_index,
 							      MLXSW_SP_ACL_PROFILE_FLOWER);
 			if (IS_ERR(ruleset))
@@ -104,9 +104,6 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
 				return err;
 
 			out_dev = tcf_mirred_dev(a);
-			if (out_dev == dev)
-				out_dev = NULL;
-
 			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
 							 out_dev);
 			if (err)
@@ -265,7 +262,7 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
 }
 
 static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
-				 struct net_device *dev, bool ingress,
+				 struct mlxsw_sp_acl_block *block,
 				 struct mlxsw_sp_acl_rule_info *rulei,
 				 struct tc_cls_flower_offload *f)
 {
@@ -383,21 +380,19 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
 	if (err)
 		return err;
 
-	return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, ingress,
-					     rulei, f->exts);
+	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts);
 }
 
-int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
+			    struct mlxsw_sp_acl_block *block,
 			    struct tc_cls_flower_offload *f)
 {
-	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-	struct net_device *dev = mlxsw_sp_port->dev;
 	struct mlxsw_sp_acl_rule_info *rulei;
 	struct mlxsw_sp_acl_ruleset *ruleset;
 	struct mlxsw_sp_acl_rule *rule;
 	int err;
 
-	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress,
+	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
 					   f->common.chain_index,
 					   MLXSW_SP_ACL_PROFILE_FLOWER);
 	if (IS_ERR(ruleset))
@@ -410,7 +405,7 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
 	}
 
 	rulei = mlxsw_sp_acl_rule_rulei(rule);
-	err = mlxsw_sp_flower_parse(mlxsw_sp, dev, ingress, rulei, f);
+	err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
 	if (err)
 		goto err_flower_parse;
 
@@ -423,7 +418,6 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
 		goto err_rule_add;
 
 	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
-	mlxsw_sp_port->acl_rule_count++;
 	return 0;
 
 err_rule_add:
@@ -435,15 +429,15 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
 	return err;
 }
 
-void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
+			     struct mlxsw_sp_acl_block *block,
 			     struct tc_cls_flower_offload *f)
 {
-	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 	struct mlxsw_sp_acl_ruleset *ruleset;
 	struct mlxsw_sp_acl_rule *rule;
 
-	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
-					   ingress, f->common.chain_index,
+	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
+					   f->common.chain_index,
 					   MLXSW_SP_ACL_PROFILE_FLOWER);
 	if (IS_ERR(ruleset))
 		return;
@@ -455,13 +449,12 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
 	}
 
 	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
-	mlxsw_sp_port->acl_rule_count--;
 }
 
-int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
+			  struct mlxsw_sp_acl_block *block,
 			  struct tc_cls_flower_offload *f)
 {
-	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 	struct mlxsw_sp_acl_ruleset *ruleset;
 	struct mlxsw_sp_acl_rule *rule;
 	u64 packets;
@@ -469,8 +462,8 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
 	u64 bytes;
 	int err;
 
-	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
-					   ingress, f->common.chain_index,
+	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
+					   f->common.chain_index,
 					   MLXSW_SP_ACL_PROFILE_FLOWER);
 	if (WARN_ON(IS_ERR(ruleset)))
 		return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
index 310c382..cfacc17 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
@@ -286,6 +286,32 @@ static void mlxsw_sp_kvdl_parts_fini(struct mlxsw_sp *mlxsw_sp)
 		mlxsw_sp_kvdl_part_fini(mlxsw_sp, i);
 }
 
+u64 mlxsw_sp_kvdl_part_occ(struct mlxsw_sp_kvdl_part *part)
+{
+	unsigned int nr_entries;
+	int bit = -1;
+	u64 occ = 0;
+
+	nr_entries = (part->info->end_index -
+		      part->info->start_index + 1) /
+		      part->info->alloc_size;
+	while ((bit = find_next_bit(part->usage, nr_entries, bit + 1))
+		< nr_entries)
+		occ += part->info->alloc_size;
+	return occ;
+}
+
+u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp)
+{
+	struct mlxsw_sp_kvdl_part *part;
+	u64 occ = 0;
+
+	list_for_each_entry(part, &mlxsw_sp->kvdl->parts_list, list)
+		occ += mlxsw_sp_kvdl_part_occ(part);
+
+	return occ;
+}
+
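Occupancy is counted in units of alloc_size: each set bit in part->usage represents one allocated chunk. A tiny standalone illustration with made-up part geometry:

#include <stdio.h>

int main(void)
{
	/* Hypothetical part: indexes 0..1023, chunks of 32 entries. */
	unsigned int start = 0, end = 1023, alloc_size = 32;
	unsigned int nr_entries = (end - start + 1) / alloc_size;
	unsigned int bits_set = 5;	/* pretend 5 chunks are in use */

	printf("%u usage bits, occ = %u entries\n",
	       nr_entries, bits_set * alloc_size);	/* 32 bits, 160 */
	return 0;
}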
 int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
 {
 	struct mlxsw_sp_kvdl *kvdl;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index e11a0ab..0b76704 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -247,6 +247,8 @@ mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
 
 	stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
 	stats_base->drops = red_base->prob_drop + red_base->pdrop;
+
+	stats_base->backlog = 0;
 }
 
 static int
@@ -306,6 +308,19 @@ mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port,
 						 max, prob, p->is_ecn);
 }
 
+static void
+mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			     void *params)
+{
+	struct tc_red_qopt_offload_params *p = params;
+	u64 backlog;
+
+	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+				       mlxsw_sp_qdisc->stats_base.backlog);
+	p->qstats->backlog -= backlog;
+}
+
 static int
 mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
 			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
@@ -338,7 +353,7 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
 			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
 			     struct tc_qopt_offload_stats *stats_ptr)
 {
-	u64 tx_bytes, tx_packets, overlimits, drops;
+	u64 tx_bytes, tx_packets, overlimits, drops, backlog;
 	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
 	struct mlxsw_sp_qdisc_stats *stats_base;
 	struct mlxsw_sp_port_xstats *xstats;
@@ -354,14 +369,18 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
 		     stats_base->overlimits;
 	drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] -
 		stats_base->drops;
+	backlog = xstats->backlog[tclass_num];
 
 	_bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
 	stats_ptr->qstats->overlimits += overlimits;
 	stats_ptr->qstats->drops += drops;
 	stats_ptr->qstats->backlog +=
-			mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
-					     xstats->backlog[tclass_num]);
+				mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+						     backlog) -
+				mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+						     stats_base->backlog);
 
+	stats_base->backlog = backlog;
 	stats_base->drops +=  drops;
 	stats_base->overlimits += overlimits;
 	stats_base->tx_bytes += tx_bytes;
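The hardware counters are cumulative, so the driver keeps a snapshot in stats_base and reports only the growth since the last read; this hunk extends that bookkeeping to backlog. The pattern, reduced to a standalone sketch with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long long hw = 700;		/* current HW counter */
	unsigned long long base = 500;		/* snapshot at last read */
	unsigned long long reported = 1000;	/* stack's running total */

	reported += hw - base;	/* add only the delta since last read */
	base = hw;		/* re-snapshot for the next read */
	printf("reported=%llu base=%llu\n", reported, base);
	return 0;
}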
@@ -375,6 +394,7 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
 	.type = MLXSW_SP_QDISC_RED,
 	.check_params = mlxsw_sp_qdisc_red_check_params,
 	.replace = mlxsw_sp_qdisc_red_replace,
+	.unoffload = mlxsw_sp_qdisc_red_unoffload,
 	.destroy = mlxsw_sp_qdisc_red_destroy,
 	.get_stats = mlxsw_sp_qdisc_get_red_stats,
 	.get_xstats = mlxsw_sp_qdisc_get_red_xstats,
@@ -460,7 +480,7 @@ mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
 	return 0;
 }
 
-void
+static void
 mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
 			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
 			      void *params)
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 6e5ef98..064f00e 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -44,6 +44,7 @@
 
 ifeq ($(CONFIG_BPF_SYSCALL),y)
 nfp-objs += \
+	    bpf/cmsg.o \
 	    bpf/main.o \
 	    bpf/offload.o \
 	    bpf/verifier.o \
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
new file mode 100644
index 0000000..71e6586
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -0,0 +1,446 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/bpf.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/jiffies.h>
+#include <linux/skbuff.h>
+#include <linux/wait.h>
+
+#include "../nfp_app.h"
+#include "../nfp_net.h"
+#include "fw.h"
+#include "main.h"
+
+#define cmsg_warn(bpf, msg...)	nn_dp_warn(&(bpf)->app->ctrl->dp, msg)
+
+#define NFP_BPF_TAG_ALLOC_SPAN	(U16_MAX / 4)
+
+static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf)
+{
+	u16 used_tags;
+
+	used_tags = bpf->tag_alloc_next - bpf->tag_alloc_last;
+
+	return used_tags > NFP_BPF_TAG_ALLOC_SPAN;
+}
+
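Both tag counters are u16, so tag_alloc_next - tag_alloc_last yields the number of tags in flight even across a wrap of the counter. A standalone check of the arithmetic (the values are hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t tag_alloc_last = 65530;	/* near the wrap point */
	uint16_t tag_alloc_next = 4;		/* already wrapped past 0 */
	uint16_t used_tags = tag_alloc_next - tag_alloc_last;

	printf("in flight: %u\n", used_tags);	/* 10, despite the wrap */
	return 0;
}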
+static int nfp_bpf_alloc_tag(struct nfp_app_bpf *bpf)
+{
+	/* All FW communication for BPF is request-reply.  To make sure we
+	 * don't reuse a message ID too early after a timeout, limit the
+	 * number of requests in flight.
+	 */
+	if (nfp_bpf_all_tags_busy(bpf)) {
+		cmsg_warn(bpf, "all FW request contexts busy!\n");
+		return -EAGAIN;
+	}
+
+	WARN_ON(__test_and_set_bit(bpf->tag_alloc_next, bpf->tag_allocator));
+	return bpf->tag_alloc_next++;
+}
+
+static void nfp_bpf_free_tag(struct nfp_app_bpf *bpf, u16 tag)
+{
+	WARN_ON(!__test_and_clear_bit(tag, bpf->tag_allocator));
+
+	while (!test_bit(bpf->tag_alloc_last, bpf->tag_allocator) &&
+	       bpf->tag_alloc_last != bpf->tag_alloc_next)
+		bpf->tag_alloc_last++;
+}
+
+static struct sk_buff *
+nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
+{
+	struct sk_buff *skb;
+
+	skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
+	if (!skb)
+		return NULL;
+	skb_put(skb, size);
+
+	return skb;
+}
+
+static struct sk_buff *
+nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
+{
+	unsigned int size;
+
+	size = sizeof(struct cmsg_req_map_op);
+	size += sizeof(struct cmsg_key_value_pair) * n;
+
+	return nfp_bpf_cmsg_alloc(bpf, size);
+}
+
+static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb)
+{
+	struct cmsg_hdr *hdr;
+
+	hdr = (struct cmsg_hdr *)skb->data;
+
+	return be16_to_cpu(hdr->tag);
+}
+
+static struct sk_buff *__nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
+{
+	unsigned int msg_tag;
+	struct sk_buff *skb;
+
+	skb_queue_walk(&bpf->cmsg_replies, skb) {
+		msg_tag = nfp_bpf_cmsg_get_tag(skb);
+		if (msg_tag == tag) {
+			nfp_bpf_free_tag(bpf, tag);
+			__skb_unlink(skb, &bpf->cmsg_replies);
+			return skb;
+		}
+	}
+
+	return NULL;
+}
+
+static struct sk_buff *nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
+{
+	struct sk_buff *skb;
+
+	nfp_ctrl_lock(bpf->app->ctrl);
+	skb = __nfp_bpf_reply(bpf, tag);
+	nfp_ctrl_unlock(bpf->app->ctrl);
+
+	return skb;
+}
+
+static struct sk_buff *nfp_bpf_reply_drop_tag(struct nfp_app_bpf *bpf, u16 tag)
+{
+	struct sk_buff *skb;
+
+	nfp_ctrl_lock(bpf->app->ctrl);
+	skb = __nfp_bpf_reply(bpf, tag);
+	if (!skb)
+		nfp_bpf_free_tag(bpf, tag);
+	nfp_ctrl_unlock(bpf->app->ctrl);
+
+	return skb;
+}
+
+static struct sk_buff *
+nfp_bpf_cmsg_wait_reply(struct nfp_app_bpf *bpf, enum nfp_bpf_cmsg_type type,
+			int tag)
+{
+	struct sk_buff *skb;
+	int err;
+
+	err = wait_event_interruptible_timeout(bpf->cmsg_wq,
+					       skb = nfp_bpf_reply(bpf, tag),
+					       msecs_to_jiffies(5000));
+	/* We didn't get a response - check one last time and atomically
+	 * drop the tag even if no response is matched.
+	 */
+	if (!skb)
+		skb = nfp_bpf_reply_drop_tag(bpf, tag);
+	if (err < 0) {
+		cmsg_warn(bpf, "%s waiting for response to 0x%02x: %d\n",
+			  err == -ERESTARTSYS ? "interrupted" : "error",
+			  type, err);
+		return ERR_PTR(err);
+	}
+	if (!skb) {
+		cmsg_warn(bpf, "timeout waiting for response to 0x%02x\n",
+			  type);
+		return ERR_PTR(-ETIMEDOUT);
+	}
+
+	return skb;
+}
+
+static struct sk_buff *
+nfp_bpf_cmsg_communicate(struct nfp_app_bpf *bpf, struct sk_buff *skb,
+			 enum nfp_bpf_cmsg_type type, unsigned int reply_size)
+{
+	struct cmsg_hdr *hdr;
+	int tag;
+
+	nfp_ctrl_lock(bpf->app->ctrl);
+	tag = nfp_bpf_alloc_tag(bpf);
+	if (tag < 0) {
+		nfp_ctrl_unlock(bpf->app->ctrl);
+		dev_kfree_skb_any(skb);
+		return ERR_PTR(tag);
+	}
+
+	hdr = (void *)skb->data;
+	hdr->ver = CMSG_MAP_ABI_VERSION;
+	hdr->type = type;
+	hdr->tag = cpu_to_be16(tag);
+
+	__nfp_app_ctrl_tx(bpf->app, skb);
+
+	nfp_ctrl_unlock(bpf->app->ctrl);
+
+	skb = nfp_bpf_cmsg_wait_reply(bpf, type, tag);
+	if (IS_ERR(skb))
+		return skb;
+
+	hdr = (struct cmsg_hdr *)skb->data;
+	/* 0 reply_size means caller will do the validation */
+	if (reply_size && skb->len != reply_size) {
+		cmsg_warn(bpf, "cmsg drop - wrong size %d != %d!\n",
+			  skb->len, reply_size);
+		goto err_free;
+	}
+	if (hdr->type != __CMSG_REPLY(type)) {
+		cmsg_warn(bpf, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
+			  hdr->type, __CMSG_REPLY(type));
+		goto err_free;
+	}
+
+	return skb;
+err_free:
+	dev_kfree_skb_any(skb);
+	return ERR_PTR(-EIO);
+}
+
+static int
+nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf,
+			 struct cmsg_reply_map_simple *reply)
+{
+	static const int res_table[] = {
+		[CMSG_RC_SUCCESS]	= 0,
+		[CMSG_RC_ERR_MAP_FD]	= -EBADFD,
+		[CMSG_RC_ERR_MAP_NOENT]	= -ENOENT,
+		[CMSG_RC_ERR_MAP_ERR]	= -EINVAL,
+		[CMSG_RC_ERR_MAP_PARSE]	= -EIO,
+		[CMSG_RC_ERR_MAP_EXIST]	= -EEXIST,
+		[CMSG_RC_ERR_MAP_NOMEM]	= -ENOMEM,
+		[CMSG_RC_ERR_MAP_E2BIG]	= -E2BIG,
+	};
+	u32 rc;
+
+	rc = be32_to_cpu(reply->rc);
+	if (rc >= ARRAY_SIZE(res_table)) {
+		cmsg_warn(bpf, "FW responded with invalid status: %u\n", rc);
+		return -EIO;
+	}
+
+	return res_table[rc];
+}
+
+long long int
+nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map)
+{
+	struct cmsg_reply_map_alloc_tbl *reply;
+	struct cmsg_req_map_alloc_tbl *req;
+	struct sk_buff *skb;
+	u32 tid;
+	int err;
+
+	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
+	if (!skb)
+		return -ENOMEM;
+
+	req = (void *)skb->data;
+	req->key_size = cpu_to_be32(map->key_size);
+	req->value_size = cpu_to_be32(map->value_size);
+	req->max_entries = cpu_to_be32(map->max_entries);
+	req->map_type = cpu_to_be32(map->map_type);
+	req->map_flags = 0;
+
+	skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_ALLOC,
+				       sizeof(*reply));
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	reply = (void *)skb->data;
+	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
+	if (err)
+		goto err_free;
+
+	tid = be32_to_cpu(reply->tid);
+	dev_consume_skb_any(skb);
+
+	return tid;
+err_free:
+	dev_kfree_skb_any(skb);
+	return err;
+}
+
+void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
+{
+	struct cmsg_reply_map_free_tbl *reply;
+	struct cmsg_req_map_free_tbl *req;
+	struct sk_buff *skb;
+	int err;
+
+	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
+	if (!skb) {
+		cmsg_warn(bpf, "leaking map - failed to allocate msg\n");
+		return;
+	}
+
+	req = (void *)skb->data;
+	req->tid = cpu_to_be32(nfp_map->tid);
+
+	skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_FREE,
+				       sizeof(*reply));
+	if (IS_ERR(skb)) {
+		cmsg_warn(bpf, "leaking map - I/O error\n");
+		return;
+	}
+
+	reply = (void *)skb->data;
+	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
+	if (err)
+		cmsg_warn(bpf, "leaking map - FW responded with: %d\n", err);
+
+	dev_consume_skb_any(skb);
+}
+
+static int
+nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
+		      enum nfp_bpf_cmsg_type op,
+		      u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
+{
+	struct nfp_bpf_map *nfp_map = offmap->dev_priv;
+	struct nfp_app_bpf *bpf = nfp_map->bpf;
+	struct bpf_map *map = &offmap->map;
+	struct cmsg_reply_map_op *reply;
+	struct cmsg_req_map_op *req;
+	struct sk_buff *skb;
+	int err;
+
+	/* FW messages have no space for more than 32 bits of flags */
+	if (flags >> 32)
+		return -EOPNOTSUPP;
+
+	skb = nfp_bpf_cmsg_map_req_alloc(bpf, 1);
+	if (!skb)
+		return -ENOMEM;
+
+	req = (void *)skb->data;
+	req->tid = cpu_to_be32(nfp_map->tid);
+	req->count = cpu_to_be32(1);
+	req->flags = cpu_to_be32(flags);
+
+	/* Copy inputs */
+	if (key)
+		memcpy(&req->elem[0].key, key, map->key_size);
+	if (value)
+		memcpy(&req->elem[0].value, value, map->value_size);
+
+	skb = nfp_bpf_cmsg_communicate(bpf, skb, op,
+				       sizeof(*reply) + sizeof(*reply->elem));
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	reply = (void *)skb->data;
+	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
+	if (err)
+		goto err_free;
+
+	/* Copy outputs */
+	if (out_key)
+		memcpy(out_key, &reply->elem[0].key, map->key_size);
+	if (out_value)
+		memcpy(out_value, &reply->elem[0].value, map->value_size);
+
+	dev_consume_skb_any(skb);
+
+	return 0;
+err_free:
+	dev_kfree_skb_any(skb);
+	return err;
+}
+
+int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
+			      void *key, void *value, u64 flags)
+{
+	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_UPDATE,
+				     key, value, flags, NULL, NULL);
+}
+
+int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key)
+{
+	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_DELETE,
+				     key, NULL, 0, NULL, NULL);
+}
+
+int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
+			      void *key, void *value)
+{
+	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_LOOKUP,
+				     key, NULL, 0, NULL, value);
+}
+
+int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
+				void *next_key)
+{
+	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETFIRST,
+				     NULL, NULL, 0, next_key, NULL);
+}
+
+int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
+			       void *key, void *next_key)
+{
+	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETNEXT,
+				     key, NULL, 0, next_key, NULL);
+}
+
+void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
+{
+	struct nfp_app_bpf *bpf = app->priv;
+	unsigned int tag;
+
+	if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) {
+		cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len);
+		goto err_free;
+	}
+
+	nfp_ctrl_lock(bpf->app->ctrl);
+
+	tag = nfp_bpf_cmsg_get_tag(skb);
+	if (unlikely(!test_bit(tag, bpf->tag_allocator))) {
+		cmsg_warn(bpf, "cmsg drop - no one is waiting for tag %u!\n",
+			  tag);
+		goto err_unlock;
+	}
+
+	__skb_queue_tail(&bpf->cmsg_replies, skb);
+	wake_up_interruptible_all(&bpf->cmsg_wq);
+
+	nfp_ctrl_unlock(bpf->app->ctrl);
+
+	return;
+err_unlock:
+	nfp_ctrl_unlock(bpf->app->ctrl);
+err_free:
+	dev_kfree_skb_any(skb);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
index 7206aa1..cfcc7bc 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/fw.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
@@ -38,7 +38,14 @@
 #include <linux/types.h>
 
 enum bpf_cap_tlv_type {
+	NFP_BPF_CAP_TYPE_FUNC		= 1,
 	NFP_BPF_CAP_TYPE_ADJUST_HEAD	= 2,
+	NFP_BPF_CAP_TYPE_MAPS		= 3,
+};
+
+struct nfp_bpf_cap_tlv_func {
+	__le32 func_id;
+	__le32 func_addr;
 };
 
 struct nfp_bpf_cap_tlv_adjust_head {
@@ -51,4 +58,100 @@ struct nfp_bpf_cap_tlv_adjust_head {
 
 #define NFP_BPF_ADJUST_HEAD_NO_META	BIT(0)
 
+struct nfp_bpf_cap_tlv_maps {
+	__le32 types;
+	__le32 max_maps;
+	__le32 max_elems;
+	__le32 max_key_sz;
+	__le32 max_val_sz;
+	__le32 max_elem_sz;
+};
+
+/*
+ * Types defined for map-related control messages
+ */
+#define CMSG_MAP_ABI_VERSION		1
+
+enum nfp_bpf_cmsg_type {
+	CMSG_TYPE_MAP_ALLOC	= 1,
+	CMSG_TYPE_MAP_FREE	= 2,
+	CMSG_TYPE_MAP_LOOKUP	= 3,
+	CMSG_TYPE_MAP_UPDATE	= 4,
+	CMSG_TYPE_MAP_DELETE	= 5,
+	CMSG_TYPE_MAP_GETNEXT	= 6,
+	CMSG_TYPE_MAP_GETFIRST	= 7,
+	__CMSG_TYPE_MAP_MAX,
+};
+
+#define CMSG_TYPE_MAP_REPLY_BIT		7
+#define __CMSG_REPLY(req)		(BIT(CMSG_TYPE_MAP_REPLY_BIT) | (req))
+
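A reply type is simply the request type with bit 7 set, so requests and replies share one number space. A tiny standalone check (BIT() expanded to a plain shift so the snippet builds outside the kernel):

#include <stdio.h>

#define CMSG_TYPE_MAP_REPLY_BIT	7
#define __CMSG_REPLY(req)	((1U << CMSG_TYPE_MAP_REPLY_BIT) | (req))

int main(void)
{
	printf("0x%02x\n", __CMSG_REPLY(1));	/* prints 0x81 */
	return 0;
}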
+#define CMSG_MAP_KEY_LW			16
+#define CMSG_MAP_VALUE_LW		16
+
+enum nfp_bpf_cmsg_status {
+	CMSG_RC_SUCCESS			= 0,
+	CMSG_RC_ERR_MAP_FD		= 1,
+	CMSG_RC_ERR_MAP_NOENT		= 2,
+	CMSG_RC_ERR_MAP_ERR		= 3,
+	CMSG_RC_ERR_MAP_PARSE		= 4,
+	CMSG_RC_ERR_MAP_EXIST		= 5,
+	CMSG_RC_ERR_MAP_NOMEM		= 6,
+	CMSG_RC_ERR_MAP_E2BIG		= 7,
+};
+
+struct cmsg_hdr {
+	u8 type;
+	u8 ver;
+	__be16 tag;
+};
+
+struct cmsg_reply_map_simple {
+	struct cmsg_hdr hdr;
+	__be32 rc;
+};
+
+struct cmsg_req_map_alloc_tbl {
+	struct cmsg_hdr hdr;
+	__be32 key_size;		/* in bytes */
+	__be32 value_size;		/* in bytes */
+	__be32 max_entries;
+	__be32 map_type;
+	__be32 map_flags;		/* reserved */
+};
+
+struct cmsg_reply_map_alloc_tbl {
+	struct cmsg_reply_map_simple reply_hdr;
+	__be32 tid;
+};
+
+struct cmsg_req_map_free_tbl {
+	struct cmsg_hdr hdr;
+	__be32 tid;
+};
+
+struct cmsg_reply_map_free_tbl {
+	struct cmsg_reply_map_simple reply_hdr;
+	__be32 count;
+};
+
+struct cmsg_key_value_pair {
+	__be32 key[CMSG_MAP_KEY_LW];
+	__be32 value[CMSG_MAP_VALUE_LW];
+};
+
+struct cmsg_req_map_op {
+	struct cmsg_hdr hdr;
+	__be32 tid;
+	__be32 count;
+	__be32 flags;
+	struct cmsg_key_value_pair elem[0];
+};
+
+struct cmsg_reply_map_op {
+	struct cmsg_reply_map_simple reply_hdr;
+	__be32 count;
+	__be32 resv;
+	struct cmsg_key_value_pair elem[0];
+};
 #endif
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 47c5224..56451ed 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -483,6 +483,21 @@ static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
 	}
 }
 
+static void
+wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
+	       enum nfp_relo_type relo)
+{
+	if (imm > 0xffff) {
+		pr_err("relocation of a large immediate!\n");
+		nfp_prog->error = -EFAULT;
+		return;
+	}
+	emit_immed(nfp_prog, dst, imm, IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
+
+	nfp_prog->prog[nfp_prog->prog_len - 1] |=
+		FIELD_PREP(OP_RELO_TYPE, relo);
+}
+
 /* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
  * If the @imm is small enough, encode it directly in the operand and return;
  * otherwise load @imm into a spare register and return its encoding.
@@ -538,27 +553,51 @@ wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len,
 	emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true);
 }
 
+static void
+addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
+	      swreg *rega, swreg *regb)
+{
+	if (offset == reg_imm(0)) {
+		*rega = reg_a(src_gpr);
+		*regb = reg_b(src_gpr + 1);
+		return;
+	}
+
+	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(src_gpr), ALU_OP_ADD, offset);
+	emit_alu(nfp_prog, imm_b(nfp_prog), reg_b(src_gpr + 1), ALU_OP_ADD_C,
+		 reg_imm(0));
+	*rega = imm_a(nfp_prog);
+	*regb = imm_b(nfp_prog);
+}
+
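A 40-bit address lives in two 32-bit registers, so adding an offset needs a plain add on the low word and an add-with-carry into the high word, which is what addr40_offset() emits. The same split in host arithmetic (the address and offset are hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t base = 0x12ffffff00ULL;	/* 40-bit address */
	uint32_t off = 0x200;
	uint32_t lo = (uint32_t)base + off;		/* ALU_OP_ADD */
	uint32_t carry = (uint32_t)base > lo;		/* did it wrap? */
	uint32_t hi = (uint32_t)(base >> 32) + carry;	/* ALU_OP_ADD_C */

	printf("hi=0x%02x lo=0x%08x\n", hi, lo);	/* 0x13 0x00000100 */
	return 0;
}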
 /* NFP has a Command Push Pull bus which supports bulk memory operations. */
 static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
 	bool descending_seq = meta->ldst_gather_len < 0;
 	s16 len = abs(meta->ldst_gather_len);
 	swreg src_base, off;
+	bool src_40bit_addr;
 	unsigned int i;
 	u8 xfer_num;
 
 	off = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
+	src_40bit_addr = meta->ptr.type == PTR_TO_MAP_VALUE;
 	src_base = reg_a(meta->insn.src_reg * 2);
 	xfer_num = round_up(len, 4) / 4;
 
+	if (src_40bit_addr)
+		addr40_offset(nfp_prog, meta->insn.src_reg, off, &src_base,
+			      &off);
+
 	/* Setup PREV_ALU fields to override memory read length. */
 	if (len > 32)
 		wrp_immed(nfp_prog, reg_none(),
 			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
 
 	/* Memory read from source addr into transfer-in registers. */
-	emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0, src_base,
-		     off, xfer_num - 1, true, len > 32);
+	emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP,
+		     src_40bit_addr ? CMD_MODE_40b_BA : CMD_MODE_32b, 0,
+		     src_base, off, xfer_num - 1, true, len > 32);
 
 	/* Move from transfer-in to transfer-out. */
 	for (i = 0; i < xfer_num; i++)
@@ -696,20 +735,20 @@ data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
 }
 
 static int
-data_ld_host_order(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
-		   u8 dst_gpr, int size)
+data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
+		   swreg lreg, swreg rreg, int size, enum cmd_mode mode)
 {
 	unsigned int i;
 	u8 mask, sz;
 
-	/* We load the value from the address indicated in @offset and then
+	/* We load the value from the address indicated in rreg + lreg and then
 	 * mask out the data we don't need.  Note: this is little endian!
 	 */
 	sz = max(size, 4);
 	mask = size < 4 ? GENMASK(size - 1, 0) : 0;
 
-	emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0,
-		 reg_a(src_gpr), offset, sz / 4 - 1, true);
+	emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, mode, 0,
+		 lreg, rreg, sz / 4 - 1, true);
 
 	i = 0;
 	if (mask)
@@ -726,6 +765,26 @@ data_ld_host_order(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
 }
 
 static int
+data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
+			  u8 dst_gpr, u8 size)
+{
+	return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset,
+				  size, CMD_MODE_32b);
+}
+
+static int
+data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
+			  u8 dst_gpr, u8 size)
+{
+	swreg rega, regb;
+
+	addr40_offset(nfp_prog, src_gpr, offset, &rega, &regb);
+
+	return data_ld_host_order(nfp_prog, dst_gpr, rega, regb,
+				  size, CMD_MODE_40b_BA);
+}
+
+static int
 construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
 {
 	swreg tmp_reg;
@@ -1279,6 +1338,56 @@ static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 	return 0;
 }
 
+static int
+map_lookup_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+	struct bpf_offloaded_map *offmap;
+	struct nfp_bpf_map *nfp_map;
+	bool load_lm_ptr;
+	u32 ret_tgt;
+	s64 lm_off;
+	swreg tid;
+
+	offmap = (struct bpf_offloaded_map *)meta->arg1.map_ptr;
+	nfp_map = offmap->dev_priv;
+
+	/* We only have to reload LM0 if the key is not at the start of stack */
+	lm_off = nfp_prog->stack_depth;
+	lm_off += meta->arg2.var_off.value + meta->arg2.off;
+	load_lm_ptr = meta->arg2_var_off || lm_off;
+
+	/* Set LM0 to start of key */
+	if (load_lm_ptr)
+		emit_csr_wr(nfp_prog, reg_b(2 * 2), NFP_CSR_ACT_LM_ADDR0);
+
+	/* Load the map ID into a register; it should actually fit as an
+	 * immediate, but in case it doesn't, deal with it here rather
+	 * than in the delay slots.
+	 */
+	tid = ur_load_imm_any(nfp_prog, nfp_map->tid, imm_a(nfp_prog));
+
+	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + BPF_FUNC_map_lookup_elem,
+		     2, RELO_BR_HELPER);
+	ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;
+
+	/* Load map ID into A0 */
+	wrp_mov(nfp_prog, reg_a(0), tid);
+
+	/* Load the return address into B0 */
+	wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);
+
+	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
+		return -EINVAL;
+
+	/* Reset the LM0 pointer */
+	if (!load_lm_ptr)
+		return 0;
+
+	emit_csr_wr(nfp_prog, stack_reg(nfp_prog),  NFP_CSR_ACT_LM_ADDR0);
+	wrp_nops(nfp_prog, 3);
+
+	return 0;
+}
+
 /* --- Callbacks --- */
 static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
@@ -1713,8 +1822,20 @@ mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 
 	tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
 
-	return data_ld_host_order(nfp_prog, meta->insn.src_reg * 2, tmp_reg,
-				  meta->insn.dst_reg * 2, size);
+	return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2,
+					 tmp_reg, meta->insn.dst_reg * 2, size);
+}
+
+static int
+mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+	     unsigned int size)
+{
+	swreg tmp_reg;
+
+	tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
+
+	return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2,
+					 tmp_reg, meta->insn.dst_reg * 2, size);
 }
 
 static int
@@ -1738,6 +1859,9 @@ mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 		return mem_ldx_stack(nfp_prog, meta, size,
 				     meta->ptr.off + meta->ptr.var_off.value);
 
+	if (meta->ptr.type == PTR_TO_MAP_VALUE)
+		return mem_ldx_emem(nfp_prog, meta, size);
+
 	return -EOPNOTSUPP;
 }
 
@@ -2058,6 +2182,8 @@ static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 	switch (meta->insn.imm) {
 	case BPF_FUNC_xdp_adjust_head:
 		return adjust_head(nfp_prog, meta);
+	case BPF_FUNC_map_lookup_elem:
+		return map_lookup_stack(nfp_prog, meta);
 	default:
 		WARN_ONCE(1, "verifier allowed unsupported function\n");
 		return -EOPNOTSUPP;
@@ -2781,6 +2907,11 @@ void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt)
 	}
 }
 
+bool nfp_bpf_supported_opcode(u8 code)
+{
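+	/* An opcode is supported iff the JIT has a callback registered */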
+	return !!instr_cb[code];
+}
+
 void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
 {
 	unsigned int i;
@@ -2794,6 +2925,7 @@ void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
 
 	for (i = 0; i < nfp_prog->prog_len; i++) {
 		enum nfp_relo_type special;
+		u32 val;
 
 		special = FIELD_GET(OP_RELO_TYPE, prog[i]);
 		switch (special) {
@@ -2813,6 +2945,24 @@ void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
 		case RELO_BR_NEXT_PKT:
 			br_set_offset(&prog[i], bv->tgt_done);
 			break;
+		case RELO_BR_HELPER:
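+			/* Swap the helper ID recorded at code generation
+			 * time for the helper's address in firmware.
+			 */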
+			val = br_get_offset(prog[i]);
+			val -= BR_OFF_RELO;
+			switch (val) {
+			case BPF_FUNC_map_lookup_elem:
+				val = nfp_prog->bpf->helpers.map_lookup;
+				break;
+			default:
+				pr_err("relocation of unknown helper %d\n",
+				       val);
+				err = -EINVAL;
+				goto err_free_prog;
+			}
+			br_set_offset(&prog[i], val);
+			break;
+		case RELO_IMMED_REL:
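+			/* Make the load-address-relative immediate absolute */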
+			immed_add_value(&prog[i], bv->start_off);
+			break;
 		}
 
 		prog[i] &= ~OP_RELO_TYPE;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index e8cfe30..8823c83 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -251,6 +251,45 @@ nfp_bpf_parse_cap_adjust_head(struct nfp_app_bpf *bpf, void __iomem *value,
 	return 0;
 }
 
+static int
+nfp_bpf_parse_cap_func(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
+{
+	struct nfp_bpf_cap_tlv_func __iomem *cap = value;
+
+	if (length < sizeof(*cap)) {
+		nfp_err(bpf->app->cpp, "truncated function TLV: %d\n", length);
+		return -EINVAL;
+	}
+
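+	/* Record the firmware address of each helper call we recognize */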
+	switch (readl(&cap->func_id)) {
+	case BPF_FUNC_map_lookup_elem:
+		bpf->helpers.map_lookup = readl(&cap->func_addr);
+		break;
+	}
+
+	return 0;
+}
+
+static int
+nfp_bpf_parse_cap_maps(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
+{
+	struct nfp_bpf_cap_tlv_maps __iomem *cap = value;
+
+	if (length < sizeof(*cap)) {
+		nfp_err(bpf->app->cpp, "truncated maps TLV: %d\n", length);
+		return -EINVAL;
+	}
+
+	bpf->maps.types = readl(&cap->types);
+	bpf->maps.max_maps = readl(&cap->max_maps);
+	bpf->maps.max_elems = readl(&cap->max_elems);
+	bpf->maps.max_key_sz = readl(&cap->max_key_sz);
+	bpf->maps.max_val_sz = readl(&cap->max_val_sz);
+	bpf->maps.max_elem_sz = readl(&cap->max_elem_sz);
+
+	return 0;
+}
+
 static int nfp_bpf_parse_capabilities(struct nfp_app *app)
 {
 	struct nfp_cpp *cpp = app->pf->cpp;
@@ -276,11 +315,19 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app)
 			goto err_release_free;
 
 		switch (type) {
+		case NFP_BPF_CAP_TYPE_FUNC:
+			if (nfp_bpf_parse_cap_func(app->priv, value, length))
+				goto err_release_free;
+			break;
 		case NFP_BPF_CAP_TYPE_ADJUST_HEAD:
 			if (nfp_bpf_parse_cap_adjust_head(app->priv, value,
 							  length))
 				goto err_release_free;
 			break;
+		case NFP_BPF_CAP_TYPE_MAPS:
+			if (nfp_bpf_parse_cap_maps(app->priv, value, length))
+				goto err_release_free;
+			break;
 		default:
 			nfp_dbg(cpp, "unknown BPF capability: %d\n", type);
 			break;
@@ -313,6 +360,10 @@ static int nfp_bpf_init(struct nfp_app *app)
 	bpf->app = app;
 	app->priv = bpf;
 
+	skb_queue_head_init(&bpf->cmsg_replies);
+	init_waitqueue_head(&bpf->cmsg_wq);
+	INIT_LIST_HEAD(&bpf->map_list);
+
 	err = nfp_bpf_parse_capabilities(app);
 	if (err)
 		goto err_free_bpf;
@@ -326,7 +377,12 @@ static int nfp_bpf_init(struct nfp_app *app)
 
 static void nfp_bpf_clean(struct nfp_app *app)
 {
-	kfree(app->priv);
+	struct nfp_app_bpf *bpf = app->priv;
+
+	WARN_ON(!skb_queue_empty(&bpf->cmsg_replies));
+	WARN_ON(!list_empty(&bpf->map_list));
+	WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use);
+	kfree(bpf);
 }
 
 const struct nfp_app_type app_bpf = {
@@ -343,6 +399,8 @@ const struct nfp_app_type app_bpf = {
 	.vnic_alloc	= nfp_bpf_vnic_alloc,
 	.vnic_free	= nfp_bpf_vnic_free,
 
+	.ctrl_msg_rx	= nfp_bpf_ctrl_msg_rx,
+
 	.setup_tc	= nfp_bpf_setup_tc,
 	.tc_busy	= nfp_bpf_tc_busy,
 	.bpf		= nfp_ndo_bpf,
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 66381af..c476bca 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -37,10 +37,14 @@
 #include <linux/bitfield.h>
 #include <linux/bpf.h>
 #include <linux/bpf_verifier.h>
+#include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/skbuff.h>
 #include <linux/types.h>
+#include <linux/wait.h>
 
 #include "../nfp_asm.h"
+#include "fw.h"
 
 /* For relocation logic use up-most byte of branch instruction as scratch
  * area.  Remember to clear this before sending instructions to HW!
@@ -56,6 +60,9 @@ enum nfp_relo_type {
 	RELO_BR_GO_ABORT,
 	/* external jumps to fixed addresses */
 	RELO_BR_NEXT_PKT,
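+	/* relocated branch to a helper in firmware */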
+	RELO_BR_HELPER,
+	/* immediate relocation against load address */
+	RELO_IMMED_REL,
 };
 
 /* To make absolute relocated branches (branches other than RELO_BR_REL)
@@ -93,16 +100,49 @@ enum pkt_vec {
  * struct nfp_app_bpf - bpf app priv structure
  * @app:		backpointer to the app
  *
+ * @tag_allocator:	bitmap of control message tags in use
+ * @tag_alloc_next:	next tag bit to allocate
+ * @tag_alloc_last:	next tag bit to be freed
+ *
+ * @cmsg_replies:	received cmsg replies waiting to be consumed
+ * @cmsg_wq:		wait queue for cmsg replies
+ *
+ * @map_list:		list of offloaded maps
+ * @maps_in_use:	number of currently offloaded maps
+ * @map_elems_in_use:	number of elements allocated to offloaded maps
+ *
  * @adjust_head:	adjust head capability
  * @flags:		extra flags for adjust head
  * @off_min:		minimal packet offset within buffer required
  * @off_max:		maximum packet offset within buffer required
  * @guaranteed_sub:	amount of negative adjustment guaranteed possible
  * @guaranteed_add:	amount of positive adjustment guaranteed possible
+ *
+ * @maps:		map capability
+ * @types:		supported map types
+ * @max_maps:		max number of maps supported
+ * @max_elems:		max number of entries in each map
+ * @max_key_sz:		max size of map key
+ * @max_val_sz:		max size of map value
+ * @max_elem_sz:	max size of map entry (key + value)
+ *
+ * @helpers:		helper addresses for various calls
+ * @map_lookup:		map lookup helper address
  */
 struct nfp_app_bpf {
 	struct nfp_app *app;
 
+	DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
+	u16 tag_alloc_next;
+	u16 tag_alloc_last;
+
+	struct sk_buff_head cmsg_replies;
+	struct wait_queue_head cmsg_wq;
+
+	struct list_head map_list;
+	unsigned int maps_in_use;
+	unsigned int map_elems_in_use;
+
 	struct nfp_bpf_cap_adjust_head {
 		u32 flags;
 		int off_min;
@@ -110,6 +150,33 @@ struct nfp_app_bpf {
 		int guaranteed_sub;
 		int guaranteed_add;
 	} adjust_head;
+
+	struct {
+		u32 types;
+		u32 max_maps;
+		u32 max_elems;
+		u32 max_key_sz;
+		u32 max_val_sz;
+		u32 max_elem_sz;
+	} maps;
+
+	struct {
+		u32 map_lookup;
+	} helpers;
+};
+
+/**
+ * struct nfp_bpf_map - private per-map data attached to BPF maps for offload
+ * @offmap:	pointer to the offloaded BPF map
+ * @bpf:	back pointer to bpf app private structure
+ * @tid:	table id identifying map on datapath
+ * @l:		link on the nfp_app_bpf->map_list list
+ */
+struct nfp_bpf_map {
+	struct bpf_offloaded_map *offmap;
+	struct nfp_app_bpf *bpf;
+	u32 tid;
+	struct list_head l;
 };
 
 struct nfp_prog;
@@ -131,9 +198,12 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
  * @ptr: pointer type for memory operations
  * @ldst_gather_len: memcpy length gathered from load/store sequence
  * @paired_st: the paired store insn at the head of the sequence
- * @arg2: arg2 for call instructions
  * @ptr_not_const: pointer is not always constant
  * @jmp_dst: destination info for jump instructions
+ * @func_id: function id for call instructions
+ * @arg1: arg1 for call instructions
+ * @arg2: arg2 for call instructions
+ * @arg2_var_off: arg2 changes stack offset on different paths
  * @off: index of first generated machine instruction (in nfp_prog.prog)
  * @n: eBPF instruction number
  * @flags: eBPF instruction extra optimization flags
@@ -151,7 +221,12 @@ struct nfp_insn_meta {
 			bool ptr_not_const;
 		};
 		struct nfp_insn_meta *jmp_dst;
-		struct bpf_reg_state arg2;
+		struct {
+			u32 func_id;
+			struct bpf_reg_state arg1;
+			struct bpf_reg_state arg2;
+			bool arg2_var_off;
+		};
 	};
 	unsigned int off;
 	unsigned short n;
@@ -249,6 +324,7 @@ struct nfp_bpf_vnic {
 
 void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt);
 int nfp_bpf_jit(struct nfp_prog *prog);
+bool nfp_bpf_supported_opcode(u8 code);
 
 extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops;
 
@@ -266,4 +342,20 @@ nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 		  unsigned int insn_idx, unsigned int n_insns);
 
 void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);
+
+long long int
+nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map);
+void
+nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map);
+int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
+				void *next_key);
+int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
+			      void *key, void *value, u64 flags);
+int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key);
+int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
+			      void *key, void *value);
+int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
+			       void *key, void *next_key);
+
+void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);
 #endif
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 320b225..e2859b2 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -36,6 +36,9 @@
  * Netronome network device driver: TC offload functions for PF and VF
  */
 
+#define pr_fmt(fmt)	"NFP net bpf: " fmt
+
+#include <linux/bpf.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
 #include <linux/pci.h>
@@ -153,6 +156,103 @@ static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
 	return 0;
 }
 
+static int
+nfp_bpf_map_get_next_key(struct bpf_offloaded_map *offmap,
+			 void *key, void *next_key)
+{
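+	/* A NULL key requests the first entry in the map */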
+	if (!key)
+		return nfp_bpf_ctrl_getfirst_entry(offmap, next_key);
+	return nfp_bpf_ctrl_getnext_entry(offmap, key, next_key);
+}
+
+static int
+nfp_bpf_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
+{
+	return nfp_bpf_ctrl_del_entry(offmap, key);
+}
+
+static const struct bpf_map_dev_ops nfp_bpf_map_ops = {
+	.map_get_next_key	= nfp_bpf_map_get_next_key,
+	.map_lookup_elem	= nfp_bpf_ctrl_lookup_entry,
+	.map_update_elem	= nfp_bpf_ctrl_update_entry,
+	.map_delete_elem	= nfp_bpf_map_delete_elem,
+};
+
+static int
+nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
+{
+	struct nfp_bpf_map *nfp_map;
+	long long int res;
+
+	if (!bpf->maps.types)
+		return -EOPNOTSUPP;
+
+	if (offmap->map.map_flags ||
+	    offmap->map.numa_node != NUMA_NO_NODE) {
+		pr_info("map flags are not supported\n");
+		return -EINVAL;
+	}
+
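+	/* Enforce the limits the firmware advertised in its maps capability */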
+	if (!(bpf->maps.types & 1 << offmap->map.map_type)) {
+		pr_info("map type not supported\n");
+		return -EOPNOTSUPP;
+	}
+	if (bpf->maps.max_maps == bpf->maps_in_use) {
+		pr_info("too many maps for a device\n");
+		return -ENOMEM;
+	}
+	if (bpf->maps.max_elems - bpf->map_elems_in_use <
+	    offmap->map.max_entries) {
+		pr_info("map with too many elements: %u, left: %u\n",
+			offmap->map.max_entries,
+			bpf->maps.max_elems - bpf->map_elems_in_use);
+		return -ENOMEM;
+	}
+	if (offmap->map.key_size > bpf->maps.max_key_sz ||
+	    offmap->map.value_size > bpf->maps.max_val_sz ||
+	    round_up(offmap->map.key_size, 8) +
+	    round_up(offmap->map.value_size, 8) > bpf->maps.max_elem_sz) {
+		pr_info("elements don't fit in device constraints\n");
+		return -ENOMEM;
+	}
+
+	nfp_map = kzalloc(sizeof(*nfp_map), GFP_USER);
+	if (!nfp_map)
+		return -ENOMEM;
+
+	offmap->dev_priv = nfp_map;
+	nfp_map->offmap = offmap;
+	nfp_map->bpf = bpf;
+
+	res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map);
+	if (res < 0) {
+		kfree(nfp_map);
+		return res;
+	}
+
+	nfp_map->tid = res;
+	offmap->dev_ops = &nfp_bpf_map_ops;
+	bpf->maps_in_use++;
+	bpf->map_elems_in_use += offmap->map.max_entries;
+	list_add_tail(&nfp_map->l, &bpf->map_list);
+
+	return 0;
+}
+
+static int
+nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
+{
+	struct nfp_bpf_map *nfp_map = offmap->dev_priv;
+
+	nfp_bpf_ctrl_free_map(bpf, nfp_map);
+	list_del_init(&nfp_map->l);
+	bpf->map_elems_in_use -= offmap->map.max_entries;
+	bpf->maps_in_use--;
+	kfree(nfp_map);
+
+	return 0;
+}
+
 int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
 {
 	switch (bpf->command) {
@@ -162,6 +262,10 @@ int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
 		return nfp_bpf_translate(nn, bpf->offload.prog);
 	case BPF_OFFLOAD_DESTROY:
 		return nfp_bpf_destroy(nn, bpf->offload.prog);
+	case BPF_OFFLOAD_MAP_ALLOC:
+		return nfp_bpf_map_alloc(app->priv, bpf->offmap);
+	case BPF_OFFLOAD_MAP_FREE:
+		return nfp_bpf_map_free(app->priv, bpf->offmap);
 	default:
 		return -EINVAL;
 	}
@@ -237,7 +341,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
 	int err;
 
 	if (prog) {
-		struct bpf_dev_offload *offload = prog->aux->offload;
+		struct bpf_prog_offload *offload = prog->aux->offload;
 
 		if (!offload)
 			return -EINVAL;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index 7890d95..479f602 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -110,9 +110,11 @@ static int
 nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
 		   struct nfp_insn_meta *meta)
 {
+	const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
 	const struct bpf_reg_state *reg2 = cur_regs(env) + BPF_REG_2;
 	struct nfp_app_bpf *bpf = nfp_prog->bpf;
 	u32 func_id = meta->insn.imm;
+	s64 off, old_off;
 
 	switch (func_id) {
 	case BPF_FUNC_xdp_adjust_head:
@@ -127,11 +129,50 @@ nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
 
 		nfp_record_adjust_head(bpf, nfp_prog, meta, reg2);
 		break;
+
+	case BPF_FUNC_map_lookup_elem:
+		if (!bpf->helpers.map_lookup) {
+			pr_vlog(env, "map_lookup: not supported by FW\n");
+			return -EOPNOTSUPP;
+		}
+		if (reg2->type != PTR_TO_STACK) {
+			pr_vlog(env,
+				"map_lookup: unsupported key ptr type %d\n",
+				reg2->type);
+			return -EOPNOTSUPP;
+		}
+		if (!tnum_is_const(reg2->var_off)) {
+			pr_vlog(env, "map_lookup: variable key pointer\n");
+			return -EOPNOTSUPP;
+		}
+
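+		/* The key's stack offset must be 4-byte aligned */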
+		off = reg2->var_off.value + reg2->off;
+		if (-off % 4) {
+			pr_vlog(env,
+				"map_lookup: unaligned stack pointer %lld\n",
+				-off);
+			return -EOPNOTSUPP;
+		}
+
+		/* The rest of the checks apply only when we re-parse the same insn */
+		if (!meta->func_id)
+			break;
+
+		old_off = meta->arg2.var_off.value + meta->arg2.off;
+		meta->arg2_var_off |= off != old_off;
+
+		if (meta->arg1.map_ptr != reg1->map_ptr) {
+			pr_vlog(env, "map_lookup: called for different map\n");
+			return -EOPNOTSUPP;
+		}
+		break;
 	default:
 		pr_vlog(env, "unsupported function id: %d\n", func_id);
 		return -EOPNOTSUPP;
 	}
 
+	meta->func_id = func_id;
+	meta->arg1 = *reg1;
 	meta->arg2 = *reg2;
 
 	return 0;
@@ -210,6 +251,7 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 
 	if (reg->type != PTR_TO_CTX &&
 	    reg->type != PTR_TO_STACK &&
+	    reg->type != PTR_TO_MAP_VALUE &&
 	    reg->type != PTR_TO_PACKET) {
 		pr_vlog(env, "unsupported ptr type: %d\n", reg->type);
 		return -EINVAL;
@@ -221,6 +263,13 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 			return err;
 	}
 
+	if (reg->type == PTR_TO_MAP_VALUE) {
+		if (is_mbpf_store(meta)) {
+			pr_vlog(env, "map writes not supported\n");
+			return -EOPNOTSUPP;
+		}
+	}
+
 	if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
 		pr_vlog(env, "ptr type changed for instruction %d -> %d\n",
 			meta->ptr.type, reg->type);
@@ -241,6 +290,12 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
 	meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx, env->prog->len);
 	nfp_prog->verifier_meta = meta;
 
+	if (!nfp_bpf_supported_opcode(meta->insn.code)) {
+		pr_vlog(env, "instruction %#02x not supported\n",
+			meta->insn.code);
+		return -EINVAL;
+	}
+
 	if (meta->insn.src_reg >= MAX_BPF_REG ||
 	    meta->insn.dst_reg >= MAX_BPF_REG) {
 		pr_vlog(env, "program uses extended registers - jit hardening?\n");
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index 32ff46a..6a6eb02 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -165,6 +165,7 @@ struct nfp_app {
 	void *priv;
 };
 
+bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
 bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
 
 static inline int nfp_app_init(struct nfp_app *app)
@@ -326,6 +327,14 @@ static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
 	return app->type->xdp_offload(app, nn, prog);
 }
 
+static inline bool __nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
+{
+	trace_devlink_hwmsg(priv_to_devlink(app->pf), false, 0,
+			    skb->data, skb->len);
+
+	return __nfp_ctrl_tx(app->ctrl, skb);
+}
+
 static inline bool nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
 {
 	trace_devlink_hwmsg(priv_to_devlink(app->pf), false, 0,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
index 9ee3a3f..3f6952b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
@@ -50,6 +50,11 @@ const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
 	[CMD_TGT_READ_SWAP_LE] =	{ 0x03, 0x40 },
 };
 
+static bool unreg_is_imm(u16 reg)
+{
+	return (reg & UR_REG_IMM) == UR_REG_IMM;
+}
+
 u16 br_get_offset(u64 instr)
 {
 	u16 addr_lo, addr_hi;
@@ -80,6 +85,59 @@ void br_add_offset(u64 *instr, u16 offset)
 	br_set_offset(instr, addr + offset);
 }
 
+static bool immed_can_modify(u64 instr)
+{
+	if (FIELD_GET(OP_IMMED_INV, instr) ||
+	    FIELD_GET(OP_IMMED_SHIFT, instr) ||
+	    FIELD_GET(OP_IMMED_WIDTH, instr) != IMMED_WIDTH_ALL) {
+		pr_err("Can't decode/encode immed!\n");
+		return false;
+	}
+	return true;
+}
+
+u16 immed_get_value(u64 instr)
+{
+	u16 reg;
+
+	if (!immed_can_modify(instr))
+		return 0;
+
+	reg = FIELD_GET(OP_IMMED_A_SRC, instr);
+	if (!unreg_is_imm(reg))
+		reg = FIELD_GET(OP_IMMED_B_SRC, instr);
+
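+	/* Low byte lives in the src operand, high byte in the IMM field */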
+	return (reg & 0xff) | FIELD_GET(OP_IMMED_IMM, instr) << 8;
+}
+
+void immed_set_value(u64 *instr, u16 immed)
+{
+	if (!immed_can_modify(*instr))
+		return;
+
+	if (unreg_is_imm(FIELD_GET(OP_IMMED_A_SRC, *instr))) {
+		*instr &= ~FIELD_PREP(OP_IMMED_A_SRC, 0xff);
+		*instr |= FIELD_PREP(OP_IMMED_A_SRC, immed & 0xff);
+	} else {
+		*instr &= ~FIELD_PREP(OP_IMMED_B_SRC, 0xff);
+		*instr |= FIELD_PREP(OP_IMMED_B_SRC, immed & 0xff);
+	}
+
+	*instr &= ~OP_IMMED_IMM;
+	*instr |= FIELD_PREP(OP_IMMED_IMM, immed >> 8);
+}
+
+void immed_add_value(u64 *instr, u16 offset)
+{
+	u16 val;
+
+	if (!immed_can_modify(*instr))
+		return;
+
+	val = immed_get_value(*instr);
+	immed_set_value(instr, val + offset);
+}
+
 static u16 nfp_swreg_to_unreg(swreg reg, bool is_dst)
 {
 	bool lm_id, lm_dec = false;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
index 20e51cb..5f9291d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
@@ -138,6 +138,10 @@ enum immed_shift {
 	IMMED_SHIFT_2B = 2,
 };
 
+u16 immed_get_value(u64 instr);
+void immed_set_value(u64 *instr, u16 immed);
+void immed_add_value(u64 *instr, u16 offset);
+
 #define OP_SHF_BASE		0x08000000000ULL
 #define OP_SHF_A_SRC		0x000000000ffULL
 #define OP_SHF_SC		0x00000000300ULL
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 0e564cf..6f6e3d6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -839,6 +839,18 @@ static inline const char *nfp_net_name(struct nfp_net *nn)
 	return nn->dp.netdev ? nn->dp.netdev->name : "ctrl";
 }
 
+static inline void nfp_ctrl_lock(struct nfp_net *nn)
+	__acquires(&nn->r_vecs[0].lock)
+{
+	spin_lock_bh(&nn->r_vecs[0].lock);
+}
+
+static inline void nfp_ctrl_unlock(struct nfp_net *nn)
+	__releases(&nn->r_vecs[0].lock)
+{
+	spin_unlock_bh(&nn->r_vecs[0].lock);
+}
+
 /* Globals */
 extern const char nfp_driver_version[];
 
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 07e0587..2b5cad3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1920,6 +1920,13 @@ nfp_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
 	return false;
 }
 
+bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
+{
+	struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
+
+	return nfp_ctrl_tx_one(nn, r_vec, skb, false);
+}
+
 bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
 {
 	struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 00b8c64..e1dae06 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -331,7 +331,7 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
 	    ls >= ARRAY_SIZE(ls_to_ethtool))
 		return 0;
 
-	cmd->base.speed = ls_to_ethtool[sts];
+	cmd->base.speed = ls_to_ethtool[ls];
 	cmd->base.duplex = DUPLEX_FULL;
 
 	return 0;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 217b62a..3e57bf5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -776,6 +776,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
 	int rc = 0;
 	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
 	bool b_ret_ent = true;
+	bool eblock;
 
 	if (!p_hwfn)
 		return -EINVAL;
@@ -794,6 +795,11 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
 	if (rc)
 		goto spq_post_fail;
 
+	/* Check if entry is in block mode before qed_spq_add_entry,
+	 * which might kfree p_ent.
+	 */
+	eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK);
+
 	/* Add the request to the pending queue */
 	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
 	if (rc)
@@ -811,7 +817,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
 
 	spin_unlock_bh(&p_spq->lock);
 
-	if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
+	if (eblock) {
 		/* For entries in QED BLOCK mode, the completion code cannot
 		 * perform the necessary cleanup - if it did, we couldn't
 		 * access p_ent here to see whether it's successful or not.
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index a0fe059..a197e11 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2089,8 +2089,8 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
 		add_reg(CSMR);
 	if (cd->select_mii)
 		add_reg(RMII_MII);
-	add_reg(ARSTR);
 	if (cd->tsu) {
+		add_tsu_reg(ARSTR);
 		add_tsu_reg(TSU_CTRST);
 		add_tsu_reg(TSU_FWEN0);
 		add_tsu_reg(TSU_FWEN1);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 4404650b..5270d26 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -40,9 +40,7 @@
 #define PRG_ETH0_CLK_M250_DIV_SHIFT	7
 #define PRG_ETH0_CLK_M250_DIV_WIDTH	3
 
-/* divides the result of m25_sel by either 5 (bit unset) or 10 (bit set) */
-#define PRG_ETH0_CLK_M25_DIV_SHIFT	10
-#define PRG_ETH0_CLK_M25_DIV_WIDTH	1
+#define PRG_ETH0_RGMII_TX_CLK_EN	10
 
 #define PRG_ETH0_INVERTED_RMII_CLK	BIT(11)
 #define PRG_ETH0_TX_AND_PHY_REF_CLK	BIT(12)
@@ -63,8 +61,11 @@ struct meson8b_dwmac {
 	struct clk_divider	m250_div;
 	struct clk		*m250_div_clk;
 
-	struct clk_divider	m25_div;
-	struct clk		*m25_div_clk;
+	struct clk_fixed_factor	fixed_div2;
+	struct clk		*fixed_div2_clk;
+
+	struct clk_gate		rgmii_tx_en;
+	struct clk		*rgmii_tx_en_clk;
 
 	u32			tx_delay_ns;
 };
@@ -81,7 +82,7 @@ static void meson8b_dwmac_mask_bits(struct meson8b_dwmac *dwmac, u32 reg,
 	writel(data, dwmac->regs + reg);
 }
 
-static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
+static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
 {
 	struct clk_init_data init;
 	int i, ret;
@@ -89,11 +90,6 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
 	char clk_name[32];
 	const char *clk_div_parents[1];
 	const char *mux_parent_names[MUX_CLK_NUM_PARENTS];
-	static const struct clk_div_table clk_25m_div_table[] = {
-		{ .val = 0, .div = 5 },
-		{ .val = 1, .div = 10 },
-		{ /* sentinel */ },
-	};
 
 	/* get the mux parents from DT */
 	for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) {
@@ -116,7 +112,7 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
 	snprintf(clk_name, sizeof(clk_name), "%s#m250_sel", dev_name(dev));
 	init.name = clk_name;
 	init.ops = &clk_mux_ops;
-	init.flags = 0;
+	init.flags = CLK_SET_RATE_PARENT;
 	init.parent_names = mux_parent_names;
 	init.num_parents = MUX_CLK_NUM_PARENTS;
 
@@ -144,31 +140,48 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
 	dwmac->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT;
 	dwmac->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH;
 	dwmac->m250_div.hw.init = &init;
-	dwmac->m250_div.flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO;
+	dwmac->m250_div.flags = CLK_DIVIDER_ONE_BASED |
+				CLK_DIVIDER_ALLOW_ZERO |
+				CLK_DIVIDER_ROUND_CLOSEST;
 
 	dwmac->m250_div_clk = devm_clk_register(dev, &dwmac->m250_div.hw);
 	if (WARN_ON(IS_ERR(dwmac->m250_div_clk)))
 		return PTR_ERR(dwmac->m250_div_clk);
 
-	/* create the m25_div */
-	snprintf(clk_name, sizeof(clk_name), "%s#m25_div", dev_name(dev));
+	/* create the fixed_div2 */
+	snprintf(clk_name, sizeof(clk_name), "%s#fixed_div2", dev_name(dev));
 	init.name = devm_kstrdup(dev, clk_name, GFP_KERNEL);
-	init.ops = &clk_divider_ops;
-	init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+	init.ops = &clk_fixed_factor_ops;
+	init.flags = CLK_SET_RATE_PARENT;
 	clk_div_parents[0] = __clk_get_name(dwmac->m250_div_clk);
 	init.parent_names = clk_div_parents;
 	init.num_parents = ARRAY_SIZE(clk_div_parents);
 
-	dwmac->m25_div.reg = dwmac->regs + PRG_ETH0;
-	dwmac->m25_div.shift = PRG_ETH0_CLK_M25_DIV_SHIFT;
-	dwmac->m25_div.width = PRG_ETH0_CLK_M25_DIV_WIDTH;
-	dwmac->m25_div.table = clk_25m_div_table;
-	dwmac->m25_div.hw.init = &init;
-	dwmac->m25_div.flags = CLK_DIVIDER_ALLOW_ZERO;
+	dwmac->fixed_div2.mult = 1;
+	dwmac->fixed_div2.div = 2;
+	dwmac->fixed_div2.hw.init = &init;
 
-	dwmac->m25_div_clk = devm_clk_register(dev, &dwmac->m25_div.hw);
-	if (WARN_ON(IS_ERR(dwmac->m25_div_clk)))
-		return PTR_ERR(dwmac->m25_div_clk);
+	dwmac->fixed_div2_clk = devm_clk_register(dev, &dwmac->fixed_div2.hw);
+	if (WARN_ON(IS_ERR(dwmac->fixed_div2_clk)))
+		return PTR_ERR(dwmac->fixed_div2_clk);
+
+	/* create the rgmii_tx_en */
+	init.name = devm_kasprintf(dev, GFP_KERNEL, "%s#rgmii_tx_en",
+				   dev_name(dev));
+	init.ops = &clk_gate_ops;
+	init.flags = CLK_SET_RATE_PARENT;
+	clk_div_parents[0] = __clk_get_name(dwmac->fixed_div2_clk);
+	init.parent_names = clk_div_parents;
+	init.num_parents = ARRAY_SIZE(clk_div_parents);
+
+	dwmac->rgmii_tx_en.reg = dwmac->regs + PRG_ETH0;
+	dwmac->rgmii_tx_en.bit_idx = PRG_ETH0_RGMII_TX_CLK_EN;
+	dwmac->rgmii_tx_en.hw.init = &init;
+
+	dwmac->rgmii_tx_en_clk = devm_clk_register(dev,
+						   &dwmac->rgmii_tx_en.hw);
+	if (WARN_ON(IS_ERR(dwmac->rgmii_tx_en_clk)))
+		return PTR_ERR(dwmac->rgmii_tx_en_clk);
 
 	return 0;
 }
@@ -176,7 +189,6 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
 static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
 {
 	int ret;
-	unsigned long clk_rate;
 	u8 tx_dly_val = 0;
 
 	switch (dwmac->phy_mode) {
@@ -191,9 +203,6 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
 
 	case PHY_INTERFACE_MODE_RGMII_ID:
 	case PHY_INTERFACE_MODE_RGMII_TXID:
-		/* Generate a 25MHz clock for the PHY */
-		clk_rate = 25 * 1000 * 1000;
-
 		/* enable RGMII mode */
 		meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE,
 					PRG_ETH0_RGMII_MODE);
@@ -204,12 +213,28 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
 
 		meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK,
 					tx_dly_val << PRG_ETH0_TXDLY_SHIFT);
+
+		/* Configure the 125MHz RGMII TX clock; the IP block changes
+		 * the output automatically (= without us having to configure
+		 * a register) based on the line-speed (125MHz for Gbit speeds,
+		 * 25MHz for 100Mbit/s and 2.5MHz for 10Mbit/s).
+		 */
+		ret = clk_set_rate(dwmac->rgmii_tx_en_clk, 125 * 1000 * 1000);
+		if (ret) {
+			dev_err(&dwmac->pdev->dev,
+				"failed to set RGMII TX clock\n");
+			return ret;
+		}
+
+		ret = clk_prepare_enable(dwmac->rgmii_tx_en_clk);
+		if (ret) {
+			dev_err(&dwmac->pdev->dev,
+				"failed to enable the RGMII TX clock\n");
+			return ret;
+		}
 		break;
 
 	case PHY_INTERFACE_MODE_RMII:
-		/* Use the rate of the mux clock for the internal RMII PHY */
-		clk_rate = clk_get_rate(dwmac->m250_mux_clk);
-
 		/* disable RGMII mode -> enables RMII mode */
 		meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE,
 					0);
@@ -231,20 +256,6 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
 		return -EINVAL;
 	}
 
-	ret = clk_prepare_enable(dwmac->m25_div_clk);
-	if (ret) {
-		dev_err(&dwmac->pdev->dev, "failed to enable the PHY clock\n");
-		return ret;
-	}
-
-	ret = clk_set_rate(dwmac->m25_div_clk, clk_rate);
-	if (ret) {
-		clk_disable_unprepare(dwmac->m25_div_clk);
-
-		dev_err(&dwmac->pdev->dev, "failed to set PHY clock\n");
-		return ret;
-	}
-
 	/* enable TX_CLK and PHY_REF_CLK generator */
 	meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TX_AND_PHY_REF_CLK,
 				PRG_ETH0_TX_AND_PHY_REF_CLK);
@@ -294,7 +305,7 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
 				 &dwmac->tx_delay_ns))
 		dwmac->tx_delay_ns = 2;
 
-	ret = meson8b_init_clk(dwmac);
+	ret = meson8b_init_rgmii_tx_clk(dwmac);
 	if (ret)
 		goto err_remove_config_dt;
 
@@ -311,7 +322,8 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
 	return 0;
 
 err_clk_disable:
-	clk_disable_unprepare(dwmac->m25_div_clk);
+	if (phy_interface_mode_is_rgmii(dwmac->phy_mode))
+		clk_disable_unprepare(dwmac->rgmii_tx_en_clk);
 err_remove_config_dt:
 	stmmac_remove_config_dt(pdev, plat_dat);
 
@@ -322,7 +334,8 @@ static int meson8b_dwmac_remove(struct platform_device *pdev)
 {
 	struct meson8b_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
 
-	clk_disable_unprepare(dwmac->m25_div_clk);
+	if (phy_interface_mode_is_rgmii(dwmac->phy_mode))
+		clk_disable_unprepare(dwmac->rgmii_tx_en_clk);
 
 	return stmmac_pltfr_remove(pdev);
 }
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 6bd11a0..b13eed2 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1660,13 +1660,13 @@ EXPORT_SYMBOL(genphy_config_init);
 
 int genphy_suspend(struct phy_device *phydev)
 {
-	return phy_modify(phydev, MII_BMCR, 0, BMCR_PDOWN);
+	return phy_set_bits(phydev, MII_BMCR, BMCR_PDOWN);
 }
 EXPORT_SYMBOL(genphy_suspend);
 
 int genphy_resume(struct phy_device *phydev)
 {
-	return phy_modify(phydev, MII_BMCR, BMCR_PDOWN, 0);
+	return phy_clear_bits(phydev, MII_BMCR, BMCR_PDOWN);
 }
 EXPORT_SYMBOL(genphy_resume);
 
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 7c1bf68..ee3ca4a 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -41,37 +41,14 @@ MODULE_DESCRIPTION("Realtek PHY driver");
 MODULE_AUTHOR("Johnson Leung");
 MODULE_LICENSE("GPL");
 
-static int rtl8211x_page_read(struct phy_device *phydev, u16 page, u16 address)
+static int rtl821x_read_page(struct phy_device *phydev)
 {
-	int ret;
-
-	ret = phy_write(phydev, RTL821x_PAGE_SELECT, page);
-	if (ret)
-		return ret;
-
-	ret = phy_read(phydev, address);
-
-	/* restore to default page 0 */
-	phy_write(phydev, RTL821x_PAGE_SELECT, 0x0);
-
-	return ret;
+	return __phy_read(phydev, RTL821x_PAGE_SELECT);
 }
 
-static int rtl8211x_page_write(struct phy_device *phydev, u16 page,
-			       u16 address, u16 val)
+static int rtl821x_write_page(struct phy_device *phydev, int page)
 {
-	int ret;
-
-	ret = phy_write(phydev, RTL821x_PAGE_SELECT, page);
-	if (ret)
-		return ret;
-
-	ret = phy_write(phydev, address, val);
-
-	/* restore to default page 0 */
-	phy_write(phydev, RTL821x_PAGE_SELECT, 0x0);
-
-	return ret;
+	return __phy_write(phydev, RTL821x_PAGE_SELECT, page);
 }
 
 static int rtl8201_ack_interrupt(struct phy_device *phydev)
@@ -96,7 +73,7 @@ static int rtl8211f_ack_interrupt(struct phy_device *phydev)
 {
 	int err;
 
-	err = rtl8211x_page_read(phydev, 0xa43, RTL8211F_INSR);
+	err = phy_read_paged(phydev, 0xa43, RTL8211F_INSR);
 
 	return (err < 0) ? err : 0;
 }
@@ -110,7 +87,7 @@ static int rtl8201_config_intr(struct phy_device *phydev)
 	else
 		val = 0;
 
-	return rtl8211x_page_write(phydev, 0x7, RTL8201F_IER, val);
+	return phy_write_paged(phydev, 0x7, RTL8201F_IER, val);
 }
 
 static int rtl8211b_config_intr(struct phy_device *phydev)
@@ -148,36 +125,24 @@ static int rtl8211f_config_intr(struct phy_device *phydev)
 	else
 		val = 0;
 
-	return rtl8211x_page_write(phydev, 0xa42, RTL821x_INER, val);
+	return phy_write_paged(phydev, 0xa42, RTL821x_INER, val);
 }
 
 static int rtl8211f_config_init(struct phy_device *phydev)
 {
 	int ret;
-	u16 val;
+	u16 val = 0;
 
 	ret = genphy_config_init(phydev);
 	if (ret < 0)
 		return ret;
 
-	ret = rtl8211x_page_read(phydev, 0xd08, 0x11);
-	if (ret < 0)
-		return ret;
-
-	val = ret & 0xffff;
-
 	/* enable TX-delay for rgmii-id and rgmii-txid, otherwise disable it */
 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
-		val |= RTL8211F_TX_DELAY;
-	else
-		val &= ~RTL8211F_TX_DELAY;
+		val = RTL8211F_TX_DELAY;
 
-	ret = rtl8211x_page_write(phydev, 0xd08, 0x11, val);
-	if (ret)
-		return ret;
-
-	return 0;
+	return phy_modify_paged(phydev, 0xd08, 0x11, RTL8211F_TX_DELAY, val);
 }
 
 static struct phy_driver realtek_drvs[] = {
@@ -197,6 +162,8 @@ static struct phy_driver realtek_drvs[] = {
 		.config_intr	= &rtl8201_config_intr,
 		.suspend	= genphy_suspend,
 		.resume		= genphy_resume,
+		.read_page	= rtl821x_read_page,
+		.write_page	= rtl821x_write_page,
 	}, {
 		.phy_id		= 0x001cc912,
 		.name		= "RTL8211B Gigabit Ethernet",
@@ -236,6 +203,8 @@ static struct phy_driver realtek_drvs[] = {
 		.config_intr	= &rtl8211f_config_intr,
 		.suspend	= genphy_suspend,
 		.resume		= genphy_resume,
+		.read_page	= rtl821x_read_page,
+		.write_page	= rtl821x_write_page,
 	},
 };
 
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index d8e5747..264d4af 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1006,17 +1006,18 @@ static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
 	if (!ifname_is_set)
 		snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index);
 
+	mutex_unlock(&pn->all_ppp_mutex);
+
 	ret = register_netdevice(ppp->dev);
 	if (ret < 0)
 		goto err_unit;
 
 	atomic_inc(&ppp_unit_count);
 
-	mutex_unlock(&pn->all_ppp_mutex);
-
 	return 0;
 
 err_unit:
+	mutex_lock(&pn->all_ppp_mutex);
 	unit_put(&pn->units_idr, ppp->file.index);
 err:
 	mutex_unlock(&pn->all_ppp_mutex);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 2fba3be..170a3e8 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -196,7 +196,7 @@ struct tun_flow_entry {
 
 #define TUN_NUM_FLOW_ENTRIES 1024
 
-struct tun_steering_prog {
+struct tun_prog {
 	struct rcu_head rcu;
 	struct bpf_prog *prog;
 };
@@ -238,7 +238,13 @@ struct tun_struct {
 	u32 rx_batched;
 	struct tun_pcpu_stats __percpu *pcpu_stats;
 	struct bpf_prog __rcu *xdp_prog;
-	struct tun_steering_prog __rcu *steering_prog;
+	struct tun_prog __rcu *steering_prog;
+	struct tun_prog __rcu *filter_prog;
+};
+
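+/* VLAN header (proto + TCI) that tun_put_user() may prepend to the frame */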
+struct veth {
+	__be16 h_vlan_proto;
+	__be16 h_vlan_TCI;
 };
 
 bool tun_is_xdp_buff(void *ptr)
@@ -590,7 +596,7 @@ static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
 
 static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
 {
-	struct tun_steering_prog *prog;
+	struct tun_prog *prog;
 	u16 ret = 0;
 
 	prog = rcu_dereference(tun->steering_prog);
@@ -1036,12 +1042,25 @@ static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
 #endif
 }
 
+static unsigned int run_ebpf_filter(struct tun_struct *tun,
+				    struct sk_buff *skb,
+				    int len)
+{
+	struct tun_prog *prog = rcu_dereference(tun->filter_prog);
+
+	if (prog)
+		len = bpf_prog_run_clear_cb(prog->prog, skb);
+
+	return len;
+}
+
 /* Net device start xmit */
 static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct tun_struct *tun = netdev_priv(dev);
 	int txq = skb->queue_mapping;
 	struct tun_file *tfile;
+	int len = skb->len;
 
 	rcu_read_lock();
 	tfile = rcu_dereference(tun->tfiles[txq]);
@@ -1067,6 +1086,15 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 	    sk_filter(tfile->socket.sk, skb))
 		goto drop;
 
+	len = run_ebpf_filter(tun, skb, len);
+
+	/* Trim extra bytes since we may insert vlan proto & TCI
+	 * in tun_put_user().
+	 */
+	len -= skb_vlan_tag_present(skb) ? sizeof(struct veth) : 0;
+	if (len <= 0 || pskb_trim(skb, len))
+		goto drop;
+
 	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
 		goto drop;
 
@@ -2054,10 +2082,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
 
 	if (vlan_hlen) {
 		int ret;
-		struct {
-			__be16 h_vlan_proto;
-			__be16 h_vlan_TCI;
-		} veth;
+		struct veth veth;
 
 		veth.h_vlan_proto = skb->vlan_proto;
 		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
@@ -2184,19 +2209,18 @@ static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
 	return ret;
 }
 
-static void tun_steering_prog_free(struct rcu_head *rcu)
+static void tun_prog_free(struct rcu_head *rcu)
 {
-	struct tun_steering_prog *prog = container_of(rcu,
-					 struct tun_steering_prog, rcu);
+	struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);
 
 	bpf_prog_destroy(prog->prog);
 	kfree(prog);
 }
 
-static int __tun_set_steering_ebpf(struct tun_struct *tun,
-				   struct bpf_prog *prog)
+static int __tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p,
+			  struct bpf_prog *prog)
 {
-	struct tun_steering_prog *old, *new = NULL;
+	struct tun_prog *old, *new = NULL;
 
 	if (prog) {
 		new = kmalloc(sizeof(*new), GFP_KERNEL);
@@ -2206,13 +2230,13 @@ static int __tun_set_steering_ebpf(struct tun_struct *tun,
 	}
 
 	spin_lock_bh(&tun->lock);
-	old = rcu_dereference_protected(tun->steering_prog,
+	old = rcu_dereference_protected(*prog_p,
 					lockdep_is_held(&tun->lock));
-	rcu_assign_pointer(tun->steering_prog, new);
+	rcu_assign_pointer(*prog_p, new);
 	spin_unlock_bh(&tun->lock);
 
 	if (old)
-		call_rcu(&old->rcu, tun_steering_prog_free);
+		call_rcu(&old->rcu, tun_prog_free);
 
 	return 0;
 }
@@ -2225,7 +2249,8 @@ static void tun_free_netdev(struct net_device *dev)
 	free_percpu(tun->pcpu_stats);
 	tun_flow_uninit(tun);
 	security_tun_dev_free_security(tun->security);
-	__tun_set_steering_ebpf(tun, NULL);
+	__tun_set_ebpf(tun, &tun->steering_prog, NULL);
+	__tun_set_ebpf(tun, &tun->filter_prog, NULL);
 }
 
 static void tun_setup(struct net_device *dev)
@@ -2720,7 +2745,8 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
 	return ret;
 }
 
-static int tun_set_steering_ebpf(struct tun_struct *tun, void __user *data)
+static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p,
+			void __user *data)
 {
 	struct bpf_prog *prog;
 	int fd;
@@ -2736,7 +2762,7 @@ static int tun_set_steering_ebpf(struct tun_struct *tun, void __user *data)
 			return PTR_ERR(prog);
 	}
 
-	return __tun_set_steering_ebpf(tun, prog);
+	return __tun_set_ebpf(tun, prog_p, prog);
 }
 
 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
@@ -3016,7 +3042,11 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
 		break;
 
 	case TUNSETSTEERINGEBPF:
-		ret = tun_set_steering_ebpf(tun, argp);
+		ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
+		break;
+
+	case TUNSETFILTEREBPF:
+		ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
 		break;
 
 	default:
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 94c7804..ec56ff2 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2396,6 +2396,7 @@ static int lan78xx_reset(struct lan78xx_net *dev)
 		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
 		dev->rx_qlen = 4;
+		dev->tx_qlen = 4;
 	}
 
 	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 7836737..e542555 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -489,6 +489,7 @@ static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = {
 
 static spinlock_t hwsim_radio_lock;
 static LIST_HEAD(hwsim_radios);
+static struct workqueue_struct *hwsim_wq;
 static int hwsim_radio_idx;
 
 static struct platform_driver mac80211_hwsim_driver = {
@@ -3125,6 +3126,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
 	if (info->attrs[HWSIM_ATTR_CHANNELS])
 		param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
 
+	if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) {
+		GENL_SET_ERR_MSG(info, "too many channels specified");
+		return -EINVAL;
+	}
+
 	if (info->attrs[HWSIM_ATTR_NO_VIF])
 		param.no_vif = true;
 
@@ -3347,7 +3353,7 @@ static void remove_user_radios(u32 portid)
 		if (entry->destroy_on_close && entry->portid == portid) {
 			list_del(&entry->list);
 			INIT_WORK(&entry->destroy_work, destroy_radio);
-			schedule_work(&entry->destroy_work);
+			queue_work(hwsim_wq, &entry->destroy_work);
 		}
 	}
 	spin_unlock_bh(&hwsim_radio_lock);
@@ -3422,7 +3428,7 @@ static void __net_exit hwsim_exit_net(struct net *net)
 
 		list_del(&data->list);
 		INIT_WORK(&data->destroy_work, destroy_radio);
-		schedule_work(&data->destroy_work);
+		queue_work(hwsim_wq, &data->destroy_work);
 	}
 	spin_unlock_bh(&hwsim_radio_lock);
 }
@@ -3454,6 +3460,10 @@ static int __init init_mac80211_hwsim(void)
 
 	spin_lock_init(&hwsim_radio_lock);
 
+	hwsim_wq = alloc_workqueue("hwsim_wq", WQ_MEM_RECLAIM, 0);
+	if (!hwsim_wq)
+		return -ENOMEM;
+
 	err = register_pernet_device(&hwsim_net_ops);
 	if (err)
 		return err;
@@ -3592,8 +3602,11 @@ static void __exit exit_mac80211_hwsim(void)
 	hwsim_exit_netlink();
 
 	mac80211_hwsim_free();
+	flush_workqueue(hwsim_wq);
+
 	unregister_netdev(hwsim_mon);
 	platform_driver_unregister(&mac80211_hwsim_driver);
 	unregister_pernet_device(&hwsim_net_ops);
+	destroy_workqueue(hwsim_wq);
 }
 module_exit(exit_mac80211_hwsim);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 76b4fe6..894c2cc 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -74,6 +74,7 @@ static struct nvmf_host *nvmf_host_default(void)
 		return NULL;
 
 	kref_init(&host->ref);
+	uuid_gen(&host->id);
 	snprintf(host->nqn, NVMF_NQN_SIZE,
 		"nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id);
 
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 0f695df..372ce99 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -765,10 +765,12 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		break;
 	case ASHMEM_SET_SIZE:
 		ret = -EINVAL;
+		mutex_lock(&ashmem_mutex);
 		if (!asma->file) {
 			ret = 0;
 			asma->size = (size_t)arg;
 		}
+		mutex_unlock(&ashmem_mutex);
 		break;
 	case ASHMEM_GET_SIZE:
 		ret = asma->size;
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 93eff7de..1b3efb1 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1147,11 +1147,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
 
 	udc = kzalloc(sizeof(*udc), GFP_KERNEL);
 	if (!udc)
-		goto err1;
-
-	ret = device_add(&gadget->dev);
-	if (ret)
-		goto err2;
+		goto err_put_gadget;
 
 	device_initialize(&udc->dev);
 	udc->dev.release = usb_udc_release;
@@ -1160,7 +1156,11 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
 	udc->dev.parent = parent;
 	ret = dev_set_name(&udc->dev, "%s", kobject_name(&parent->kobj));
 	if (ret)
-		goto err3;
+		goto err_put_udc;
+
+	ret = device_add(&gadget->dev);
+	if (ret)
+		goto err_put_udc;
 
 	udc->gadget = gadget;
 	gadget->udc = udc;
@@ -1170,7 +1170,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
 
 	ret = device_add(&udc->dev);
 	if (ret)
-		goto err4;
+		goto err_unlist_udc;
 
 	usb_gadget_set_state(gadget, USB_STATE_NOTATTACHED);
 	udc->vbus = true;
@@ -1178,27 +1178,25 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
 	/* pick up one of pending gadget drivers */
 	ret = check_pending_gadget_drivers(udc);
 	if (ret)
-		goto err5;
+		goto err_del_udc;
 
 	mutex_unlock(&udc_lock);
 
 	return 0;
 
-err5:
+ err_del_udc:
 	device_del(&udc->dev);
 
-err4:
+ err_unlist_udc:
 	list_del(&udc->list);
 	mutex_unlock(&udc_lock);
 
-err3:
-	put_device(&udc->dev);
 	device_del(&gadget->dev);
 
-err2:
-	kfree(udc);
+ err_put_udc:
+	put_device(&udc->dev);
 
-err1:
+ err_put_gadget:
 	put_device(&gadget->dev);
 	return ret;
 }
diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c
index 465dbf6..f723f7b 100644
--- a/drivers/usb/misc/usb3503.c
+++ b/drivers/usb/misc/usb3503.c
@@ -279,6 +279,8 @@ static int usb3503_probe(struct usb3503 *hub)
 	if (gpio_is_valid(hub->gpio_reset)) {
 		err = devm_gpio_request_one(dev, hub->gpio_reset,
 				GPIOF_OUT_INIT_LOW, "usb3503 reset");
+		/* Datasheet defines a hardware reset to be at least 100us */
+		usleep_range(100, 10000);
 		if (err) {
 			dev_err(dev,
 				"unable to request GPIO %d as reset pin (%d)\n",
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index f6ae753..f932f40 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1004,7 +1004,9 @@ static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg
 		break;
 
 	case MON_IOCQ_RING_SIZE:
+		mutex_lock(&rp->fetch_lock);
 		ret = rp->b_size;
+		mutex_unlock(&rp->fetch_lock);
 		break;
 
 	case MON_IOCT_RING_SIZE:
@@ -1231,12 +1233,16 @@ static int mon_bin_vma_fault(struct vm_fault *vmf)
 	unsigned long offset, chunk_idx;
 	struct page *pageptr;
 
+	mutex_lock(&rp->fetch_lock);
 	offset = vmf->pgoff << PAGE_SHIFT;
-	if (offset >= rp->b_size)
+	if (offset >= rp->b_size) {
+		mutex_unlock(&rp->fetch_lock);
 		return VM_FAULT_SIGBUS;
+	}
 	chunk_idx = offset / CHUNK_SIZE;
 	pageptr = rp->b_vec[chunk_idx].pg;
 	get_page(pageptr);
+	mutex_unlock(&rp->fetch_lock);
 	vmf->page = pageptr;
 	return 0;
 }
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 7c6273b..06d502b 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -124,6 +124,7 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
 	{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
 	{ USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
+	{ USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */
 	{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
 	{ USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
 	{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
@@ -174,6 +175,7 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
 	{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
 	{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
+	{ USB_DEVICE(0x18EF, 0xE030) }, /* ELV ALC 8xxx Battery Charger */
 	{ USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */
 	{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
 	{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index e6127fb..a7d08ae 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -143,6 +143,13 @@ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_NO_ATA_1X),
 
+/* Reported-by: Icenowy Zheng <icenowy@aosc.io> */
+UNUSUAL_DEV(0x2537, 0x1068, 0x0000, 0x9999,
+		"Norelsys",
+		"NS1068X",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_IGNORE_UAS),
+
 /* Reported-by: Takeo Nakayama <javhera@gmx.com> */
 UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999,
 		"JMicron",
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index 7b219d9..ee2bbce 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -91,7 +91,7 @@ static void usbip_dump_usb_device(struct usb_device *udev)
 	dev_dbg(dev, "       devnum(%d) devpath(%s) usb speed(%s)",
 		udev->devnum, udev->devpath, usb_speed_string(udev->speed));
 
-	pr_debug("tt %p, ttport %d\n", udev->tt, udev->ttport);
+	pr_debug("tt hub ttport %d\n", udev->ttport);
 
 	dev_dbg(dev, "                    ");
 	for (i = 0; i < 16; i++)
@@ -124,12 +124,8 @@ static void usbip_dump_usb_device(struct usb_device *udev)
 	}
 	pr_debug("\n");
 
-	dev_dbg(dev, "parent %p, bus %p\n", udev->parent, udev->bus);
-
-	dev_dbg(dev,
-		"descriptor %p, config %p, actconfig %p, rawdescriptors %p\n",
-		&udev->descriptor, udev->config,
-		udev->actconfig, udev->rawdescriptors);
+	dev_dbg(dev, "parent %s, bus %s\n", dev_name(&udev->parent->dev),
+		udev->bus->bus_name);
 
 	dev_dbg(dev, "have_langid %d, string_langid %d\n",
 		udev->have_langid, udev->string_langid);
@@ -237,9 +233,6 @@ void usbip_dump_urb(struct urb *urb)
 
 	dev = &urb->dev->dev;
 
-	dev_dbg(dev, "   urb                   :%p\n", urb);
-	dev_dbg(dev, "   dev                   :%p\n", urb->dev);
-
 	usbip_dump_usb_device(urb->dev);
 
 	dev_dbg(dev, "   pipe                  :%08x ", urb->pipe);
@@ -248,11 +241,9 @@ void usbip_dump_urb(struct urb *urb)
 
 	dev_dbg(dev, "   status                :%d\n", urb->status);
 	dev_dbg(dev, "   transfer_flags        :%08X\n", urb->transfer_flags);
-	dev_dbg(dev, "   transfer_buffer       :%p\n", urb->transfer_buffer);
 	dev_dbg(dev, "   transfer_buffer_length:%d\n",
 						urb->transfer_buffer_length);
 	dev_dbg(dev, "   actual_length         :%d\n", urb->actual_length);
-	dev_dbg(dev, "   setup_packet          :%p\n", urb->setup_packet);
 
 	if (urb->setup_packet && usb_pipetype(urb->pipe) == PIPE_CONTROL)
 		usbip_dump_usb_ctrlrequest(
@@ -262,8 +253,6 @@ void usbip_dump_urb(struct urb *urb)
 	dev_dbg(dev, "   number_of_packets     :%d\n", urb->number_of_packets);
 	dev_dbg(dev, "   interval              :%d\n", urb->interval);
 	dev_dbg(dev, "   error_count           :%d\n", urb->error_count);
-	dev_dbg(dev, "   context               :%p\n", urb->context);
-	dev_dbg(dev, "   complete              :%p\n", urb->complete);
 }
 EXPORT_SYMBOL_GPL(usbip_dump_urb);
 
diff --git a/drivers/usb/usbip/vudc_rx.c b/drivers/usb/usbip/vudc_rx.c
index df1e309..1e8a23d 100644
--- a/drivers/usb/usbip/vudc_rx.c
+++ b/drivers/usb/usbip/vudc_rx.c
@@ -120,6 +120,25 @@ static int v_recv_cmd_submit(struct vudc *udc,
 	urb_p->new = 1;
 	urb_p->seqnum = pdu->base.seqnum;
 
+	if (urb_p->ep->type == USB_ENDPOINT_XFER_ISOC) {
+		/* validate packet size and number of packets */
+		unsigned int maxp, packets, bytes;
+
+		maxp = usb_endpoint_maxp(urb_p->ep->desc);
+		maxp *= usb_endpoint_maxp_mult(urb_p->ep->desc);
+		bytes = pdu->u.cmd_submit.transfer_buffer_length;
+		packets = DIV_ROUND_UP(bytes, maxp);
+
+		if (pdu->u.cmd_submit.number_of_packets < 0 ||
+		    pdu->u.cmd_submit.number_of_packets > packets) {
+			dev_err(&udc->gadget.dev,
+				"CMD_SUBMIT: isoc invalid num packets %d\n",
+				pdu->u.cmd_submit.number_of_packets);
+			ret = -EMSGSIZE;
+			goto free_urbp;
+		}
+	}
+
 	ret = alloc_urb_from_cmd(&urb_p->urb, pdu, urb_p->ep->type);
 	if (ret) {
 		usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
diff --git a/drivers/usb/usbip/vudc_tx.c b/drivers/usb/usbip/vudc_tx.c
index 1440ae0..3ccb17c 100644
--- a/drivers/usb/usbip/vudc_tx.c
+++ b/drivers/usb/usbip/vudc_tx.c
@@ -85,6 +85,13 @@ static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p)
 	memset(&pdu_header, 0, sizeof(pdu_header));
 	memset(&msg, 0, sizeof(msg));
 
+	if (urb->actual_length > 0 && !urb->transfer_buffer) {
+		dev_err(&udc->gadget.dev,
+			"urb: actual_length %d transfer_buffer null\n",
+			urb->actual_length);
+		return -1;
+	}
+
 	if (urb_p->type == USB_ENDPOINT_XFER_ISOC)
 		iovnum = 2 + urb->number_of_packets;
 	else
@@ -100,8 +107,8 @@ static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p)
 
 	/* 1. setup usbip_header */
 	setup_ret_submit_pdu(&pdu_header, urb_p);
-	usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n",
-			  pdu_header.base.seqnum, urb);
+	usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
+			  pdu_header.base.seqnum);
 	usbip_header_correct_endian(&pdu_header, 1);
 
 	iov[iovnum].iov_base = &pdu_header;
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 57efbd3..bd56653 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -380,10 +380,8 @@ static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
 		}
 		range = 0;
 		while (range < pages) {
-			if (map->unmap_ops[offset+range].handle == -1) {
-				range--;
+			if (map->unmap_ops[offset+range].handle == -1)
 				break;
-			}
 			range++;
 		}
 		err = __unmap_grant_pages(map, offset, range);
@@ -1073,8 +1071,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 out_unlock_put:
 	mutex_unlock(&priv->lock);
 out_put_map:
-	if (use_ptemod)
+	if (use_ptemod) {
 		map->vma = NULL;
+		unmap_grant_pages(map, 0, map->count);
+	}
 	gntdev_put_map(priv, map);
 	return err;
 }
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 5da18eb..83e2349 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -30,7 +30,7 @@
 #include <linux/ratelimit.h>
 #include <linux/uuid.h>
 #include <linux/semaphore.h>
-#include <linux/bpf.h>
+#include <linux/error-injection.h>
 #include <asm/unaligned.h>
 #include "ctree.h"
 #include "disk-io.h"
@@ -3124,7 +3124,7 @@ int open_ctree(struct super_block *sb,
 		goto fail_block_groups;
 	goto retry_root_backup;
 }
-BPF_ALLOW_ERROR_INJECTION(open_ctree);
+ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
 
 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
 {
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index fb13828..586bb06 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -22,7 +22,7 @@
 #include <linux/slab.h>
 #include <linux/math64.h>
 #include <linux/ratelimit.h>
-#include <linux/bpf.h>
+#include <linux/error-injection.h>
 #include "ctree.h"
 #include "free-space-cache.h"
 #include "transaction.h"
@@ -333,7 +333,7 @@ static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
 
 	return 0;
 }
-BPF_ALLOW_ERROR_INJECTION(io_ctl_init);
+ALLOW_ERROR_INJECTION(io_ctl_init, ERRNO);
 
 static void io_ctl_free(struct btrfs_io_ctl *io_ctl)
 {
diff --git a/include/asm-generic/error-injection.h b/include/asm-generic/error-injection.h
new file mode 100644
index 0000000..296c654
--- /dev/null
+++ b/include/asm-generic/error-injection.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_ERROR_INJECTION_H
+#define _ASM_GENERIC_ERROR_INJECTION_H
+
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+enum {
+	EI_ETYPE_NONE,		/* Dummy value for undefined case */
+	EI_ETYPE_NULL,		/* Return NULL on failure */
+	EI_ETYPE_ERRNO,		/* Return -ERRNO on failure */
+	EI_ETYPE_ERRNO_NULL,	/* Return -ERRNO or NULL on failure */
+};
+
+struct error_injection_entry {
+	unsigned long	addr;
+	int		etype;
+};
+
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+/*
+ * Whitelist-generating macro. Specify functions which can be
+ * error-injected using this macro.
+ */
+#define ALLOW_ERROR_INJECTION(fname, _etype)				\
+static struct error_injection_entry __used				\
+	__attribute__((__section__("_error_injection_whitelist")))	\
+	_eil_addr_##fname = {						\
+		.addr = (unsigned long)fname,				\
+		.etype = EI_ETYPE_##_etype,				\
+	};
+#else
+#define ALLOW_ERROR_INJECTION(fname, _etype)
+#endif
+#endif
+
+#endif /* _ASM_GENERIC_ERROR_INJECTION_H */
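[Editor's note: a minimal usage sketch of the new macro; the function name is hypothetical, and the btrfs conversions above show the real in-tree users.]

/* Hypothetical example: whitelist my_init() for error injection,
 * with injected failures reported as an -ERRNO return value.
 */
#include <linux/error-injection.h>

static int my_init(void)
{
	/* normal initialization; may legitimately fail with -ENOMEM etc. */
	return 0;
}
ALLOW_ERROR_INJECTION(my_init, ERRNO);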
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index a2e8582..ebe544e 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -136,13 +136,13 @@
 #define KPROBE_BLACKLIST()
 #endif
 
-#ifdef CONFIG_BPF_KPROBE_OVERRIDE
-#define ERROR_INJECT_LIST()	. = ALIGN(8);						\
-				VMLINUX_SYMBOL(__start_kprobe_error_inject_list) = .;	\
-				KEEP(*(_kprobe_error_inject_list))			\
-				VMLINUX_SYMBOL(__stop_kprobe_error_inject_list) = .;
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+#define ERROR_INJECT_WHITELIST()	STRUCT_ALIGN();			      \
+			VMLINUX_SYMBOL(__start_error_injection_whitelist) = .;\
+			KEEP(*(_error_injection_whitelist))		      \
+			VMLINUX_SYMBOL(__stop_error_injection_whitelist) = .;
 #else
-#define ERROR_INJECT_LIST()
+#define ERROR_INJECT_WHITELIST()
 #endif
 
 #ifdef CONFIG_EVENT_TRACING
@@ -573,7 +573,7 @@
 	FTRACE_EVENTS()							\
 	TRACE_SYSCALLS()						\
 	KPROBE_BLACKLIST()						\
-	ERROR_INJECT_LIST()						\
+	ERROR_INJECT_WHITELIST()					\
 	MEM_DISCARD(init.rodata)					\
 	CLK_OF_TABLES()							\
 	RESERVEDMEM_OF_TABLES()						\
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 44f26f6..5c2c104 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -25,6 +25,7 @@ struct bpf_map;
 /* map is generic key/value storage optionally accessible by eBPF programs */
 struct bpf_map_ops {
 	/* funcs callable from userspace (via syscall) */
+	int (*map_alloc_check)(union bpf_attr *attr);
 	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
 	void (*map_release)(struct bpf_map *map, struct file *map_file);
 	void (*map_free)(struct bpf_map *map);
@@ -73,6 +74,33 @@ struct bpf_map {
 	char name[BPF_OBJ_NAME_LEN];
 };
 
+struct bpf_offloaded_map;
+
+struct bpf_map_dev_ops {
+	int (*map_get_next_key)(struct bpf_offloaded_map *map,
+				void *key, void *next_key);
+	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
+			       void *key, void *value);
+	int (*map_update_elem)(struct bpf_offloaded_map *map,
+			       void *key, void *value, u64 flags);
+	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
+};
+
+struct bpf_offloaded_map {
+	struct bpf_map map;
+	struct net_device *netdev;
+	const struct bpf_map_dev_ops *dev_ops;
+	void *dev_priv;
+	struct list_head offloads;
+};
+
+static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
+{
+	return container_of(map, struct bpf_offloaded_map, map);
+}
+
+extern const struct bpf_map_ops bpf_map_offload_ops;
+
 /* function argument constraints */
 enum bpf_arg_type {
 	ARG_DONTCARE = 0,	/* unused argument in helper function */
@@ -199,7 +227,7 @@ struct bpf_prog_offload_ops {
 			 int insn_idx, int prev_insn_idx);
 };
 
-struct bpf_dev_offload {
+struct bpf_prog_offload {
 	struct bpf_prog		*prog;
 	struct net_device	*netdev;
 	void			*dev_priv;
@@ -229,7 +257,7 @@ struct bpf_prog_aux {
 #ifdef CONFIG_SECURITY
 	void *security;
 #endif
-	struct bpf_dev_offload *offload;
+	struct bpf_prog_offload *offload;
 	union {
 		struct work_struct work;
 		struct rcu_head	rcu;
@@ -368,6 +396,7 @@ int __bpf_prog_charge(struct user_struct *user, u32 pages);
 void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
 
 void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
+void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
 
 struct bpf_map *bpf_map_get_with_uref(u32 ufd);
 struct bpf_map *__bpf_map_get(struct fd f);
@@ -377,6 +406,7 @@ void bpf_map_put(struct bpf_map *map);
 int bpf_map_precharge_memlock(u32 pages);
 void *bpf_map_area_alloc(size_t size, int numa_node);
 void bpf_map_area_free(void *base);
+void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
 
 extern int sysctl_unprivileged_bpf_disabled;
 
@@ -554,6 +584,15 @@ void bpf_prog_offload_destroy(struct bpf_prog *prog);
 int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
 			       struct bpf_prog *prog);
 
+int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
+int bpf_map_offload_update_elem(struct bpf_map *map,
+				void *key, void *value, u64 flags);
+int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
+int bpf_map_offload_get_next_key(struct bpf_map *map,
+				 void *key, void *next_key);
+
+bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map);
+
 #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
 int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
 
@@ -561,6 +600,14 @@ static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
 {
 	return aux->offload_requested;
 }
+
+static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
+{
+	return unlikely(map->ops == &bpf_map_offload_ops);
+}
+
+struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
+void bpf_map_offload_map_free(struct bpf_map *map);
 #else
 static inline int bpf_prog_offload_init(struct bpf_prog *prog,
 					union bpf_attr *attr)
@@ -572,6 +619,20 @@ static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
 {
 	return false;
 }
+
+static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
+{
+	return false;
+}
+
+static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
+{
+	return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void bpf_map_offload_map_free(struct bpf_map *map)
+{
+}
 #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
 
 #if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_INET)
@@ -613,15 +674,4 @@ extern const struct bpf_func_proto bpf_sock_map_update_proto;
 void bpf_user_rnd_init_once(void);
 u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 
-#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
-#ifdef CONFIG_BPF_KPROBE_OVERRIDE
-#define BPF_ALLOW_ERROR_INJECTION(fname)				\
-static unsigned long __used						\
-	__attribute__((__section__("_kprobe_error_inject_list")))	\
-	_eil_addr_##fname = (unsigned long)fname;
-#else
-#define BPF_ALLOW_ERROR_INJECTION(fname)
-#endif
-#endif
-
 #endif /* _LINUX_BPF_H */
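[Editor's note: for orientation, a hedged sketch of the driver side of the new map-offload hooks; all mydrv_* names are hypothetical.]

/* Hypothetical driver-side bpf_map_dev_ops implementation; each
 * callback services one map operation against device state hanging
 * off offmap->dev_priv.
 */
static int mydrv_map_get_next_key(struct bpf_offloaded_map *offmap,
				  void *key, void *next_key)
{
	return -EOPNOTSUPP;		/* stub: walk device tables */
}

static int mydrv_map_lookup(struct bpf_offloaded_map *offmap,
			    void *key, void *value)
{
	return -EOPNOTSUPP;		/* stub: query device tables */
}

static int mydrv_map_update(struct bpf_offloaded_map *offmap,
			    void *key, void *value, u64 flags)
{
	return -EOPNOTSUPP;		/* stub: program device tables */
}

static int mydrv_map_delete(struct bpf_offloaded_map *offmap, void *key)
{
	return -EOPNOTSUPP;		/* stub: remove a device entry */
}

static const struct bpf_map_dev_ops mydrv_map_ops = {
	.map_get_next_key	= mydrv_map_get_next_key,
	.map_lookup_elem	= mydrv_map_lookup,
	.map_update_elem	= mydrv_map_update,
	.map_delete_elem	= mydrv_map_delete,
};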
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 94a59ba..519e949 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -32,7 +32,6 @@ struct completion {
 #define init_completion(x) __init_completion(x)
 static inline void complete_acquire(struct completion *x) {}
 static inline void complete_release(struct completion *x) {}
-static inline void complete_release_commit(struct completion *x) {}
 
 #define COMPLETION_INITIALIZER(work) \
 	{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index a04ef7c1..7b01bc1 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -47,6 +47,13 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr);
 extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
 extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
 
+extern ssize_t cpu_show_meltdown(struct device *dev,
+				 struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_spectre_v1(struct device *dev,
+				   struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_spectre_v2(struct device *dev,
+				   struct device_attribute *attr, char *buf);
+
 extern __printf(4, 5)
 struct device *cpu_device_create(struct device *parent, void *drvdata,
 				 const struct attribute_group **groups,
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
index 06097ef..b511f6d 100644
--- a/include/linux/crash_core.h
+++ b/include/linux/crash_core.h
@@ -42,6 +42,8 @@ phys_addr_t paddr_vmcoreinfo_note(void);
 	vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
 #define VMCOREINFO_SYMBOL(name) \
 	vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)
+#define VMCOREINFO_SYMBOL_ARRAY(name) \
+	vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)name)
 #define VMCOREINFO_SIZE(name) \
 	vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
 			      (unsigned long)sizeof(name))
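[Editor's note: a short illustration of when each variant applies, using symbols already exported via vmcoreinfo in the kernel.]

/* Plain symbol: the address-of operator is required. */
VMCOREINFO_SYMBOL(init_uts_ns);
/* Array symbol: the name itself already decays to the address,
 * so the ARRAY variant avoids taking & of an array.
 */
VMCOREINFO_SYMBOL_ARRAY(swapper_pg_dir);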
diff --git a/include/linux/error-injection.h b/include/linux/error-injection.h
new file mode 100644
index 0000000..280c61e
--- /dev/null
+++ b/include/linux/error-injection.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ERROR_INJECTION_H
+#define _LINUX_ERROR_INJECTION_H
+
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+
+#include <asm/error-injection.h>
+
+extern bool within_error_injection_list(unsigned long addr);
+extern int get_injectable_error_type(unsigned long addr);
+
+#else /* !CONFIG_FUNCTION_ERROR_INJECTION */
+
+#include <asm-generic/error-injection.h>
+static inline bool within_error_injection_list(unsigned long addr)
+{
+	return false;
+}
+
+static inline int get_injectable_error_type(unsigned long addr)
+{
+	return EI_ETYPE_NONE;
+}
+
+#endif
+
+#endif /* _LINUX_ERROR_INJECTION_H */
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 46cb57d..1b3996f 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -27,22 +27,18 @@
 # define trace_hardirq_enter()			\
 do {						\
 	current->hardirq_context++;		\
-	crossrelease_hist_start(XHLOCK_HARD);	\
 } while (0)
 # define trace_hardirq_exit()			\
 do {						\
 	current->hardirq_context--;		\
-	crossrelease_hist_end(XHLOCK_HARD);	\
 } while (0)
 # define lockdep_softirq_enter()		\
 do {						\
 	current->softirq_context++;		\
-	crossrelease_hist_start(XHLOCK_SOFT);	\
 } while (0)
 # define lockdep_softirq_exit()			\
 do {						\
 	current->softirq_context--;		\
-	crossrelease_hist_end(XHLOCK_SOFT);	\
 } while (0)
 # define INIT_TRACE_IRQFLAGS	.softirqs_enabled = 1,
 #else
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 963fd36..9440a2f 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -271,7 +271,6 @@ extern bool arch_kprobe_on_func_entry(unsigned long offset);
 extern bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
 
 extern bool within_kprobe_blacklist(unsigned long addr);
-extern bool within_kprobe_error_injection_list(unsigned long addr);
 
 struct kprobe_insn_cache {
 	struct mutex mutex;
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 2e75dc3..3251d9c 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -475,8 +475,6 @@ enum xhlock_context_t {
 #define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
 	{ .name = (_name), .key = (void *)(_key), }
 
-static inline void crossrelease_hist_start(enum xhlock_context_t c) {}
-static inline void crossrelease_hist_end(enum xhlock_context_t c) {}
 static inline void lockdep_invariant_state(bool force) {}
 static inline void lockdep_init_task(struct task_struct *task) {}
 static inline void lockdep_free_task(struct task_struct *task) {}
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 1f509d0..a061042 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -36,6 +36,7 @@
 #include <linux/kernel.h>
 #include <linux/completion.h>
 #include <linux/pci.h>
+#include <linux/irq.h>
 #include <linux/spinlock_types.h>
 #include <linux/semaphore.h>
 #include <linux/slab.h>
@@ -1231,7 +1232,23 @@ enum {
 static inline const struct cpumask *
 mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector)
 {
-	return pci_irq_get_affinity(dev->pdev, MLX5_EQ_VEC_COMP_BASE + vector);
+	const struct cpumask *mask;
+	struct irq_desc *desc;
+	unsigned int irq;
+	int eqn;
+	int err;
+
+	err = mlx5_vector2eqn(dev, vector, &eqn, &irq);
+	if (err)
+		return NULL;
+
+	desc = irq_to_desc(irq);
+#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
+#else
+	mask = desc->irq_common_data.affinity;
+#endif
+	return mask;
 }
 
 #endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 78e36fc..acd829d 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1035,8 +1035,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         log_max_wq_sz[0x5];
 
 	u8         nic_vport_change_event[0x1];
-	u8         disable_local_lb[0x1];
-	u8         reserved_at_3e2[0x1];
+	u8         disable_local_lb_uc[0x1];
+	u8         disable_local_lb_mc[0x1];
 	u8         log_min_hairpin_wq_data_sz[0x5];
 	u8         reserved_at_3e8[0x3];
 	u8         log_max_vlan_list[0x5];
diff --git a/include/linux/module.h b/include/linux/module.h
index 548fa09..9642d31 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -19,6 +19,7 @@
 #include <linux/jump_label.h>
 #include <linux/export.h>
 #include <linux/rbtree_latch.h>
+#include <linux/error-injection.h>
 
 #include <linux/percpu.h>
 #include <asm/module.h>
@@ -476,9 +477,9 @@ struct module {
 	unsigned int num_ctors;
 #endif
 
-#ifdef CONFIG_BPF_KPROBE_OVERRIDE
-	unsigned int num_kprobe_ei_funcs;
-	unsigned long *kprobe_ei_funcs;
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+	struct error_injection_entry *ei_funcs;
+	unsigned int num_ei_funcs;
 #endif
 } ____cacheline_aligned __randomize_layout;
 #ifndef MODULE_ARCH_INIT
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 6d95477..ed0799a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -805,6 +805,8 @@ enum bpf_netdev_command {
 	BPF_OFFLOAD_VERIFIER_PREP,
 	BPF_OFFLOAD_TRANSLATE,
 	BPF_OFFLOAD_DESTROY,
+	BPF_OFFLOAD_MAP_ALLOC,
+	BPF_OFFLOAD_MAP_FREE,
 };
 
 struct bpf_prog_offload_ops;
@@ -835,6 +837,10 @@ struct netdev_bpf {
 		struct {
 			struct bpf_prog *prog;
 		} offload;
+		/* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
+		struct {
+			struct bpf_offloaded_map *offmap;
+		};
 	};
 };
 
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 49b4257..f3075d6 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -85,7 +85,7 @@ struct netlink_ext_ack {
  * to the lack of an output buffer.)
  */
 #define NL_SET_ERR_MSG(extack, msg) do {		\
-	static const char __msg[] = (msg);		\
+	static const char __msg[] = msg;		\
 	struct netlink_ext_ack *__extack = (extack);	\
 							\
 	if (__extack)					\
@@ -101,7 +101,7 @@ struct netlink_ext_ack {
 } while (0)
 
 #define NL_SET_ERR_MSG_ATTR(extack, attr, msg) do {	\
-	static const char __msg[] = (msg);		\
+	static const char __msg[] = msg;		\
 	struct netlink_ext_ack *__extack = (extack);	\
 							\
 	if (__extack) {					\
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 47715a3..5a0c3e5 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -765,6 +765,55 @@ int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set);
 int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set);
 
 /**
+ * __phy_set_bits - Convenience function for setting bits in a PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @val: bits to set
+ *
+ * The caller must have taken the MDIO bus lock.
+ */
+static inline int __phy_set_bits(struct phy_device *phydev, u32 regnum, u16 val)
+{
+	return __phy_modify(phydev, regnum, 0, val);
+}
+
+/**
+ * __phy_clear_bits - Convenience function for clearing bits in a PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @val: bits to clear
+ *
+ * The caller must have taken the MDIO bus lock.
+ */
+static inline int __phy_clear_bits(struct phy_device *phydev, u32 regnum,
+				   u16 val)
+{
+	return __phy_modify(phydev, regnum, val, 0);
+}
+
+/**
+ * phy_set_bits - Convenience function for setting bits in a PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @val: bits to set
+ */
+static inline int phy_set_bits(struct phy_device *phydev, u32 regnum, u16 val)
+{
+	return phy_modify(phydev, regnum, 0, val);
+}
+
+/**
+ * phy_clear_bits - Convenience function for clearing bits in a PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @val: bits to clear
+ */
+static inline int phy_clear_bits(struct phy_device *phydev, u32 regnum, u16 val)
+{
+	return phy_modify(phydev, regnum, val, 0);
+}
+
+/**
  * phy_interrupt_is_valid - Convenience function for testing a given PHY irq
  * @phydev: the phy_device struct
  *
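[Editor's note: a usage sketch of the new helpers; register and bit names are hypothetical.]

/* Hypothetical: set one control bit without open-coding a
 * read/modify/write sequence in the driver.
 */
#define MYPHY_CTRL_REG	0x1b
#define MYPHY_CTRL_EDPD	BIT(8)

static int myphy_config_init(struct phy_device *phydev)
{
	return phy_set_bits(phydev, MYPHY_CTRL_REG, MYPHY_CTRL_EDPD);
}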
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 13fb06a..9ca1726 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -174,6 +174,15 @@ static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
  * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL.
  * If ring is never resized, and if the pointer is merely
  * tested, there's no need to take the lock - see e.g.  __ptr_ring_empty.
+ * However, if called outside the lock, and if some other CPU
+ * consumes ring entries at the same time, the value returned
+ * is not guaranteed to be correct.
+ * In this case - to avoid incorrectly detecting the ring
+ * as empty - the CPU consuming the ring entries is responsible
+ * for either consuming all ring entries until the ring is empty,
+ * or synchronizing with some other CPU and causing it to
+ * execute __ptr_ring_peek and/or consume the ring entries
+ * after the synchronization point.
  */
 static inline void *__ptr_ring_peek(struct ptr_ring *r)
 {
@@ -182,10 +191,7 @@ static inline void *__ptr_ring_peek(struct ptr_ring *r)
 	return NULL;
 }
 
-/* Note: callers invoking this in a loop must use a compiler barrier,
- * for example cpu_relax(). Callers must take consumer_lock
- * if the ring is ever resized - see e.g. ptr_ring_empty.
- */
+/* See __ptr_ring_peek above for locking rules. */
 static inline bool __ptr_ring_empty(struct ptr_ring *r)
 {
 	return !__ptr_ring_peek(r);
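[Editor's note: under the rules spelled out above, a minimal consumer-side sketch; process() is a hypothetical handler.]

/* Safe without extra synchronization only because this CPU is the
 * sole consumer, the ring is never resized, and the loop drains the
 * ring completely, per the comment on __ptr_ring_peek() above.
 */
static void drain_ring(struct ptr_ring *r)
{
	void *ptr;

	while ((ptr = __ptr_ring_consume(r)))
		process(ptr);
}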
diff --git a/include/net/arp.h b/include/net/arp.h
index dc8cd47..977aabf 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -20,6 +20,9 @@ static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32
 
 static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
 {
+	if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
+		key = INADDR_ANY;
+
 	return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
 }
 
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index ab30a22..81174f9 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -815,6 +815,8 @@ struct cfg80211_csa_settings {
 	u8 count;
 };
 
+#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
+
 /**
  * struct iface_combination_params - input parameters for interface combinations
  *
diff --git a/include/net/devlink.h b/include/net/devlink.h
index b9654e1..6545b03 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -26,10 +26,12 @@ struct devlink {
 	struct list_head port_list;
 	struct list_head sb_list;
 	struct list_head dpipe_table_list;
+	struct list_head resource_list;
 	struct devlink_dpipe_headers *dpipe_headers;
 	const struct devlink_ops *ops;
 	struct device *dev;
 	possible_net_t _net;
+	struct mutex lock;
 	char priv[0] __aligned(NETDEV_ALIGN);
 };
 
@@ -181,6 +183,9 @@ struct devlink_dpipe_table_ops;
  * @counters_enabled: indicates if counters are active
  * @counter_control_extern: indicates if counter control is in dpipe or
  *			    external tool
+ * @resource_valid: indicates that the resource id is valid
+ * @resource_id: id of the resource this table is related to
+ * @resource_units: number of resource units consumed per table entry
  * @table_ops: table operations
  * @rcu: rcu
  */
@@ -190,6 +195,9 @@ struct devlink_dpipe_table {
 	const char *name;
 	bool counters_enabled;
 	bool counter_control_extern;
+	bool resource_valid;
+	u64 resource_id;
+	u64 resource_units;
 	struct devlink_dpipe_table_ops *table_ops;
 	struct rcu_head rcu;
 };
@@ -223,7 +231,63 @@ struct devlink_dpipe_headers {
 	unsigned int headers_count;
 };
 
+/**
+ * struct devlink_resource_ops - resource ops
+ * @occ_get: get the occupied size
+ * @size_validate: validate the size of the resource before an update;
+ *                 a reload is needed for changes to take place
+ */
+struct devlink_resource_ops {
+	u64 (*occ_get)(struct devlink *devlink);
+	int (*size_validate)(struct devlink *devlink, u64 size,
+			     struct netlink_ext_ack *extack);
+};
+
+/**
+ * struct devlink_resource_size_params - resource's size parameters
+ * @size_min: minimum size which can be set
+ * @size_max: maximum size which can be set
+ * @size_granularity: size granularity
+ * @unit: resource's basic unit
+ */
+struct devlink_resource_size_params {
+	u64 size_min;
+	u64 size_max;
+	u64 size_granularity;
+	enum devlink_resource_unit unit;
+};
+
+/**
+ * struct devlink_resource - devlink resource
+ * @name: name of the resource
+ * @id: id, per devlink instance
+ * @size: size of the resource
+ * @size_new: updated size of the resource; a reload is needed
+ * @size_valid: set when the total size of the resource, including its
+ *              children, is valid
+ * @parent: parent resource
+ * @size_params: size parameters
+ * @list: parent list
+ * @resource_list: list of child resources
+ * @resource_ops: resource ops
+ */
+struct devlink_resource {
+	const char *name;
+	u64 id;
+	u64 size;
+	u64 size_new;
+	bool size_valid;
+	struct devlink_resource *parent;
+	struct devlink_resource_size_params *size_params;
+	struct list_head list;
+	struct list_head resource_list;
+	const struct devlink_resource_ops *resource_ops;
+};
+
+#define DEVLINK_RESOURCE_ID_PARENT_TOP 0
+
 struct devlink_ops {
+	int (*reload)(struct devlink *devlink);
 	int (*port_type_set)(struct devlink_port *devlink_port,
 			     enum devlink_port_type port_type);
 	int (*port_split)(struct devlink *devlink, unsigned int port_index,
@@ -332,6 +396,23 @@ extern struct devlink_dpipe_header devlink_dpipe_header_ethernet;
 extern struct devlink_dpipe_header devlink_dpipe_header_ipv4;
 extern struct devlink_dpipe_header devlink_dpipe_header_ipv6;
 
+int devlink_resource_register(struct devlink *devlink,
+			      const char *resource_name,
+			      bool top_hierarchy,
+			      u64 resource_size,
+			      u64 resource_id,
+			      u64 parent_resource_id,
+			      struct devlink_resource_size_params *size_params,
+			      const struct devlink_resource_ops *resource_ops);
+void devlink_resources_unregister(struct devlink *devlink,
+				  struct devlink_resource *resource);
+int devlink_resource_size_get(struct devlink *devlink,
+			      u64 resource_id,
+			      u64 *p_resource_size);
+int devlink_dpipe_table_resource_set(struct devlink *devlink,
+				     const char *table_name, u64 resource_id,
+				     u64 resource_units);
+
 #else
 
 static inline struct devlink *devlink_alloc(const struct devlink_ops *ops,
@@ -468,6 +549,40 @@ devlink_dpipe_match_put(struct sk_buff *skb,
 	return 0;
 }
 
+static inline int
+devlink_resource_register(struct devlink *devlink,
+			  const char *resource_name,
+			  bool top_hierarchy,
+			  u64 resource_size,
+			  u64 resource_id,
+			  u64 parent_resource_id,
+			  struct devlink_resource_size_params *size_params,
+			  const struct devlink_resource_ops *resource_ops)
+{
+	return 0;
+}
+
+static inline void
+devlink_resources_unregister(struct devlink *devlink,
+			     struct devlink_resource *resource)
+{
+}
+
+static inline int
+devlink_resource_size_get(struct devlink *devlink, u64 resource_id,
+			  u64 *p_resource_size)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int
+devlink_dpipe_table_resource_set(struct devlink *devlink,
+				 const char *table_name, u64 resource_id,
+				 u64 resource_units)
+{
+	return -EOPNOTSUPP;
+}
+
 #endif
 
 #endif /* _NET_DEVLINK_H_ */
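[Editor's note: a registration sketch under made-up sizes and ids; mlxsw's KVD, per the new ABI document, is the intended first user, and all mydrv_* names are hypothetical.]

static u64 mydrv_kvd_occ_get(struct devlink *devlink)
{
	return 0;	/* stub: report current occupancy */
}

static const struct devlink_resource_ops mydrv_kvd_res_ops = {
	.occ_get = mydrv_kvd_occ_get,
};

static struct devlink_resource_size_params mydrv_kvd_size_params = {
	.size_min = 128,
	.size_max = 1 << 20,
	.size_granularity = 128,
	.unit = DEVLINK_RESOURCE_UNIT_ENTRY,
};

static int mydrv_resources_register(struct devlink *devlink)
{
	return devlink_resource_register(devlink, "kvd", true,
					 1 << 20, 1 /* resource id */,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &mydrv_kvd_size_params,
					 &mydrv_kvd_res_ops);
}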
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 9c341f0..789d818 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -29,6 +29,7 @@ struct tcf_block_ext_info {
 	enum tcf_block_binder_type binder_type;
 	tcf_chain_head_change_t *chain_head_change;
 	void *chain_head_change_priv;
+	u32 block_index;
 };
 
 struct tcf_block_cb;
@@ -38,6 +39,7 @@ bool tcf_queue_work(struct work_struct *work);
 struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
 				bool create);
 void tcf_chain_put(struct tcf_chain *chain);
+void tcf_block_netif_keep_dst(struct tcf_block *block);
 int tcf_block_get(struct tcf_block **p_block,
 		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
 		  struct netlink_ext_ack *extack);
@@ -48,8 +50,14 @@ void tcf_block_put(struct tcf_block *block);
 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
 		       struct tcf_block_ext_info *ei);
 
+static inline bool tcf_block_shared(struct tcf_block *block)
+{
+	return block->index;
+}
+
 static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
 {
+	WARN_ON(tcf_block_shared(block));
 	return block->q;
 }
 
@@ -748,6 +756,7 @@ struct tc_red_qopt_offload_params {
 	u32 max;
 	u32 probability;
 	bool is_ecn;
+	struct gnet_stats_queue *qstats;
 };
 
 struct tc_red_qopt_offload {
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index e2c75f5..815b92a 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -100,7 +100,6 @@ int qdisc_set_default(const char *id);
 void qdisc_hash_add(struct Qdisc *q, bool invisible);
 void qdisc_hash_del(struct Qdisc *q);
 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
-struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
 struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
 					struct nlattr *tab,
 					struct netlink_ext_ack *extack);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index ac029d5..cfc19d0 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -204,6 +204,13 @@ struct Qdisc_ops {
 	int			(*dump)(struct Qdisc *, struct sk_buff *);
 	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);
 
+	void			(*ingress_block_set)(struct Qdisc *sch,
+						     u32 block_index);
+	void			(*egress_block_set)(struct Qdisc *sch,
+						    u32 block_index);
+	u32			(*ingress_block_get)(struct Qdisc *sch);
+	u32			(*egress_block_get)(struct Qdisc *sch);
+
 	struct module		*owner;
 };
 
@@ -255,8 +262,6 @@ struct tcf_proto {
 
 	/* All the rest */
 	u32			prio;
-	u32			classid;
-	struct Qdisc		*q;
 	void			*data;
 	const struct tcf_proto_ops	*ops;
 	struct tcf_chain	*chain;
@@ -275,8 +280,7 @@ typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
 
 struct tcf_chain {
 	struct tcf_proto __rcu *filter_chain;
-	tcf_chain_head_change_t *chain_head_change;
-	void *chain_head_change_priv;
+	struct list_head filter_chain_list;
 	struct list_head list;
 	struct tcf_block *block;
 	u32 index; /* chain index */
@@ -285,11 +289,33 @@ struct tcf_chain {
 
 struct tcf_block {
 	struct list_head chain_list;
+	u32 index; /* block index for shared blocks */
+	unsigned int refcnt;
 	struct net *net;
 	struct Qdisc *q;
 	struct list_head cb_list;
+	struct list_head owner_list;
+	bool keep_dst;
+	unsigned int offloadcnt; /* Number of offloaded filters */
+	unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
 };
 
+static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
+{
+	if (*flags & TCA_CLS_FLAGS_IN_HW)
+		return;
+	*flags |= TCA_CLS_FLAGS_IN_HW;
+	block->offloadcnt++;
+}
+
+static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
+{
+	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
+		return;
+	*flags &= ~TCA_CLS_FLAGS_IN_HW;
+	block->offloadcnt--;
+}
+
 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
 {
 	struct qdisc_skb_cb *qcb;
@@ -473,6 +499,7 @@ void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 			  const struct Qdisc_ops *ops,
 			  struct netlink_ext_ack *extack);
+void qdisc_free(struct Qdisc *qdisc);
 struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
 				const struct Qdisc_ops *ops, u32 parentid,
 				struct netlink_ext_ack *extack);
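[Editor's note: the new offload counters are meant to be driven from classifier offload paths; a hedged sketch follows, with my_drv_* as hypothetical stand-ins for a driver's filter install/remove hooks.]

static int my_drv_install_filter(void) { return 0; }	/* stub */
static void my_drv_remove_filter(void) { }		/* stub */

static int my_cls_replace_hw(struct tcf_block *block, u32 *flags)
{
	int err = my_drv_install_filter();

	if (err)
		return err;
	tcf_block_offload_inc(block, flags);	/* sets TCA_CLS_FLAGS_IN_HW */
	return 0;
}

static void my_cls_destroy_hw(struct tcf_block *block, u32 *flags)
{
	my_drv_remove_filter();
	tcf_block_offload_dec(block, flags);
}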
diff --git a/include/net/tls.h b/include/net/tls.h
index 936cfc5..9185e53 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -170,7 +170,7 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
 
 static inline void tls_err_abort(struct sock *sk)
 {
-	sk->sk_err = -EBADMSG;
+	sk->sk_err = EBADMSG;
 	sk->sk_error_report(sk);
 }
 
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index f96391e..ad73d8b 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -301,7 +301,7 @@ static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
 		l4_hdr = ipv6_hdr(skb)->nexthdr;
 		break;
 	default:
-		return features;;
+		return features;
 	}
 
 	if ((l4_hdr == IPPROTO_UDP) &&
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 405317f..7c2259e 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -245,6 +245,7 @@ union bpf_attr {
 					 * BPF_F_NUMA_NODE is set).
 					 */
 		char	map_name[BPF_OBJ_NAME_LEN];
+		__u32	map_ifindex;	/* ifindex of netdev to create on */
 	};
 
 	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@@ -899,7 +900,7 @@ struct xdp_md {
 	__u32 data;
 	__u32 data_end;
 	__u32 data_meta;
-	/* Below access go though struct xdp_rxq_info */
+	/* Below access go through struct xdp_rxq_info */
 	__u32 ingress_ifindex; /* rxq->dev->ifindex */
 	__u32 rx_queue_index;  /* rxq->queue_index  */
 };
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 6665df6..1df65a4 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -70,6 +70,13 @@ enum devlink_command {
 	DEVLINK_CMD_DPIPE_ENTRIES_GET,
 	DEVLINK_CMD_DPIPE_HEADERS_GET,
 	DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET,
+	DEVLINK_CMD_RESOURCE_SET,
+	DEVLINK_CMD_RESOURCE_DUMP,
+
+	/* Hot driver reload; makes configuration changes take effect. The
+	 * devlink instance is not released during the process.
+	 */
+	DEVLINK_CMD_RELOAD,
 
 	/* add new commands above here */
 	__DEVLINK_CMD_MAX,
@@ -202,6 +209,20 @@ enum devlink_attr {
 	DEVLINK_ATTR_PAD,
 
 	DEVLINK_ATTR_ESWITCH_ENCAP_MODE,	/* u8 */
+	DEVLINK_ATTR_RESOURCE_LIST,		/* nested */
+	DEVLINK_ATTR_RESOURCE,			/* nested */
+	DEVLINK_ATTR_RESOURCE_NAME,		/* string */
+	DEVLINK_ATTR_RESOURCE_ID,		/* u64 */
+	DEVLINK_ATTR_RESOURCE_SIZE,		/* u64 */
+	DEVLINK_ATTR_RESOURCE_SIZE_NEW,		/* u64 */
+	DEVLINK_ATTR_RESOURCE_SIZE_VALID,	/* u8 */
+	DEVLINK_ATTR_RESOURCE_SIZE_MIN,		/* u64 */
+	DEVLINK_ATTR_RESOURCE_SIZE_MAX,		/* u64 */
+	DEVLINK_ATTR_RESOURCE_SIZE_GRAN,        /* u64 */
+	DEVLINK_ATTR_RESOURCE_UNIT,		/* u8 */
+	DEVLINK_ATTR_RESOURCE_OCC,		/* u64 */
+	DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID,	/* u64 */
+	DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,/* u64 */
 
 	/* add new attributes above here, update the policy in devlink.c */
 
@@ -245,4 +266,8 @@ enum devlink_dpipe_header_id {
 	DEVLINK_DPIPE_HEADER_IPV6,
 };
 
+enum devlink_resource_unit {
+	DEVLINK_RESOURCE_UNIT_ENTRY,
+};
+
 #endif /* _UAPI_LINUX_DEVLINK_H_ */
diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h
index fb38c17..ee432cd 100644
--- a/include/uapi/linux/if_tun.h
+++ b/include/uapi/linux/if_tun.h
@@ -58,6 +58,7 @@
 #define TUNSETVNETBE _IOW('T', 222, int)
 #define TUNGETVNETBE _IOR('T', 223, int)
 #define TUNSETSTEERINGEBPF _IOR('T', 224, int)
+#define TUNSETFILTEREBPF _IOR('T', 225, int)
 
 /* TUNSETIFF ifr flags */
 #define IFF_TUN		0x0001
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 4265d7f..dcfab5e 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -363,7 +363,6 @@ enum ovs_tunnel_key_attr {
 	OVS_TUNNEL_KEY_ATTR_IPV6_SRC,		/* struct in6_addr src IPv6 address. */
 	OVS_TUNNEL_KEY_ATTR_IPV6_DST,		/* struct in6_addr dst IPv6 address. */
 	OVS_TUNNEL_KEY_ATTR_PAD,
-	OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS,	/* be32 ERSPAN index. */
 	__OVS_TUNNEL_KEY_ATTR_MAX
 };
 
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 843e29a..9b15005 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -541,9 +541,19 @@ struct tcmsg {
 	int		tcm_ifindex;
 	__u32		tcm_handle;
 	__u32		tcm_parent;
+/* tcm_block_index is used instead of tcm_parent
+ * in case tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK
+ */
+#define tcm_block_index tcm_parent
 	__u32		tcm_info;
 };
 
+/* For manipulation of filters in a shared block, tcm_ifindex is set to
+ * TCM_IFINDEX_MAGIC_BLOCK, and tcm_parent is aliased to tcm_block_index,
+ * which holds the block index.
+ */
+#define TCM_IFINDEX_MAGIC_BLOCK (0xFFFFFFFFU)
+
 enum {
 	TCA_UNSPEC,
 	TCA_KIND,
@@ -558,6 +568,8 @@ enum {
 	TCA_DUMP_INVISIBLE,
 	TCA_CHAIN,
 	TCA_HW_OFFLOAD,
+	TCA_INGRESS_BLOCK,
+	TCA_EGRESS_BLOCK,
 	__TCA_MAX
 };
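[Editor's note: from userspace, a shared block is then addressed as below; a hedged sketch of the tcmsg fields only, netlink plumbing omitted.]

/* Manipulate filters on shared block 5: tcm_ifindex carries the
 * magic value and tcm_block_index (aliasing tcm_parent) the index.
 */
struct tcmsg tcm = {
	.tcm_family = AF_UNSPEC,
	.tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK,
	.tcm_block_index = 5,
};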
 
diff --git a/init/Kconfig b/init/Kconfig
index 19a6b84..a9a2e2c 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -461,6 +461,7 @@
 
 config CPU_ISOLATION
 	bool "CPU isolation"
+	depends on SMP || COMPILE_TEST
 	default y
 	help
 	  Make sure that CPUs running critical tasks are not disturbed by
diff --git a/kernel/Makefile b/kernel/Makefile
index 172d151d..f85ae5d 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -81,6 +81,7 @@
 obj-$(CONFIG_GCOV_KERNEL) += gcov/
 obj-$(CONFIG_KCOV) += kcov.o
 obj-$(CONFIG_KPROBES) += kprobes.o
+obj-$(CONFIG_FAIL_FUNCTION) += fail_function.o
 obj-$(CONFIG_KGDB) += debug/
 obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o
 obj-$(CONFIG_LOCKUP_DETECTOR) += watchdog.o
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index aaa3198..ab94d30 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -56,7 +56,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	u32 elem_size, index_mask, max_entries;
 	bool unpriv = !capable(CAP_SYS_ADMIN);
 	struct bpf_array *array;
-	u64 array_size;
+	u64 array_size, mask64;
 
 	/* check sanity of attributes */
 	if (attr->max_entries == 0 || attr->key_size != 4 ||
@@ -74,13 +74,25 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	elem_size = round_up(attr->value_size, 8);
 
 	max_entries = attr->max_entries;
-	index_mask = roundup_pow_of_two(max_entries) - 1;
 
-	if (unpriv)
+	/* On 32-bit archs, roundup_pow_of_two() with max_entries that has
+	 * the uppermost bit set in u32 space is undefined behavior due to
+	 * resulting 1U << 32, so do it manually here in u64 space.
+	 */
+	mask64 = fls_long(max_entries - 1);
+	mask64 = 1ULL << mask64;
+	mask64 -= 1;
+
+	index_mask = mask64;
+	if (unpriv) {
 		/* round up array size to nearest power of 2,
 		 * since cpu will speculate within index_mask limits
 		 */
 		max_entries = index_mask + 1;
+		/* Check for overflows. */
+		if (max_entries < attr->max_entries)
+			return ERR_PTR(-E2BIG);
+	}
 
 	array_size = sizeof(*array);
 	if (percpu)
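[Editor's note: a worked example of the rounding above, as a stand-alone userspace sketch with __builtin_clzll standing in for fls_long().]

#include <stdio.h>

int main(void)
{
	unsigned int max_entries = 0x80000001u;
	/* fls_long(max_entries - 1): position of the most significant bit */
	int msb = 64 - __builtin_clzll((unsigned long long)max_entries - 1);
	unsigned long long mask64 = (1ULL << msb) - 1;

	/* Prints 0xffffffff -- computing this in u32 space would have
	 * required the undefined 1U << 32.
	 */
	printf("index_mask = 0x%llx\n", mask64);
	return 0;
}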
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index ce5b669..fbfdada6 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -94,13 +94,7 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
 	if (!cmap)
 		return ERR_PTR(-ENOMEM);
 
-	/* mandatory map attributes */
-	cmap->map.map_type = attr->map_type;
-	cmap->map.key_size = attr->key_size;
-	cmap->map.value_size = attr->value_size;
-	cmap->map.max_entries = attr->max_entries;
-	cmap->map.map_flags = attr->map_flags;
-	cmap->map.numa_node = bpf_map_attr_numa_node(attr);
+	bpf_map_init_from_attr(&cmap->map, attr);
 
 	/* Pre-limit array size based on NR_CPUS, not final CPU check */
 	if (cmap->map.max_entries > NR_CPUS) {
@@ -143,7 +137,7 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
 	return ERR_PTR(err);
 }
 
-void __cpu_map_queue_destructor(void *ptr)
+static void __cpu_map_queue_destructor(void *ptr)
 {
 	/* The tear-down procedure should have made sure that queue is
 	 * empty.  See __cpu_map_entry_replace() and work-queue
@@ -222,8 +216,8 @@ static struct xdp_pkt *convert_to_xdp_pkt(struct xdp_buff *xdp)
 	return xdp_pkt;
 }
 
-struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
-				  struct xdp_pkt *xdp_pkt)
+static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
+					 struct xdp_pkt *xdp_pkt)
 {
 	unsigned int frame_size;
 	void *pkt_data_start;
@@ -337,7 +331,8 @@ static int cpu_map_kthread_run(void *data)
 	return 0;
 }
 
-struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu, int map_id)
+static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
+						       int map_id)
 {
 	gfp_t gfp = GFP_ATOMIC|__GFP_NOWARN;
 	struct bpf_cpu_map_entry *rcpu;
@@ -395,7 +390,7 @@ struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu, int map_id)
 	return NULL;
 }
 
-void __cpu_map_entry_free(struct rcu_head *rcu)
+static void __cpu_map_entry_free(struct rcu_head *rcu)
 {
 	struct bpf_cpu_map_entry *rcpu;
 	int cpu;
@@ -438,8 +433,8 @@ void __cpu_map_entry_free(struct rcu_head *rcu)
  * cpu_map_kthread_stop, which waits for an RCU grace period before
  * stopping kthread, emptying the queue.
  */
-void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
-			     u32 key_cpu, struct bpf_cpu_map_entry *rcpu)
+static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
+				    u32 key_cpu, struct bpf_cpu_map_entry *rcpu)
 {
 	struct bpf_cpu_map_entry *old_rcpu;
 
@@ -451,7 +446,7 @@ void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
 	}
 }
 
-int cpu_map_delete_elem(struct bpf_map *map, void *key)
+static int cpu_map_delete_elem(struct bpf_map *map, void *key)
 {
 	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
 	u32 key_cpu = *(u32 *)key;
@@ -464,8 +459,8 @@ int cpu_map_delete_elem(struct bpf_map *map, void *key)
 	return 0;
 }
 
-int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
-				u64 map_flags)
+static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
+			       u64 map_flags)
 {
 	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
 	struct bpf_cpu_map_entry *rcpu;
@@ -502,7 +497,7 @@ int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
 	return 0;
 }
 
-void cpu_map_free(struct bpf_map *map)
+static void cpu_map_free(struct bpf_map *map)
 {
 	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
 	int cpu;
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index ebdef54..565f9ec 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -93,13 +93,7 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 	if (!dtab)
 		return ERR_PTR(-ENOMEM);
 
-	/* mandatory map attributes */
-	dtab->map.map_type = attr->map_type;
-	dtab->map.key_size = attr->key_size;
-	dtab->map.value_size = attr->value_size;
-	dtab->map.max_entries = attr->max_entries;
-	dtab->map.map_flags = attr->map_flags;
-	dtab->map.numa_node = bpf_map_attr_numa_node(attr);
+	bpf_map_init_from_attr(&dtab->map, attr);
 
 	/* make sure page count doesn't overflow */
 	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
diff --git a/kernel/bpf/disasm.h b/kernel/bpf/disasm.h
index e0857d0..266fe8e 100644
--- a/kernel/bpf/disasm.h
+++ b/kernel/bpf/disasm.h
@@ -29,8 +29,8 @@ extern const char *const bpf_class_string[8];
 
 const char *func_id_name(int id);
 
-typedef void (*bpf_insn_print_t)(struct bpf_verifier_env *env,
-				 const char *, ...);
+typedef __printf(2, 3) void (*bpf_insn_print_t)(struct bpf_verifier_env *env,
+						const char *, ...);
 typedef const char *(*bpf_insn_revmap_call_t)(void *private_data,
 					      const struct bpf_insn *insn);
 typedef const char *(*bpf_insn_print_imm_t)(void *private_data,
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 3905d4b..b76828f 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -227,7 +227,7 @@ static int alloc_extra_elems(struct bpf_htab *htab)
 }
 
 /* Called from syscall */
-static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
+static int htab_map_alloc_check(union bpf_attr *attr)
 {
 	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
@@ -241,9 +241,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
 	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
 	int numa_node = bpf_map_attr_numa_node(attr);
-	struct bpf_htab *htab;
-	int err, i;
-	u64 cost;
 
 	BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
 		     offsetof(struct htab_elem, hash_node.pprev));
@@ -254,40 +251,68 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 		/* LRU implementation is much more complicated than other
 		 * maps.  Hence, limit to CAP_SYS_ADMIN for now.
 		 */
-		return ERR_PTR(-EPERM);
+		return -EPERM;
 
 	if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK)
 		/* reserved bits should not be used */
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	if (!lru && percpu_lru)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	if (lru && !prealloc)
-		return ERR_PTR(-ENOTSUPP);
+		return -ENOTSUPP;
 
 	if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
+
+	/* check sanity of attributes.
+	 * value_size == 0 may be allowed in the future to use map as a set
+	 */
+	if (attr->max_entries == 0 || attr->key_size == 0 ||
+	    attr->value_size == 0)
+		return -EINVAL;
+
+	if (attr->key_size > MAX_BPF_STACK)
+		/* eBPF programs initialize keys on stack, so they cannot be
+		 * larger than max stack size
+		 */
+		return -E2BIG;
+
+	if (attr->value_size >= KMALLOC_MAX_SIZE -
+	    MAX_BPF_STACK - sizeof(struct htab_elem))
+		/* if value_size is bigger, the user space won't be able to
+		 * access the elements via bpf syscall. This check also makes
+		 * sure that the elem_size doesn't overflow and it's
+		 * kmalloc-able later in htab_map_update_elem()
+		 */
+		return -E2BIG;
+
+	return 0;
+}
+
+static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
+{
+	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
+	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
+		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
+	/* percpu_lru means each cpu has its own LRU list.
+	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
+	 * the map's value itself is percpu.  percpu_lru has
+	 * nothing to do with the map's value.
+	 */
+	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
+	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
+	struct bpf_htab *htab;
+	int err, i;
+	u64 cost;
 
 	htab = kzalloc(sizeof(*htab), GFP_USER);
 	if (!htab)
 		return ERR_PTR(-ENOMEM);
 
-	/* mandatory map attributes */
-	htab->map.map_type = attr->map_type;
-	htab->map.key_size = attr->key_size;
-	htab->map.value_size = attr->value_size;
-	htab->map.max_entries = attr->max_entries;
-	htab->map.map_flags = attr->map_flags;
-	htab->map.numa_node = numa_node;
-
-	/* check sanity of attributes.
-	 * value_size == 0 may be allowed in the future to use map as a set
-	 */
-	err = -EINVAL;
-	if (htab->map.max_entries == 0 || htab->map.key_size == 0 ||
-	    htab->map.value_size == 0)
-		goto free_htab;
+	bpf_map_init_from_attr(&htab->map, attr);
 
 	if (percpu_lru) {
 		/* ensure each CPU's lru list has >=1 elements.
@@ -304,22 +329,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	/* hash table size must be power of 2 */
 	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
 
-	err = -E2BIG;
-	if (htab->map.key_size > MAX_BPF_STACK)
-		/* eBPF programs initialize keys on stack, so they cannot be
-		 * larger than max stack size
-		 */
-		goto free_htab;
-
-	if (htab->map.value_size >= KMALLOC_MAX_SIZE -
-	    MAX_BPF_STACK - sizeof(struct htab_elem))
-		/* if value_size is bigger, the user space won't be able to
-		 * access the elements via bpf syscall. This check also makes
-		 * sure that the elem_size doesn't overflow and it's
-		 * kmalloc-able later in htab_map_update_elem()
-		 */
-		goto free_htab;
-
 	htab->elem_size = sizeof(struct htab_elem) +
 			  round_up(htab->map.key_size, 8);
 	if (percpu)
@@ -327,6 +336,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	else
 		htab->elem_size += round_up(htab->map.value_size, 8);
 
+	err = -E2BIG;
 	/* prevent zero size kmalloc and check for u32 overflow */
 	if (htab->n_buckets == 0 ||
 	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
@@ -1143,6 +1153,7 @@ static void htab_map_free(struct bpf_map *map)
 }
 
 const struct bpf_map_ops htab_map_ops = {
+	.map_alloc_check = htab_map_alloc_check,
 	.map_alloc = htab_map_alloc,
 	.map_free = htab_map_free,
 	.map_get_next_key = htab_map_get_next_key,
@@ -1153,6 +1164,7 @@ const struct bpf_map_ops htab_map_ops = {
 };
 
 const struct bpf_map_ops htab_lru_map_ops = {
+	.map_alloc_check = htab_map_alloc_check,
 	.map_alloc = htab_map_alloc,
 	.map_free = htab_map_free,
 	.map_get_next_key = htab_map_get_next_key,
@@ -1236,6 +1248,7 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
 }
 
 const struct bpf_map_ops htab_percpu_map_ops = {
+	.map_alloc_check = htab_map_alloc_check,
 	.map_alloc = htab_map_alloc,
 	.map_free = htab_map_free,
 	.map_get_next_key = htab_map_get_next_key,
@@ -1245,6 +1258,7 @@ const struct bpf_map_ops htab_percpu_map_ops = {
 };
 
 const struct bpf_map_ops htab_lru_percpu_map_ops = {
+	.map_alloc_check = htab_map_alloc_check,
 	.map_alloc = htab_map_alloc,
 	.map_free = htab_map_free,
 	.map_get_next_key = htab_map_get_next_key,
@@ -1253,11 +1267,11 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
 	.map_delete_elem = htab_lru_map_delete_elem,
 };
 
-static struct bpf_map *fd_htab_map_alloc(union bpf_attr *attr)
+static int fd_htab_map_alloc_check(union bpf_attr *attr)
 {
 	if (attr->value_size != sizeof(u32))
-		return ERR_PTR(-EINVAL);
-	return htab_map_alloc(attr);
+		return -EINVAL;
+	return htab_map_alloc_check(attr);
 }
 
 static void fd_htab_map_free(struct bpf_map *map)
@@ -1328,7 +1342,7 @@ static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
 	if (IS_ERR(inner_map_meta))
 		return inner_map_meta;
 
-	map = fd_htab_map_alloc(attr);
+	map = htab_map_alloc(attr);
 	if (IS_ERR(map)) {
 		bpf_map_meta_free(inner_map_meta);
 		return map;
@@ -1372,6 +1386,7 @@ static void htab_of_map_free(struct bpf_map *map)
 }
 
 const struct bpf_map_ops htab_of_maps_map_ops = {
+	.map_alloc_check = fd_htab_map_alloc_check,
 	.map_alloc = htab_of_map_alloc,
 	.map_free = htab_of_map_free,
 	.map_get_next_key = htab_map_get_next_key,
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 885e4547..584e022 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -522,12 +522,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
 		return ERR_PTR(-ENOMEM);
 
 	/* copy mandatory map attributes */
-	trie->map.map_type = attr->map_type;
-	trie->map.key_size = attr->key_size;
-	trie->map.value_size = attr->value_size;
-	trie->map.max_entries = attr->max_entries;
-	trie->map.map_flags = attr->map_flags;
-	trie->map.numa_node = bpf_map_attr_numa_node(attr);
+	bpf_map_init_from_attr(&trie->map, attr);
 	trie->data_size = attr->key_size -
 			  offsetof(struct bpf_lpm_trie_key, data);
 	trie->max_prefixlen = trie->data_size * 8;
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index 040d4e0..a88cebf 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -24,15 +24,27 @@
 #include <linux/rtnetlink.h>
 #include <linux/rwsem.h>
 
-/* Protects bpf_prog_offload_devs and offload members of all progs.
+/* Protects bpf_prog_offload_devs, bpf_map_offload_devs and offload members
+ * of all progs.
  * RTNL lock cannot be taken when holding this lock.
  */
 static DECLARE_RWSEM(bpf_devs_lock);
 static LIST_HEAD(bpf_prog_offload_devs);
+static LIST_HEAD(bpf_map_offload_devs);
+
+static int bpf_dev_offload_check(struct net_device *netdev)
+{
+	if (!netdev)
+		return -EINVAL;
+	if (!netdev->netdev_ops->ndo_bpf)
+		return -EOPNOTSUPP;
+	return 0;
+}
 
 int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
 {
-	struct bpf_dev_offload *offload;
+	struct bpf_prog_offload *offload;
+	int err;
 
 	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
 	    attr->prog_type != BPF_PROG_TYPE_XDP)
@@ -49,12 +61,15 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
 
 	offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
 					   attr->prog_ifindex);
-	if (!offload->netdev)
-		goto err_free;
+	err = bpf_dev_offload_check(offload->netdev);
+	if (err)
+		goto err_maybe_put;
 
 	down_write(&bpf_devs_lock);
-	if (offload->netdev->reg_state != NETREG_REGISTERED)
+	if (offload->netdev->reg_state != NETREG_REGISTERED) {
+		err = -EINVAL;
 		goto err_unlock;
+	}
 	prog->aux->offload = offload;
 	list_add_tail(&offload->offloads, &bpf_prog_offload_devs);
 	dev_put(offload->netdev);
@@ -63,16 +78,17 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
 	return 0;
 err_unlock:
 	up_write(&bpf_devs_lock);
-	dev_put(offload->netdev);
-err_free:
+err_maybe_put:
+	if (offload->netdev)
+		dev_put(offload->netdev);
 	kfree(offload);
-	return -EINVAL;
+	return err;
 }
 
 static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
 			     struct netdev_bpf *data)
 {
-	struct bpf_dev_offload *offload = prog->aux->offload;
+	struct bpf_prog_offload *offload = prog->aux->offload;
 	struct net_device *netdev;
 
 	ASSERT_RTNL();
@@ -80,8 +96,6 @@ static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
 	if (!offload)
 		return -ENODEV;
 	netdev = offload->netdev;
-	if (!netdev->netdev_ops->ndo_bpf)
-		return -EOPNOTSUPP;
 
 	data->command = cmd;
 
@@ -110,7 +124,7 @@ int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
 int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
 				 int insn_idx, int prev_insn_idx)
 {
-	struct bpf_dev_offload *offload;
+	struct bpf_prog_offload *offload;
 	int ret = -ENODEV;
 
 	down_read(&bpf_devs_lock);
@@ -124,7 +138,7 @@ int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
 
 static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
 {
-	struct bpf_dev_offload *offload = prog->aux->offload;
+	struct bpf_prog_offload *offload = prog->aux->offload;
 	struct netdev_bpf data = {};
 
 	data.offload.prog = prog;
@@ -238,11 +252,184 @@ int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
 const struct bpf_prog_ops bpf_offload_prog_ops = {
 };
 
+static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
+			       enum bpf_netdev_command cmd)
+{
+	struct netdev_bpf data = {};
+	struct net_device *netdev;
+
+	ASSERT_RTNL();
+
+	data.command = cmd;
+	data.offmap = offmap;
+	/* Caller must make sure netdev is valid */
+	netdev = offmap->netdev;
+
+	return netdev->netdev_ops->ndo_bpf(netdev, &data);
+}
+
+struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
+{
+	struct net *net = current->nsproxy->net_ns;
+	struct bpf_offloaded_map *offmap;
+	int err;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return ERR_PTR(-EPERM);
+	if (attr->map_type != BPF_MAP_TYPE_HASH)
+		return ERR_PTR(-EINVAL);
+
+	offmap = kzalloc(sizeof(*offmap), GFP_USER);
+	if (!offmap)
+		return ERR_PTR(-ENOMEM);
+
+	bpf_map_init_from_attr(&offmap->map, attr);
+
+	rtnl_lock();
+	down_write(&bpf_devs_lock);
+	offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
+	err = bpf_dev_offload_check(offmap->netdev);
+	if (err)
+		goto err_unlock;
+
+	err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
+	if (err)
+		goto err_unlock;
+
+	list_add_tail(&offmap->offloads, &bpf_map_offload_devs);
+	up_write(&bpf_devs_lock);
+	rtnl_unlock();
+
+	return &offmap->map;
+
+err_unlock:
+	up_write(&bpf_devs_lock);
+	rtnl_unlock();
+	kfree(offmap);
+	return ERR_PTR(err);
+}
+
+static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
+{
+	WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
+	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
+	bpf_map_free_id(&offmap->map, true);
+	list_del_init(&offmap->offloads);
+	offmap->netdev = NULL;
+}
+
+void bpf_map_offload_map_free(struct bpf_map *map)
+{
+	struct bpf_offloaded_map *offmap = map_to_offmap(map);
+
+	rtnl_lock();
+	down_write(&bpf_devs_lock);
+	if (offmap->netdev)
+		__bpf_map_offload_destroy(offmap);
+	up_write(&bpf_devs_lock);
+	rtnl_unlock();
+
+	kfree(offmap);
+}
+
+int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
+{
+	struct bpf_offloaded_map *offmap = map_to_offmap(map);
+	int ret = -ENODEV;
+
+	down_read(&bpf_devs_lock);
+	if (offmap->netdev)
+		ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
+	up_read(&bpf_devs_lock);
+
+	return ret;
+}
+
+int bpf_map_offload_update_elem(struct bpf_map *map,
+				void *key, void *value, u64 flags)
+{
+	struct bpf_offloaded_map *offmap = map_to_offmap(map);
+	int ret = -ENODEV;
+
+	if (unlikely(flags > BPF_EXIST))
+		return -EINVAL;
+
+	down_read(&bpf_devs_lock);
+	if (offmap->netdev)
+		ret = offmap->dev_ops->map_update_elem(offmap, key, value,
+						       flags);
+	up_read(&bpf_devs_lock);
+
+	return ret;
+}
+
+int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
+{
+	struct bpf_offloaded_map *offmap = map_to_offmap(map);
+	int ret = -ENODEV;
+
+	down_read(&bpf_devs_lock);
+	if (offmap->netdev)
+		ret = offmap->dev_ops->map_delete_elem(offmap, key);
+	up_read(&bpf_devs_lock);
+
+	return ret;
+}
+
+int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
+{
+	struct bpf_offloaded_map *offmap = map_to_offmap(map);
+	int ret = -ENODEV;
+
+	down_read(&bpf_devs_lock);
+	if (offmap->netdev)
+		ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
+	up_read(&bpf_devs_lock);
+
+	return ret;
+}
+
+bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map)
+{
+	struct bpf_offloaded_map *offmap;
+	struct bpf_prog_offload *offload;
+	bool ret;
+
+	if (!bpf_prog_is_dev_bound(prog->aux) || !bpf_map_is_dev_bound(map))
+		return false;
+
+	down_read(&bpf_devs_lock);
+	offload = prog->aux->offload;
+	offmap = map_to_offmap(map);
+
+	ret = offload && offload->netdev == offmap->netdev;
+	up_read(&bpf_devs_lock);
+
+	return ret;
+}
+
+static void bpf_offload_orphan_all_progs(struct net_device *netdev)
+{
+	struct bpf_prog_offload *offload, *tmp;
+
+	list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs, offloads)
+		if (offload->netdev == netdev)
+			__bpf_prog_offload_destroy(offload->prog);
+}
+
+static void bpf_offload_orphan_all_maps(struct net_device *netdev)
+{
+	struct bpf_offloaded_map *offmap, *tmp;
+
+	list_for_each_entry_safe(offmap, tmp, &bpf_map_offload_devs, offloads)
+		if (offmap->netdev == netdev)
+			__bpf_map_offload_destroy(offmap);
+}
+
 static int bpf_offload_notification(struct notifier_block *notifier,
 				    ulong event, void *ptr)
 {
 	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
-	struct bpf_dev_offload *offload, *tmp;
 
 	ASSERT_RTNL();
 
@@ -253,11 +440,8 @@ static int bpf_offload_notification(struct notifier_block *notifier,
 			break;
 
 		down_write(&bpf_devs_lock);
-		list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs,
-					 offloads) {
-			if (offload->netdev == netdev)
-				__bpf_prog_offload_destroy(offload->prog);
-		}
+		bpf_offload_orphan_all_progs(netdev);
+		bpf_offload_orphan_all_maps(netdev);
 		up_write(&bpf_devs_lock);
 		break;
 	default:
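[Editor's note: on the driver side the two new commands arrive via ndo_bpf; a hedged sketch follows, reusing the hypothetical mydrv_map_ops from the include/linux/bpf.h hunk above.]

static int mydrv_ndo_bpf(struct net_device *netdev, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case BPF_OFFLOAD_MAP_ALLOC:
		/* allocate device state and expose the map callbacks */
		bpf->offmap->dev_priv = NULL;	/* stub: driver state */
		bpf->offmap->dev_ops = &mydrv_map_ops;
		return 0;
	case BPF_OFFLOAD_MAP_FREE:
		/* release device state for this map */
		return 0;
	default:
		return -EINVAL;
	}
}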
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 0799686..0314d17 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -513,13 +513,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 	if (!stab)
 		return ERR_PTR(-ENOMEM);
 
-	/* mandatory map attributes */
-	stab->map.map_type = attr->map_type;
-	stab->map.key_size = attr->key_size;
-	stab->map.value_size = attr->value_size;
-	stab->map.max_entries = attr->max_entries;
-	stab->map.map_flags = attr->map_flags;
-	stab->map.numa_node = bpf_map_attr_numa_node(attr);
+	bpf_map_init_from_attr(&stab->map, attr);
 
 	/* make sure page count doesn't overflow */
 	cost = (u64) stab->map.max_entries * sizeof(struct sock *);
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 6c63c22..b0ecf43 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -88,14 +88,10 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 	if (cost >= U32_MAX - PAGE_SIZE)
 		goto free_smap;
 
-	smap->map.map_type = attr->map_type;
-	smap->map.key_size = attr->key_size;
+	bpf_map_init_from_attr(&smap->map, attr);
 	smap->map.value_size = value_size;
-	smap->map.max_entries = attr->max_entries;
-	smap->map.map_flags = attr->map_flags;
 	smap->n_buckets = n_buckets;
 	smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
-	smap->map.numa_node = bpf_map_attr_numa_node(attr);
 
 	err = bpf_map_precharge_memlock(smap->map.pages);
 	if (err)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 2bac0dc..c691b9e 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -94,18 +94,34 @@ static int check_uarg_tail_zero(void __user *uaddr,
 	return 0;
 }
 
+const struct bpf_map_ops bpf_map_offload_ops = {
+	.map_alloc = bpf_map_offload_map_alloc,
+	.map_free = bpf_map_offload_map_free,
+};
+
 static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
 {
+	const struct bpf_map_ops *ops;
 	struct bpf_map *map;
+	int err;
 
-	if (attr->map_type >= ARRAY_SIZE(bpf_map_types) ||
-	    !bpf_map_types[attr->map_type])
+	if (attr->map_type >= ARRAY_SIZE(bpf_map_types))
+		return ERR_PTR(-EINVAL);
+	ops = bpf_map_types[attr->map_type];
+	if (!ops)
 		return ERR_PTR(-EINVAL);
 
-	map = bpf_map_types[attr->map_type]->map_alloc(attr);
+	if (ops->map_alloc_check) {
+		err = ops->map_alloc_check(attr);
+		if (err)
+			return ERR_PTR(err);
+	}
+	if (attr->map_ifindex)
+		ops = &bpf_map_offload_ops;
+	map = ops->map_alloc(attr);
 	if (IS_ERR(map))
 		return map;
-	map->ops = bpf_map_types[attr->map_type];
+	map->ops = ops;
 	map->map_type = attr->map_type;
 	return map;
 }
@@ -134,6 +150,16 @@ void bpf_map_area_free(void *area)
 	kvfree(area);
 }
 
+void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
+{
+	map->map_type = attr->map_type;
+	map->key_size = attr->key_size;
+	map->value_size = attr->value_size;
+	map->max_entries = attr->max_entries;
+	map->map_flags = attr->map_flags;
+	map->numa_node = bpf_map_attr_numa_node(attr);
+}
+
 int bpf_map_precharge_memlock(u32 pages)
 {
 	struct user_struct *user = get_current_user();
@@ -189,16 +215,25 @@ static int bpf_map_alloc_id(struct bpf_map *map)
 	return id > 0 ? 0 : id;
 }
 
-static void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
+void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
 {
 	unsigned long flags;
 
+	/* Offloaded maps are removed from the IDR store when their device
+	 * disappears - even if someone holds an fd to them they are unusable,
+	 * the memory is gone, all ops will fail; they are simply waiting for
+	 * refcnt to drop to be freed.
+	 */
+	if (!map->id)
+		return;
+
 	if (do_idr_lock)
 		spin_lock_irqsave(&map_idr_lock, flags);
 	else
 		__acquire(&map_idr_lock);
 
 	idr_remove(&map_idr, map->id);
+	map->id = 0;
 
 	if (do_idr_lock)
 		spin_unlock_irqrestore(&map_idr_lock, flags);
@@ -378,7 +413,7 @@ static int bpf_obj_name_cpy(char *dst, const char *src)
 	return 0;
 }
 
-#define BPF_MAP_CREATE_LAST_FIELD map_name
+#define BPF_MAP_CREATE_LAST_FIELD map_ifindex
 /* called via syscall */
 static int map_create(union bpf_attr *attr)
 {
@@ -566,8 +601,10 @@ static int map_lookup_elem(union bpf_attr *attr)
 	if (!value)
 		goto free_key;
 
-	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
-	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
+	if (bpf_map_is_dev_bound(map)) {
+		err = bpf_map_offload_lookup_elem(map, key, value);
+	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
 		err = bpf_percpu_hash_copy(map, key, value);
 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 		err = bpf_percpu_array_copy(map, key, value);
@@ -654,7 +691,10 @@ static int map_update_elem(union bpf_attr *attr)
 		goto free_value;
 
 	/* Need to create a kthread, thus must support schedule */
-	if (map->map_type == BPF_MAP_TYPE_CPUMAP) {
+	if (bpf_map_is_dev_bound(map)) {
+		err = bpf_map_offload_update_elem(map, key, value, attr->flags);
+		goto out;
+	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP) {
 		err = map->ops->map_update_elem(map, key, value, attr->flags);
 		goto out;
 	}
@@ -731,6 +771,11 @@ static int map_delete_elem(union bpf_attr *attr)
 		goto err_put;
 	}
 
+	if (bpf_map_is_dev_bound(map)) {
+		err = bpf_map_offload_delete_elem(map, key);
+		goto out;
+	}
+
 	preempt_disable();
 	__this_cpu_inc(bpf_prog_active);
 	rcu_read_lock();
@@ -738,7 +783,7 @@ static int map_delete_elem(union bpf_attr *attr)
 	rcu_read_unlock();
 	__this_cpu_dec(bpf_prog_active);
 	preempt_enable();
-
+out:
 	if (!err)
 		trace_bpf_map_delete_elem(map, ufd, key);
 	kfree(key);
@@ -788,9 +833,15 @@ static int map_get_next_key(union bpf_attr *attr)
 	if (!next_key)
 		goto free_key;
 
+	if (bpf_map_is_dev_bound(map)) {
+		err = bpf_map_offload_get_next_key(map, key, next_key);
+		goto out;
+	}
+
 	rcu_read_lock();
 	err = map->ops->map_get_next_key(map, key, next_key);
 	rcu_read_unlock();
+out:
 	if (err)
 		goto free_next_key;
 
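With map_ifindex accepted as the new last attribute, userspace can ask for a device-bound map at creation time and find_and_alloc_map() substitutes bpf_map_offload_ops for the generic ops. A hypothetical raw-syscall sketch; the sizes and the ifindex value are placeholders:

/* Hypothetical userspace sketch: create a map bound to a netdev via
 * the new map_ifindex attribute.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int bpf_create_offloaded_map(unsigned int ifindex)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type    = BPF_MAP_TYPE_HASH;
	attr.key_size    = 4;
	attr.value_size  = 4;
	attr.max_entries = 16;
	attr.map_ifindex = ifindex;	/* bind the map to this device */

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}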
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 48b61ca..2e7a43e 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3091,6 +3091,11 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 			return -EINVAL;
 		}
 
+		if (opcode == BPF_ARSH && BPF_CLASS(insn->code) != BPF_ALU64) {
+			verbose(env, "BPF_ARSH not supported for 32 bit ALU\n");
+			return -EINVAL;
+		}
+
 		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
 		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
 			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
@@ -4816,6 +4821,13 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
 			return -EINVAL;
 		}
 	}
+
+	if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
+	    !bpf_offload_dev_match(prog, map)) {
+		verbose(env, "offload device mismatch between prog and map\n");
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
@@ -5354,7 +5366,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 			 */
 			map_ptr = env->insn_aux_data[i + delta].map_ptr;
 			if (map_ptr == BPF_MAP_PTR_POISON) {
-				verbose(env, "tail_call obusing map_ptr\n");
+				verbose(env, "tail_call abusing map_ptr\n");
 				return -EINVAL;
 			}
 			if (!map_ptr->unpriv_array)
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index b366389..4f63597 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -410,7 +410,7 @@ static int __init crash_save_vmcoreinfo_init(void)
 	VMCOREINFO_SYMBOL(contig_page_data);
 #endif
 #ifdef CONFIG_SPARSEMEM
-	VMCOREINFO_SYMBOL(mem_section);
+	VMCOREINFO_SYMBOL_ARRAY(mem_section);
 	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
 	VMCOREINFO_STRUCT_SIZE(mem_section);
 	VMCOREINFO_OFFSET(mem_section, section_mem_map);
diff --git a/kernel/fail_function.c b/kernel/fail_function.c
new file mode 100644
index 0000000..21b0122
--- /dev/null
+++ b/kernel/fail_function.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * fail_function.c: Function-based error injection
+ */
+#include <linux/error-injection.h>
+#include <linux/debugfs.h>
+#include <linux/fault-inject.h>
+#include <linux/kallsyms.h>
+#include <linux/kprobes.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs);
+
+struct fei_attr {
+	struct list_head list;
+	struct kprobe kp;
+	unsigned long retval;
+};
+static DEFINE_MUTEX(fei_lock);
+static LIST_HEAD(fei_attr_list);
+static DECLARE_FAULT_ATTR(fei_fault_attr);
+static struct dentry *fei_debugfs_dir;
+
+static unsigned long adjust_error_retval(unsigned long addr, unsigned long retv)
+{
+	switch (get_injectable_error_type(addr)) {
+	case EI_ETYPE_NULL:
+		if (retv != 0)
+			return 0;
+		break;
+	case EI_ETYPE_ERRNO:
+		if (retv < (unsigned long)-MAX_ERRNO)
+			return (unsigned long)-EINVAL;
+		break;
+	case EI_ETYPE_ERRNO_NULL:
+		if (retv != 0 && retv < (unsigned long)-MAX_ERRNO)
+			return (unsigned long)-EINVAL;
+		break;
+	}
+
+	return retv;
+}
+
+static struct fei_attr *fei_attr_new(const char *sym, unsigned long addr)
+{
+	struct fei_attr *attr;
+
+	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
+	if (attr) {
+		attr->kp.symbol_name = kstrdup(sym, GFP_KERNEL);
+		if (!attr->kp.symbol_name) {
+			kfree(attr);
+			return NULL;
+		}
+		attr->kp.pre_handler = fei_kprobe_handler;
+		attr->retval = adjust_error_retval(addr, 0);
+		INIT_LIST_HEAD(&attr->list);
+	}
+	return attr;
+}
+
+static void fei_attr_free(struct fei_attr *attr)
+{
+	if (attr) {
+		kfree(attr->kp.symbol_name);
+		kfree(attr);
+	}
+}
+
+static struct fei_attr *fei_attr_lookup(const char *sym)
+{
+	struct fei_attr *attr;
+
+	list_for_each_entry(attr, &fei_attr_list, list) {
+		if (!strcmp(attr->kp.symbol_name, sym))
+			return attr;
+	}
+
+	return NULL;
+}
+
+static bool fei_attr_is_valid(struct fei_attr *_attr)
+{
+	struct fei_attr *attr;
+
+	list_for_each_entry(attr, &fei_attr_list, list) {
+		if (attr == _attr)
+			return true;
+	}
+
+	return false;
+}
+
+static int fei_retval_set(void *data, u64 val)
+{
+	struct fei_attr *attr = data;
+	unsigned long retv = (unsigned long)val;
+	int err = 0;
+
+	mutex_lock(&fei_lock);
+	/*
+	 * Since this operation can be done after the retval file is
+	 * removed, it is safer to check that the attr is still valid
+	 * before accessing its members.
+	 */
+	if (!fei_attr_is_valid(attr)) {
+		err = -ENOENT;
+		goto out;
+	}
+
+	if (attr->kp.addr) {
+		if (adjust_error_retval((unsigned long)attr->kp.addr,
+					val) != retv)
+			err = -EINVAL;
+	}
+	if (!err)
+		attr->retval = val;
+out:
+	mutex_unlock(&fei_lock);
+
+	return err;
+}
+
+static int fei_retval_get(void *data, u64 *val)
+{
+	struct fei_attr *attr = data;
+	int err = 0;
+
+	mutex_lock(&fei_lock);
+	/* Here we also validate @attr to ensure it still exists. */
+	if (!fei_attr_is_valid(attr))
+		err = -ENOENT;
+	else
+		*val = attr->retval;
+	mutex_unlock(&fei_lock);
+
+	return err;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(fei_retval_ops, fei_retval_get, fei_retval_set,
+			 "%llx\n");
+
+static int fei_debugfs_add_attr(struct fei_attr *attr)
+{
+	struct dentry *dir;
+
+	dir = debugfs_create_dir(attr->kp.symbol_name, fei_debugfs_dir);
+	if (!dir)
+		return -ENOMEM;
+
+	if (!debugfs_create_file("retval", 0600, dir, attr, &fei_retval_ops)) {
+		debugfs_remove_recursive(dir);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void fei_debugfs_remove_attr(struct fei_attr *attr)
+{
+	struct dentry *dir;
+
+	dir = debugfs_lookup(attr->kp.symbol_name, fei_debugfs_dir);
+	if (dir)
+		debugfs_remove_recursive(dir);
+}
+
+static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs)
+{
+	struct fei_attr *attr = container_of(kp, struct fei_attr, kp);
+
+	if (should_fail(&fei_fault_attr, 1)) {
+		regs_set_return_value(regs, attr->retval);
+		override_function_with_return(regs);
+		/* Kprobe specific fixup */
+		reset_current_kprobe();
+		preempt_enable_no_resched();
+		return 1;
+	}
+
+	return 0;
+}
+NOKPROBE_SYMBOL(fei_kprobe_handler)
+
+static void *fei_seq_start(struct seq_file *m, loff_t *pos)
+{
+	mutex_lock(&fei_lock);
+	return seq_list_start(&fei_attr_list, *pos);
+}
+
+static void fei_seq_stop(struct seq_file *m, void *v)
+{
+	mutex_unlock(&fei_lock);
+}
+
+static void *fei_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	return seq_list_next(v, &fei_attr_list, pos);
+}
+
+static int fei_seq_show(struct seq_file *m, void *v)
+{
+	struct fei_attr *attr = list_entry(v, struct fei_attr, list);
+
+	seq_printf(m, "%pf\n", attr->kp.addr);
+	return 0;
+}
+
+static const struct seq_operations fei_seq_ops = {
+	.start	= fei_seq_start,
+	.next	= fei_seq_next,
+	.stop	= fei_seq_stop,
+	.show	= fei_seq_show,
+};
+
+static int fei_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &fei_seq_ops);
+}
+
+static void fei_attr_remove(struct fei_attr *attr)
+{
+	fei_debugfs_remove_attr(attr);
+	unregister_kprobe(&attr->kp);
+	list_del(&attr->list);
+	fei_attr_free(attr);
+}
+
+static void fei_attr_remove_all(void)
+{
+	struct fei_attr *attr, *n;
+
+	list_for_each_entry_safe(attr, n, &fei_attr_list, list) {
+		fei_attr_remove(attr);
+	}
+}
+
+static ssize_t fei_write(struct file *file, const char __user *buffer,
+			 size_t count, loff_t *ppos)
+{
+	struct fei_attr *attr;
+	unsigned long addr;
+	char *buf, *sym;
+	int ret;
+
+	/* cut off if it is too long */
+	if (count > KSYM_NAME_LEN)
+		count = KSYM_NAME_LEN;
+	buf = kmalloc(count + 1, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (copy_from_user(buf, buffer, count)) {
+		ret = -EFAULT;
+		goto out;
+	}
+	buf[count] = '\0';
+	sym = strstrip(buf);
+
+	mutex_lock(&fei_lock);
+
+	/* Writing just spaces will remove all injection points */
+	if (sym[0] == '\0') {
+		fei_attr_remove_all();
+		ret = count;
+		goto out;
+	}
+	/* Writing !function will remove one injection point */
+	if (sym[0] == '!') {
+		attr = fei_attr_lookup(sym + 1);
+		if (!attr) {
+			ret = -ENOENT;
+			goto out;
+		}
+		fei_attr_remove(attr);
+		ret = count;
+		goto out;
+	}
+
+	addr = kallsyms_lookup_name(sym);
+	if (!addr) {
+		ret = -EINVAL;
+		goto out;
+	}
+	if (!within_error_injection_list(addr)) {
+		ret = -ERANGE;
+		goto out;
+	}
+	if (fei_attr_lookup(sym)) {
+		ret = -EBUSY;
+		goto out;
+	}
+	attr = fei_attr_new(sym, addr);
+	if (!attr) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = register_kprobe(&attr->kp);
+	if (!ret)
+		ret = fei_debugfs_add_attr(attr);
+	if (ret < 0) {
+		fei_attr_remove(attr);
+	} else {
+		list_add_tail(&attr->list, &fei_attr_list);
+		ret = count;
+	}
+out:
+	kfree(buf);
+	mutex_unlock(&fei_lock);
+	return ret;
+}
+
+static const struct file_operations fei_ops = {
+	.open =		fei_open,
+	.read =		seq_read,
+	.write =	fei_write,
+	.llseek =	seq_lseek,
+	.release =	seq_release,
+};
+
+static int __init fei_debugfs_init(void)
+{
+	struct dentry *dir;
+
+	dir = fault_create_debugfs_attr("fail_function", NULL,
+					&fei_fault_attr);
+	if (IS_ERR(dir))
+		return PTR_ERR(dir);
+
+	/* injectable attribute is just a symlink of error_injection/list */
+	if (!debugfs_create_symlink("injectable", dir,
+				    "../error_injection/list"))
+		goto error;
+
+	if (!debugfs_create_file("inject", 0600, dir, NULL, &fei_ops))
+		goto error;
+
+	fei_debugfs_dir = dir;
+
+	return 0;
+error:
+	debugfs_remove_recursive(dir);
+	return -ENOMEM;
+}
+
+late_initcall(fei_debugfs_init);
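
For reference, the resulting debugfs interface can be driven as below. This is a hypothetical userspace sketch: it assumes debugfs is mounted at /sys/kernel/debug, CONFIG_FAIL_FUNCTION=y, and that open_ctree is on the injection whitelist; probability and times are the generic fault_attr knobs created by fault_create_debugfs_attr() above.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	const char *d = "/sys/kernel/debug/fail_function";
	char path[256];

	/* Arm an injection point; fei_write() registers the kprobe. */
	snprintf(path, sizeof(path), "%s/inject", d);
	write_str(path, "open_ctree");

	/* Make every call fail: standard fault_attr knobs. */
	snprintf(path, sizeof(path), "%s/probability", d);
	write_str(path, "100");
	snprintf(path, sizeof(path), "%s/times", d);
	write_str(path, "-1");

	/* retval is printed as %llx; 0xfffffffffffffff4 is -ENOMEM. */
	snprintf(path, sizeof(path), "%s/open_ctree/retval", d);
	write_str(path, "0xfffffffffffffff4");

	/* ... run the workload under test here ... */

	/* "!symbol" removes one injection point, per fei_write(). */
	snprintf(path, sizeof(path), "%s/inject", d);
	write_str(path, "!open_ctree");
	return 0;
}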
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index b4aab48..da2ccf1 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -83,16 +83,6 @@ static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
 	return &(kretprobe_table_locks[hash].lock);
 }
 
-/* List of symbols that can be overriden for error injection. */
-static LIST_HEAD(kprobe_error_injection_list);
-static DEFINE_MUTEX(kprobe_ei_mutex);
-struct kprobe_ei_entry {
-	struct list_head list;
-	unsigned long start_addr;
-	unsigned long end_addr;
-	void *priv;
-};
-
 /* Blacklist -- list of struct kprobe_blacklist_entry */
 static LIST_HEAD(kprobe_blacklist);
 
@@ -1404,17 +1394,6 @@ bool within_kprobe_blacklist(unsigned long addr)
 	return false;
 }
 
-bool within_kprobe_error_injection_list(unsigned long addr)
-{
-	struct kprobe_ei_entry *ent;
-
-	list_for_each_entry(ent, &kprobe_error_injection_list, list) {
-		if (addr >= ent->start_addr && addr < ent->end_addr)
-			return true;
-	}
-	return false;
-}
-
 /*
  * If we have a symbol_name argument, look it up and add the offset field
  * to it. This way, we can specify a relative address to a symbol.
@@ -2189,86 +2168,6 @@ static int __init populate_kprobe_blacklist(unsigned long *start,
 	return 0;
 }
 
-#ifdef CONFIG_BPF_KPROBE_OVERRIDE
-/* Markers of the _kprobe_error_inject_list section */
-extern unsigned long __start_kprobe_error_inject_list[];
-extern unsigned long __stop_kprobe_error_inject_list[];
-
-/*
- * Lookup and populate the kprobe_error_injection_list.
- *
- * For safety reasons we only allow certain functions to be overriden with
- * bpf_error_injection, so we need to populate the list of the symbols that have
- * been marked as safe for overriding.
- */
-static void populate_kprobe_error_injection_list(unsigned long *start,
-						 unsigned long *end,
-						 void *priv)
-{
-	unsigned long *iter;
-	struct kprobe_ei_entry *ent;
-	unsigned long entry, offset = 0, size = 0;
-
-	mutex_lock(&kprobe_ei_mutex);
-	for (iter = start; iter < end; iter++) {
-		entry = arch_deref_entry_point((void *)*iter);
-
-		if (!kernel_text_address(entry) ||
-		    !kallsyms_lookup_size_offset(entry, &size, &offset)) {
-			pr_err("Failed to find error inject entry at %p\n",
-				(void *)entry);
-			continue;
-		}
-
-		ent = kmalloc(sizeof(*ent), GFP_KERNEL);
-		if (!ent)
-			break;
-		ent->start_addr = entry;
-		ent->end_addr = entry + size;
-		ent->priv = priv;
-		INIT_LIST_HEAD(&ent->list);
-		list_add_tail(&ent->list, &kprobe_error_injection_list);
-	}
-	mutex_unlock(&kprobe_ei_mutex);
-}
-
-static void __init populate_kernel_kprobe_ei_list(void)
-{
-	populate_kprobe_error_injection_list(__start_kprobe_error_inject_list,
-					     __stop_kprobe_error_inject_list,
-					     NULL);
-}
-
-static void module_load_kprobe_ei_list(struct module *mod)
-{
-	if (!mod->num_kprobe_ei_funcs)
-		return;
-	populate_kprobe_error_injection_list(mod->kprobe_ei_funcs,
-					     mod->kprobe_ei_funcs +
-					     mod->num_kprobe_ei_funcs, mod);
-}
-
-static void module_unload_kprobe_ei_list(struct module *mod)
-{
-	struct kprobe_ei_entry *ent, *n;
-	if (!mod->num_kprobe_ei_funcs)
-		return;
-
-	mutex_lock(&kprobe_ei_mutex);
-	list_for_each_entry_safe(ent, n, &kprobe_error_injection_list, list) {
-		if (ent->priv == mod) {
-			list_del_init(&ent->list);
-			kfree(ent);
-		}
-	}
-	mutex_unlock(&kprobe_ei_mutex);
-}
-#else
-static inline void __init populate_kernel_kprobe_ei_list(void) {}
-static inline void module_load_kprobe_ei_list(struct module *m) {}
-static inline void module_unload_kprobe_ei_list(struct module *m) {}
-#endif
-
 /* Module notifier call back, checking kprobes on the module */
 static int kprobes_module_callback(struct notifier_block *nb,
 				   unsigned long val, void *data)
@@ -2279,11 +2178,6 @@ static int kprobes_module_callback(struct notifier_block *nb,
 	unsigned int i;
 	int checkcore = (val == MODULE_STATE_GOING);
 
-	if (val == MODULE_STATE_COMING)
-		module_load_kprobe_ei_list(mod);
-	else if (val == MODULE_STATE_GOING)
-		module_unload_kprobe_ei_list(mod);
-
 	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
 		return NOTIFY_DONE;
 
@@ -2346,8 +2240,6 @@ static int __init init_kprobes(void)
 		pr_err("Please take care of using kprobes.\n");
 	}
 
-	populate_kernel_kprobe_ei_list();
-
 	if (kretprobe_blacklist_size) {
 		/* lookup the function address from its name */
 		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
@@ -2515,56 +2407,6 @@ static const struct file_operations debugfs_kprobe_blacklist_ops = {
 	.release        = seq_release,
 };
 
-/*
- * kprobes/error_injection_list -- shows which functions can be overriden for
- * error injection.
- * */
-static void *kprobe_ei_seq_start(struct seq_file *m, loff_t *pos)
-{
-	mutex_lock(&kprobe_ei_mutex);
-	return seq_list_start(&kprobe_error_injection_list, *pos);
-}
-
-static void kprobe_ei_seq_stop(struct seq_file *m, void *v)
-{
-	mutex_unlock(&kprobe_ei_mutex);
-}
-
-static void *kprobe_ei_seq_next(struct seq_file *m, void *v, loff_t *pos)
-{
-	return seq_list_next(v, &kprobe_error_injection_list, pos);
-}
-
-static int kprobe_ei_seq_show(struct seq_file *m, void *v)
-{
-	char buffer[KSYM_SYMBOL_LEN];
-	struct kprobe_ei_entry *ent =
-		list_entry(v, struct kprobe_ei_entry, list);
-
-	sprint_symbol(buffer, ent->start_addr);
-	seq_printf(m, "%s\n", buffer);
-	return 0;
-}
-
-static const struct seq_operations kprobe_ei_seq_ops = {
-	.start = kprobe_ei_seq_start,
-	.next  = kprobe_ei_seq_next,
-	.stop  = kprobe_ei_seq_stop,
-	.show  = kprobe_ei_seq_show,
-};
-
-static int kprobe_ei_open(struct inode *inode, struct file *filp)
-{
-	return seq_open(filp, &kprobe_ei_seq_ops);
-}
-
-static const struct file_operations debugfs_kprobe_ei_ops = {
-	.open           = kprobe_ei_open,
-	.read           = seq_read,
-	.llseek         = seq_lseek,
-	.release        = seq_release,
-};
-
 static void arm_all_kprobes(void)
 {
 	struct hlist_head *head;
@@ -2706,11 +2548,6 @@ static int __init debugfs_kprobe_init(void)
 	if (!file)
 		goto error;
 
-	file = debugfs_create_file("error_injection_list", 0444, dir, NULL,
-				  &debugfs_kprobe_ei_ops);
-	if (!file)
-		goto error;
-
 	return 0;
 
 error:
diff --git a/kernel/module.c b/kernel/module.c
index bd695bf..601494d 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3118,10 +3118,10 @@ static int find_module_sections(struct module *mod, struct load_info *info)
 					     sizeof(*mod->ftrace_callsites),
 					     &mod->num_ftrace_callsites);
 #endif
-#ifdef CONFIG_BPF_KPROBE_OVERRIDE
-	mod->kprobe_ei_funcs = section_objs(info, "_kprobe_error_inject_list",
-					    sizeof(*mod->kprobe_ei_funcs),
-					    &mod->num_kprobe_ei_funcs);
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+	mod->ei_funcs = section_objs(info, "_error_injection_whitelist",
+					    sizeof(*mod->ei_funcs),
+					    &mod->num_ei_funcs);
 #endif
 	mod->extable = section_objs(info, "__ex_table",
 				    sizeof(*mod->extable), &mod->num_exentries);
diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
index 2ddaec4..0926aef 100644
--- a/kernel/sched/completion.c
+++ b/kernel/sched/completion.c
@@ -34,11 +34,6 @@ void complete(struct completion *x)
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 
-	/*
-	 * Perform commit of crossrelease here.
-	 */
-	complete_release_commit(x);
-
 	if (x->done != UINT_MAX)
 		x->done++;
 	__wake_up_locked(&x->wait, TASK_NORMAL, 1);
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index dd79087..9bcbacb 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -89,7 +89,9 @@ static int membarrier_private_expedited(void)
 		rcu_read_unlock();
 	}
 	if (!fallback) {
+		preempt_disable();
 		smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
+		preempt_enable();
 		free_cpumask_var(tmpmask);
 	}
 	cpus_read_unlock();
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index ae3a2d5..0b249e2 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -355,7 +355,7 @@
 	  on if you need to profile the system's use of these macros.
 
 config PROFILE_ALL_BRANCHES
-	bool "Profile all if conditionals"
+	bool "Profile all if conditionals" if !FORTIFY_SOURCE
 	select TRACE_BRANCH_PROFILING
 	help
 	  This tracer profiles all branch conditions. Every if ()
@@ -533,9 +533,7 @@
 config BPF_KPROBE_OVERRIDE
 	bool "Enable BPF programs to override a kprobed function"
 	depends on BPF_EVENTS
-	depends on KPROBES_ON_FTRACE
-	depends on HAVE_KPROBE_OVERRIDE
-	depends on DYNAMIC_FTRACE_WITH_REGS
+	depends on FUNCTION_ERROR_INJECTION
 	default n
 	help
 	 Allows BPF to override the execution of a probed function and
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index f6d2327..f274468 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -14,7 +14,7 @@
 #include <linux/uaccess.h>
 #include <linux/ctype.h>
 #include <linux/kprobes.h>
-#include <asm/kprobes.h>
+#include <linux/error-injection.h>
 
 #include "trace_probe.h"
 #include "trace.h"
@@ -83,9 +83,8 @@ EXPORT_SYMBOL_GPL(trace_call_bpf);
 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
 {
-	__this_cpu_write(bpf_kprobe_override, 1);
 	regs_set_return_value(regs, rc);
-	arch_ftrace_kprobe_override_function(regs);
+	override_function_with_return(regs);
 	return 0;
 }
 
@@ -800,11 +799,11 @@ int perf_event_attach_bpf_prog(struct perf_event *event,
 	int ret = -EEXIST;
 
 	/*
-	 * Kprobe override only works for ftrace based kprobes, and only if they
-	 * are on the opt-in list.
+	 * Kprobe override only works if the kprobe is on the function
+	 * entry, and only if the function is on the opt-in list.
 	 */
 	if (prog->kprobe_override &&
-	    (!trace_kprobe_ftrace(event->tp_event) ||
+	    (!trace_kprobe_on_func_entry(event->tp_event) ||
 	     !trace_kprobe_error_injectable(event->tp_event)))
 		return -EINVAL;
 
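A minimal kprobe program using the helper, written in the style of the samples tree; the SEC() macro and the bpf_override_return() declaration are assumed to come from samples/bpf/bpf_helpers.h, and the probed function must be on the error-injection whitelist:

// SPDX-License-Identifier: GPL-2.0
#include <uapi/linux/bpf.h>
#include <uapi/linux/ptrace.h>
#include "bpf_helpers.h"

SEC("kprobe/open_ctree")
int override_open_ctree(struct pt_regs *ctx)
{
	unsigned long rc = -12;	/* -ENOMEM */

	/* Skip the function body and force its return value. */
	bpf_override_return(ctx, rc);
	return 0;
}

char _license[] SEC("license") = "GPL";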
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 9ab1899..0cddf60 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2534,29 +2534,59 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
  * The lock and unlock are done within a preempt disable section.
  * The current_context per_cpu variable can only be modified
  * by the current task between lock and unlock. But it can
- * be modified more than once via an interrupt. There are four
- * different contexts that we need to consider.
+ * be modified more than once via an interrupt. To pass this
+ * information from the lock to the unlock without having to
+ * access the 'in_interrupt()' functions again (which do show
+ * a bit of overhead in something as critical as function tracing),
+ * we use a bitmask trick.
  *
- *  Normal context.
- *  SoftIRQ context
- *  IRQ context
- *  NMI context
+ *  bit 0 =  NMI context
+ *  bit 1 =  IRQ context
+ *  bit 2 =  SoftIRQ context
+ *  bit 3 =  normal context.
  *
- * If for some reason the ring buffer starts to recurse, we
- * only allow that to happen at most 4 times (one for each
- * context). If it happens 5 times, then we consider this a
- * recusive loop and do not let it go further.
+ * This works because this is the order in which contexts can
+ * preempt one another: an IRQ can preempt a SoftIRQ, but a
+ * SoftIRQ never preempts an IRQ context.
+ *
+ * When the context is determined, the corresponding bit is
+ * checked and set (if it was set, then a recursion of that context
+ * happened).
+ *
+ * On unlock, we need to clear this bit. To do so, just subtract
+ * 1 from the current_context and AND it to itself.
+ *
+ * (binary)
+ *  101 - 1 = 100
+ *  101 & 100 = 100 (clearing bit zero)
+ *
+ *  1010 - 1 = 1001
+ *  1010 & 1001 = 1000 (clearing bit 1)
+ *
+ * The least significant bit can be cleared this way, and it
+ * just so happens that it is the same bit corresponding to
+ * the current context.
  */
 
 static __always_inline int
 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
 {
-	if (cpu_buffer->current_context >= 4)
+	unsigned int val = cpu_buffer->current_context;
+	unsigned long pc = preempt_count();
+	int bit;
+
+	if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
+		bit = RB_CTX_NORMAL;
+	else
+		bit = pc & NMI_MASK ? RB_CTX_NMI :
+			pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
+
+	if (unlikely(val & (1 << bit)))
 		return 1;
 
-	cpu_buffer->current_context++;
-	/* Interrupts must see this update */
-	barrier();
+	val |= (1 << bit);
+	cpu_buffer->current_context = val;
 
 	return 0;
 }
@@ -2564,9 +2594,7 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
 static __always_inline void
 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
 {
-	/* Don't let the dec leak out */
-	barrier();
-	cpu_buffer->current_context--;
+	cpu_buffer->current_context &= cpu_buffer->current_context - 1;
 }
 
 /**
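
The unlock trick works because the innermost (most recently entered) context always owns the lowest set bit, and x &= x - 1 clears exactly that bit. A standalone C sketch of the same bookkeeping, outside the kernel:

#include <assert.h>

enum { CTX_NMI, CTX_IRQ, CTX_SOFTIRQ, CTX_NORMAL };

static unsigned int current_context;

static int recursive_lock(int bit)
{
	if (current_context & (1U << bit))
		return 1;		/* recursion in this context */
	current_context |= 1U << bit;
	return 0;
}

static void recursive_unlock(void)
{
	/* Clear the lowest set bit: the innermost held context. */
	current_context &= current_context - 1;
}

int main(void)
{
	assert(!recursive_lock(CTX_NORMAL));	/* 1000 */
	assert(!recursive_lock(CTX_IRQ));	/* 1010: IRQ preempted us */
	assert(recursive_lock(CTX_IRQ));	/* recursion detected */
	recursive_unlock();			/* 1010 -> 1000 */
	recursive_unlock();			/* 1000 -> 0000 */
	assert(current_context == 0);
	return 0;
}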
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 91f4b57..1fad24a 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -21,6 +21,7 @@
 #include <linux/module.h>
 #include <linux/uaccess.h>
 #include <linux/rculist.h>
+#include <linux/error-injection.h>
 
 #include "trace_probe.h"
 
@@ -42,8 +43,6 @@ struct trace_kprobe {
 	(offsetof(struct trace_kprobe, tp.args) +	\
 	(sizeof(struct probe_arg) * (n)))
 
-DEFINE_PER_CPU(int, bpf_kprobe_override);
-
 static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
 {
 	return tk->rp.handler != NULL;
@@ -88,13 +87,16 @@ static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
 	return nhit;
 }
 
-int trace_kprobe_ftrace(struct trace_event_call *call)
+bool trace_kprobe_on_func_entry(struct trace_event_call *call)
 {
 	struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
-	return kprobe_ftrace(&tk->rp.kp);
+
+	return kprobe_on_func_entry(tk->rp.kp.addr,
+			tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
+			tk->rp.kp.addr ? 0 : tk->rp.kp.offset);
 }
 
-int trace_kprobe_error_injectable(struct trace_event_call *call)
+bool trace_kprobe_error_injectable(struct trace_event_call *call)
 {
 	struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
 	unsigned long addr;
@@ -106,7 +108,7 @@ int trace_kprobe_error_injectable(struct trace_event_call *call)
 	} else {
 		addr = (unsigned long)tk->rp.kp.addr;
 	}
-	return within_kprobe_error_injection_list(addr);
+	return within_error_injection_list(addr);
 }
 
 static int register_kprobe_event(struct trace_kprobe *tk);
@@ -1202,6 +1204,7 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
 	int rctx;
 
 	if (bpf_prog_array_valid(call)) {
+		unsigned long orig_ip = instruction_pointer(regs);
 		int ret;
 
 		ret = trace_call_bpf(call, regs);
@@ -1209,12 +1212,13 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
 		/*
 		 * We need to check and see if we modified the pc of the
 		 * pt_regs, and if so clear the kprobe and return 1 so that we
-		 * don't do the instruction skipping.  Also reset our state so
-		 * we are clean the next pass through.
+		 * don't do the single stepping.
+		 * The ftrace kprobe handler leaves it up to us to re-enable
+		 * preemption here before returning if we've modified the ip.
 		 */
-		if (__this_cpu_read(bpf_kprobe_override)) {
-			__this_cpu_write(bpf_kprobe_override, 0);
+		if (orig_ip != instruction_pointer(regs)) {
 			reset_current_kprobe();
+			preempt_enable_no_resched();
 			return 1;
 		}
 		if (!ret)
@@ -1322,15 +1326,8 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
 	if (tk->tp.flags & TP_FLAG_TRACE)
 		kprobe_trace_func(tk, regs);
 #ifdef CONFIG_PERF_EVENTS
-	if (tk->tp.flags & TP_FLAG_PROFILE) {
+	if (tk->tp.flags & TP_FLAG_PROFILE)
 		ret = kprobe_perf_func(tk, regs);
-		/*
-		 * The ftrace kprobe handler leaves it up to us to re-enable
-		 * preemption here before returning if we've modified the ip.
-		 */
-		if (ret)
-			preempt_enable_no_resched();
-	}
 #endif
 	return ret;
 }
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index 5e54d74..e101c5b 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -252,8 +252,8 @@ struct symbol_cache;
 unsigned long update_symbol_cache(struct symbol_cache *sc);
 void free_symbol_cache(struct symbol_cache *sc);
 struct symbol_cache *alloc_symbol_cache(const char *sym, long offset);
-int trace_kprobe_ftrace(struct trace_event_call *call);
-int trace_kprobe_error_injectable(struct trace_event_call *call);
+bool trace_kprobe_on_func_entry(struct trace_event_call *call);
+bool trace_kprobe_error_injectable(struct trace_event_call *call);
 #else
 /* uprobes do not support symbol fetch methods */
 #define fetch_symbol_u8			NULL
@@ -280,14 +280,14 @@ alloc_symbol_cache(const char *sym, long offset)
 	return NULL;
 }
 
-static inline int trace_kprobe_ftrace(struct trace_event_call *call)
+static inline bool trace_kprobe_on_func_entry(struct trace_event_call *call)
 {
-	return 0;
+	return false;
 }
 
-static inline int trace_kprobe_error_injectable(struct trace_event_call *call)
+static inline bool trace_kprobe_error_injectable(struct trace_event_call *call)
 {
-	return 0;
+	return false;
 }
 #endif /* CONFIG_KPROBE_EVENTS */
 
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 9d5b78a..890d476 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1500,6 +1500,10 @@
 	  Provide fault-injection framework.
 	  For more details, see Documentation/fault-injection/.
 
+config FUNCTION_ERROR_INJECTION
+	def_bool y
+	depends on HAVE_FUNCTION_ERROR_INJECTION && KPROBES
+
 config FAILSLAB
 	bool "Fault-injection capability for kmalloc"
 	depends on FAULT_INJECTION
@@ -1547,6 +1551,16 @@
 	help
 	  Provide fault-injection capability for futexes.
 
+config FAIL_FUNCTION
+	bool "Fault-injection capability for functions"
+	depends on FAULT_INJECTION_DEBUG_FS && FUNCTION_ERROR_INJECTION
+	help
+	  Provide function-based fault-injection capability.
+	  This allows you to make a specific function return immediately
+	  with a given return value. As a result, the function's callers
+	  will see the error value and have to handle it. This is useful
+	  for testing the error handling in various subsystems.
+
 config FAULT_INJECTION_DEBUG_FS
 	bool "Debugfs entries for fault-injection capabilities"
 	depends on FAULT_INJECTION && SYSFS && DEBUG_FS
diff --git a/lib/Makefile b/lib/Makefile
index a6c8529..75ec137 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -149,6 +149,7 @@
 obj-$(CONFIG_MEMORY_NOTIFIER_ERROR_INJECT) += memory-notifier-error-inject.o
 obj-$(CONFIG_OF_RECONFIG_NOTIFIER_ERROR_INJECT) += \
 	of-reconfig-notifier-error-inject.o
+obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
 
 lib-$(CONFIG_GENERIC_BUG) += bug.o
 
diff --git a/lib/error-inject.c b/lib/error-inject.c
new file mode 100644
index 0000000..c0d4600
--- /dev/null
+++ b/lib/error-inject.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0
+// error-inject.c: Function-level error injection table
+#include <linux/error-injection.h>
+#include <linux/debugfs.h>
+#include <linux/kallsyms.h>
+#include <linux/kprobes.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+/* Whitelist of symbols that can be overridden for error injection. */
+static LIST_HEAD(error_injection_list);
+static DEFINE_MUTEX(ei_mutex);
+struct ei_entry {
+	struct list_head list;
+	unsigned long start_addr;
+	unsigned long end_addr;
+	int etype;
+	void *priv;
+};
+
+bool within_error_injection_list(unsigned long addr)
+{
+	struct ei_entry *ent;
+	bool ret = false;
+
+	mutex_lock(&ei_mutex);
+	list_for_each_entry(ent, &error_injection_list, list) {
+		if (addr >= ent->start_addr && addr < ent->end_addr) {
+			ret = true;
+			break;
+		}
+	}
+	mutex_unlock(&ei_mutex);
+	return ret;
+}
+
+int get_injectable_error_type(unsigned long addr)
+{
+	struct ei_entry *ent;
+
+	list_for_each_entry(ent, &error_injection_list, list) {
+		if (addr >= ent->start_addr && addr < ent->end_addr)
+			return ent->etype;
+	}
+	return EI_ETYPE_NONE;
+}
+
+/*
+ * Lookup and populate the error_injection_list.
+ *
+ * For safety reasons we only allow certain functions to be overridden for
+ * error injection, so we need to populate the list of symbols that have
+ * been marked as safe for overriding.
+ */
+static void populate_error_injection_list(struct error_injection_entry *start,
+					  struct error_injection_entry *end,
+					  void *priv)
+{
+	struct error_injection_entry *iter;
+	struct ei_entry *ent;
+	unsigned long entry, offset = 0, size = 0;
+
+	mutex_lock(&ei_mutex);
+	for (iter = start; iter < end; iter++) {
+		entry = arch_deref_entry_point((void *)iter->addr);
+
+		if (!kernel_text_address(entry) ||
+		    !kallsyms_lookup_size_offset(entry, &size, &offset)) {
+			pr_err("Failed to find error inject entry at %p\n",
+				(void *)entry);
+			continue;
+		}
+
+		ent = kmalloc(sizeof(*ent), GFP_KERNEL);
+		if (!ent)
+			break;
+		ent->start_addr = entry;
+		ent->end_addr = entry + size;
+		ent->etype = iter->etype;
+		ent->priv = priv;
+		INIT_LIST_HEAD(&ent->list);
+		list_add_tail(&ent->list, &error_injection_list);
+	}
+	mutex_unlock(&ei_mutex);
+}
+
+/* Markers of the _error_injection_whitelist section */
+extern struct error_injection_entry __start_error_injection_whitelist[];
+extern struct error_injection_entry __stop_error_injection_whitelist[];
+
+static void __init populate_kernel_ei_list(void)
+{
+	populate_error_injection_list(__start_error_injection_whitelist,
+				      __stop_error_injection_whitelist,
+				      NULL);
+}
+
+#ifdef CONFIG_MODULES
+static void module_load_ei_list(struct module *mod)
+{
+	if (!mod->num_ei_funcs)
+		return;
+
+	populate_error_injection_list(mod->ei_funcs,
+				      mod->ei_funcs + mod->num_ei_funcs, mod);
+}
+
+static void module_unload_ei_list(struct module *mod)
+{
+	struct ei_entry *ent, *n;
+
+	if (!mod->num_ei_funcs)
+		return;
+
+	mutex_lock(&ei_mutex);
+	list_for_each_entry_safe(ent, n, &error_injection_list, list) {
+		if (ent->priv == mod) {
+			list_del_init(&ent->list);
+			kfree(ent);
+		}
+	}
+	mutex_unlock(&ei_mutex);
+}
+
+/* Module notifier call back, checking error injection table on the module */
+static int ei_module_callback(struct notifier_block *nb,
+			      unsigned long val, void *data)
+{
+	struct module *mod = data;
+
+	if (val == MODULE_STATE_COMING)
+		module_load_ei_list(mod);
+	else if (val == MODULE_STATE_GOING)
+		module_unload_ei_list(mod);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block ei_module_nb = {
+	.notifier_call = ei_module_callback,
+	.priority = 0
+};
+
+static __init int module_ei_init(void)
+{
+	return register_module_notifier(&ei_module_nb);
+}
+#else /* !CONFIG_MODULES */
+#define module_ei_init()	(0)
+#endif
+
+/*
+ * error_injection/whitelist -- shows which functions can be overridden for
+ * error injection.
+ */
+static void *ei_seq_start(struct seq_file *m, loff_t *pos)
+{
+	mutex_lock(&ei_mutex);
+	return seq_list_start(&error_injection_list, *pos);
+}
+
+static void ei_seq_stop(struct seq_file *m, void *v)
+{
+	mutex_unlock(&ei_mutex);
+}
+
+static void *ei_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	return seq_list_next(v, &error_injection_list, pos);
+}
+
+static const char *error_type_string(int etype)
+{
+	switch (etype) {
+	case EI_ETYPE_NULL:
+		return "NULL";
+	case EI_ETYPE_ERRNO:
+		return "ERRNO";
+	case EI_ETYPE_ERRNO_NULL:
+		return "ERRNO_NULL";
+	default:
+		return "(unknown)";
+	}
+}
+
+static int ei_seq_show(struct seq_file *m, void *v)
+{
+	struct ei_entry *ent = list_entry(v, struct ei_entry, list);
+
+	seq_printf(m, "%pf\t%s\n", (void *)ent->start_addr,
+		   error_type_string(ent->etype));
+	return 0;
+}
+
+static const struct seq_operations ei_seq_ops = {
+	.start = ei_seq_start,
+	.next  = ei_seq_next,
+	.stop  = ei_seq_stop,
+	.show  = ei_seq_show,
+};
+
+static int ei_open(struct inode *inode, struct file *filp)
+{
+	return seq_open(filp, &ei_seq_ops);
+}
+
+static const struct file_operations debugfs_ei_ops = {
+	.open           = ei_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.release        = seq_release,
+};
+
+static int __init ei_debugfs_init(void)
+{
+	struct dentry *dir, *file;
+
+	dir = debugfs_create_dir("error_injection", NULL);
+	if (!dir)
+		return -ENOMEM;
+
+	file = debugfs_create_file("list", 0444, dir, NULL, &debugfs_ei_ops);
+	if (!file) {
+		debugfs_remove(dir);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int __init init_error_injection(void)
+{
+	populate_kernel_ei_list();
+
+	if (!module_ei_init())
+		ei_debugfs_init();
+
+	return 0;
+}
+late_initcall(init_error_injection);
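
The whitelist sections that populate_error_injection_list() walks are filled by the annotation macro this series adds in include/asm-generic/error-injection.h. A sketch of a subsystem opting a function in; my_reset_hw() and its body are hypothetical:

#include <linux/errno.h>
#include <linux/error-injection.h>

/* Hypothetical function opted in to error injection. */
static int my_reset_hw(void *dev)
{
	if (!dev)
		return -EINVAL;
	/* ... real reset work ... */
	return 0;
}
/* Emits a struct error_injection_entry into the
 * _error_injection_whitelist section; ERRNO tells the core that
 * injected values must be valid negative errnos.
 */
ALLOW_ERROR_INJECTION(my_reset_hw, ERRNO);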
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index d73c142..f656ca2 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -127,7 +127,7 @@
 /* GFP bitmask for kmemleak internal allocations */
 #define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
 				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
-				 __GFP_NOWARN)
+				 __GFP_NOWARN | __GFP_NOFAIL)
 
 /* scanning area inside a memory block */
 struct kmemleak_scan_area {
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 5f1446c..a662ccc 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -80,7 +80,6 @@ static int vlan_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations vlan_fops = {
-	.owner	 = THIS_MODULE,
 	.open    = vlan_seq_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
@@ -97,7 +96,6 @@ static int vlandev_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations vlandev_fops = {
-	.owner = THIS_MODULE,
 	.open    = vlandev_seq_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
index 325c560..086a4ab 100644
--- a/net/9p/trans_xen.c
+++ b/net/9p/trans_xen.c
@@ -543,3 +543,7 @@ static void p9_trans_xen_exit(void)
 	return xenbus_unregister_driver(&xen_9pfs_front_driver);
 }
 module_exit(p9_trans_xen_exit);
+
+MODULE_AUTHOR("Stefano Stabellini <stefano@aporeto.com>");
+MODULE_DESCRIPTION("Xen Transport for 9P");
+MODULE_LICENSE("GPL");
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index 309d7db..d4c1021 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -1047,7 +1047,6 @@ static int aarp_seq_open(struct inode *inode, struct file *file)
 }
 
 const struct file_operations atalk_seq_arp_fops = {
-	.owner		= THIS_MODULE,
 	.open           = aarp_seq_open,
 	.read           = seq_read,
 	.llseek         = seq_lseek,
diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
index af46bc4..a3bf9d5 100644
--- a/net/appletalk/atalk_proc.c
+++ b/net/appletalk/atalk_proc.c
@@ -226,7 +226,6 @@ static int atalk_seq_socket_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations atalk_seq_interface_fops = {
-	.owner		= THIS_MODULE,
 	.open		= atalk_seq_interface_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -234,7 +233,6 @@ static const struct file_operations atalk_seq_interface_fops = {
 };
 
 static const struct file_operations atalk_seq_route_fops = {
-	.owner		= THIS_MODULE,
 	.open		= atalk_seq_route_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -242,7 +240,6 @@ static const struct file_operations atalk_seq_route_fops = {
 };
 
 static const struct file_operations atalk_seq_socket_fops = {
-	.owner		= THIS_MODULE,
 	.open		= atalk_seq_socket_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 4e11119..fd94bea 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -824,7 +824,6 @@ static int br2684_proc_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations br2684_proc_ops = {
-	.owner = THIS_MODULE,
 	.open = br2684_proc_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 6676e34..09a1f05 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -992,7 +992,6 @@ static int lec_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations lec_seq_fops = {
-	.owner = THIS_MODULE,
 	.open = lec_seq_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c
index 2212da9..b93cc0f 100644
--- a/net/atm/mpoa_proc.c
+++ b/net/atm/mpoa_proc.c
@@ -57,7 +57,6 @@ static int parse_qos(const char *buff);
  *   Define allowed FILE OPERATIONS
  */
 static const struct file_operations mpc_file_operations = {
-	.owner =	THIS_MODULE,
 	.open =		proc_mpc_open,
 	.read =		seq_read,
 	.llseek =	seq_lseek,
diff --git a/net/atm/proc.c b/net/atm/proc.c
index 642f927..edc48ed 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -37,7 +37,6 @@ static ssize_t proc_dev_atm_read(struct file *file, char __user *buf,
 				 size_t count, loff_t *pos);
 
 static const struct file_operations proc_atm_dev_ops = {
-	.owner =	THIS_MODULE,
 	.read =		proc_dev_atm_read,
 	.llseek =	noop_llseek,
 };
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 06eac1f..47fdd39 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1931,7 +1931,6 @@ static int ax25_info_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations ax25_info_fops = {
-	.owner = THIS_MODULE,
 	.open = ax25_info_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index 0446b89..5255589 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -336,7 +336,6 @@ static int ax25_rt_info_open(struct inode *inode, struct file *file)
 }
 
 const struct file_operations ax25_route_fops = {
-	.owner = THIS_MODULE,
 	.open = ax25_rt_info_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index 83b035f..4ebe91b 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -194,7 +194,6 @@ static int ax25_uid_info_open(struct inode *inode, struct file *file)
 }
 
 const struct file_operations ax25_uid_fops = {
-	.owner = THIS_MODULE,
 	.open = ax25_uid_info_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index bb30822..426a92f 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -527,7 +527,6 @@ static int cmtp_proc_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations cmtp_proc_fops = {
-	.owner		= THIS_MODULE,
 	.open		= cmtp_proc_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 43ba91c..fc6615d 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -3363,9 +3363,10 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
 			break;
 
 		case L2CAP_CONF_EFS:
-			remote_efs = 1;
-			if (olen == sizeof(efs))
+			if (olen == sizeof(efs)) {
+				remote_efs = 1;
 				memcpy(&efs, (void *) val, olen);
+			}
 			break;
 
 		case L2CAP_CONF_EWS:
@@ -3584,16 +3585,17 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
 			break;
 
 		case L2CAP_CONF_EFS:
-			if (olen == sizeof(efs))
+			if (olen == sizeof(efs)) {
 				memcpy(&efs, (void *)val, olen);
 
-			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
-			    efs.stype != L2CAP_SERV_NOTRAFIC &&
-			    efs.stype != chan->local_stype)
-				return -ECONNREFUSED;
+				if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
+				    efs.stype != L2CAP_SERV_NOTRAFIC &&
+				    efs.stype != chan->local_stype)
+					return -ECONNREFUSED;
 
-			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
-					   (unsigned long) &efs, endptr - ptr);
+				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
+						   (unsigned long) &efs, endptr - ptr);
+			}
 			break;
 
 		case L2CAP_CONF_FCS:
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 13690334..ac5e5e3 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -246,7 +246,6 @@ static int bcm_proc_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations bcm_proc_fops = {
-	.owner		= THIS_MODULE,
 	.open		= bcm_proc_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/net/can/proc.c b/net/can/proc.c
index 45e38a3..fdf704e 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -276,7 +276,6 @@ static int can_stats_proc_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations can_stats_proc_fops = {
-	.owner		= THIS_MODULE,
 	.open		= can_stats_proc_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -310,7 +309,6 @@ static int can_reset_stats_proc_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations can_reset_stats_proc_fops = {
-	.owner		= THIS_MODULE,
 	.open		= can_reset_stats_proc_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -329,7 +327,6 @@ static int can_version_proc_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations can_version_proc_fops = {
-	.owner		= THIS_MODULE,
 	.open		= can_version_proc_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -382,7 +379,6 @@ static int can_rcvlist_proc_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations can_rcvlist_proc_fops = {
-	.owner		= THIS_MODULE,
 	.open		= can_rcvlist_proc_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -450,7 +446,6 @@ static int can_rcvlist_sff_proc_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations can_rcvlist_sff_proc_fops = {
-	.owner		= THIS_MODULE,
 	.open		= can_rcvlist_sff_proc_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -494,7 +489,6 @@ static int can_rcvlist_eff_proc_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations can_rcvlist_eff_proc_fops = {
-	.owner		= THIS_MODULE,
 	.open		= can_rcvlist_eff_proc_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 7d430c1..dd7d6dd 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -92,12 +92,6 @@ static LIST_HEAD(devlink_list);
  */
 static DEFINE_MUTEX(devlink_mutex);
 
-/* devlink_port_mutex
- *
- * Shared lock to guard lists of ports in all devlink devices.
- */
-static DEFINE_MUTEX(devlink_port_mutex);
-
 static struct net *devlink_net(const struct devlink *devlink)
 {
 	return read_pnet(&devlink->_net);
@@ -335,15 +329,18 @@ devlink_sb_tc_index_get_from_info(struct devlink_sb *devlink_sb,
 #define DEVLINK_NL_FLAG_NEED_DEVLINK	BIT(0)
 #define DEVLINK_NL_FLAG_NEED_PORT	BIT(1)
 #define DEVLINK_NL_FLAG_NEED_SB		BIT(2)
-#define DEVLINK_NL_FLAG_LOCK_PORTS	BIT(3)
-	/* port is not needed but we need to ensure they don't
-	 * change in the middle of command
-	 */
+
+/* The per devlink instance lock is taken by default in the pre-doit
+ * operation, yet several commands do not require it. The global
+ * devlink lock is still taken and protects against disruption by
+ * concurrent userspace calls.
+ */
+#define DEVLINK_NL_FLAG_NO_LOCK		BIT(3)
 
 static int devlink_nl_pre_doit(const struct genl_ops *ops,
 			       struct sk_buff *skb, struct genl_info *info)
 {
 	struct devlink *devlink;
+	int err;
 
 	mutex_lock(&devlink_mutex);
 	devlink = devlink_get_from_info(info);
@@ -351,44 +348,47 @@ static int devlink_nl_pre_doit(const struct genl_ops *ops,
 		mutex_unlock(&devlink_mutex);
 		return PTR_ERR(devlink);
 	}
+	if (~ops->internal_flags & DEVLINK_NL_FLAG_NO_LOCK)
+		mutex_lock(&devlink->lock);
 	if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_DEVLINK) {
 		info->user_ptr[0] = devlink;
 	} else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT) {
 		struct devlink_port *devlink_port;
 
-		mutex_lock(&devlink_port_mutex);
 		devlink_port = devlink_port_get_from_info(devlink, info);
 		if (IS_ERR(devlink_port)) {
-			mutex_unlock(&devlink_port_mutex);
-			mutex_unlock(&devlink_mutex);
-			return PTR_ERR(devlink_port);
+			err = PTR_ERR(devlink_port);
+			goto unlock;
 		}
 		info->user_ptr[0] = devlink_port;
 	}
-	if (ops->internal_flags & DEVLINK_NL_FLAG_LOCK_PORTS) {
-		mutex_lock(&devlink_port_mutex);
-	}
 	if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_SB) {
 		struct devlink_sb *devlink_sb;
 
 		devlink_sb = devlink_sb_get_from_info(devlink, info);
 		if (IS_ERR(devlink_sb)) {
-			if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT)
-				mutex_unlock(&devlink_port_mutex);
-			mutex_unlock(&devlink_mutex);
-			return PTR_ERR(devlink_sb);
+			err = PTR_ERR(devlink_sb);
+			goto unlock;
 		}
 		info->user_ptr[1] = devlink_sb;
 	}
 	return 0;
+
+unlock:
+	if (~ops->internal_flags & DEVLINK_NL_FLAG_NO_LOCK)
+		mutex_unlock(&devlink->lock);
+	mutex_unlock(&devlink_mutex);
+	return err;
 }
 
 static void devlink_nl_post_doit(const struct genl_ops *ops,
 				 struct sk_buff *skb, struct genl_info *info)
 {
-	if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT ||
-	    ops->internal_flags & DEVLINK_NL_FLAG_LOCK_PORTS)
-		mutex_unlock(&devlink_port_mutex);
+	struct devlink *devlink;
+
+	devlink = devlink_get_from_info(info);
+	if (~ops->internal_flags & DEVLINK_NL_FLAG_NO_LOCK)
+		mutex_unlock(&devlink->lock);
 	mutex_unlock(&devlink_mutex);
 }
 
@@ -614,10 +614,10 @@ static int devlink_nl_cmd_port_get_dumpit(struct sk_buff *msg,
 	int err;
 
 	mutex_lock(&devlink_mutex);
-	mutex_lock(&devlink_port_mutex);
 	list_for_each_entry(devlink, &devlink_list, list) {
 		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
 			continue;
+		mutex_lock(&devlink->lock);
 		list_for_each_entry(devlink_port, &devlink->port_list, list) {
 			if (idx < start) {
 				idx++;
@@ -628,13 +628,15 @@ static int devlink_nl_cmd_port_get_dumpit(struct sk_buff *msg,
 						   NETLINK_CB(cb->skb).portid,
 						   cb->nlh->nlmsg_seq,
 						   NLM_F_MULTI);
-			if (err)
+			if (err) {
+				mutex_unlock(&devlink->lock);
 				goto out;
+			}
 			idx++;
 		}
+		mutex_unlock(&devlink->lock);
 	}
 out:
-	mutex_unlock(&devlink_port_mutex);
 	mutex_unlock(&devlink_mutex);
 
 	cb->args[0] = idx;
@@ -801,6 +803,7 @@ static int devlink_nl_cmd_sb_get_dumpit(struct sk_buff *msg,
 	list_for_each_entry(devlink, &devlink_list, list) {
 		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
 			continue;
+		mutex_lock(&devlink->lock);
 		list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
 			if (idx < start) {
 				idx++;
@@ -811,10 +814,13 @@ static int devlink_nl_cmd_sb_get_dumpit(struct sk_buff *msg,
 						 NETLINK_CB(cb->skb).portid,
 						 cb->nlh->nlmsg_seq,
 						 NLM_F_MULTI);
-			if (err)
+			if (err) {
+				mutex_unlock(&devlink->lock);
 				goto out;
+			}
 			idx++;
 		}
+		mutex_unlock(&devlink->lock);
 	}
 out:
 	mutex_unlock(&devlink_mutex);
@@ -935,14 +941,18 @@ static int devlink_nl_cmd_sb_pool_get_dumpit(struct sk_buff *msg,
 		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) ||
 		    !devlink->ops || !devlink->ops->sb_pool_get)
 			continue;
+		mutex_lock(&devlink->lock);
 		list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
 			err = __sb_pool_get_dumpit(msg, start, &idx, devlink,
 						   devlink_sb,
 						   NETLINK_CB(cb->skb).portid,
 						   cb->nlh->nlmsg_seq);
-			if (err && err != -EOPNOTSUPP)
+			if (err && err != -EOPNOTSUPP) {
+				mutex_unlock(&devlink->lock);
 				goto out;
+			}
 		}
+		mutex_unlock(&devlink->lock);
 	}
 out:
 	mutex_unlock(&devlink_mutex);
@@ -1123,22 +1133,24 @@ static int devlink_nl_cmd_sb_port_pool_get_dumpit(struct sk_buff *msg,
 	int err;
 
 	mutex_lock(&devlink_mutex);
-	mutex_lock(&devlink_port_mutex);
 	list_for_each_entry(devlink, &devlink_list, list) {
 		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) ||
 		    !devlink->ops || !devlink->ops->sb_port_pool_get)
 			continue;
+		mutex_lock(&devlink->lock);
 		list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
 			err = __sb_port_pool_get_dumpit(msg, start, &idx,
 							devlink, devlink_sb,
 							NETLINK_CB(cb->skb).portid,
 							cb->nlh->nlmsg_seq);
-			if (err && err != -EOPNOTSUPP)
+			if (err && err != -EOPNOTSUPP) {
+				mutex_unlock(&devlink->lock);
 				goto out;
+			}
 		}
+		mutex_unlock(&devlink->lock);
 	}
 out:
-	mutex_unlock(&devlink_port_mutex);
 	mutex_unlock(&devlink_mutex);
 
 	cb->args[0] = idx;
@@ -1347,23 +1359,26 @@ devlink_nl_cmd_sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
 	int err;
 
 	mutex_lock(&devlink_mutex);
-	mutex_lock(&devlink_port_mutex);
 	list_for_each_entry(devlink, &devlink_list, list) {
 		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) ||
 		    !devlink->ops || !devlink->ops->sb_tc_pool_bind_get)
 			continue;
+
+		mutex_lock(&devlink->lock);
 		list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
 			err = __sb_tc_pool_bind_get_dumpit(msg, start, &idx,
 							   devlink,
 							   devlink_sb,
 							   NETLINK_CB(cb->skb).portid,
 							   cb->nlh->nlmsg_seq);
-			if (err && err != -EOPNOTSUPP)
+			if (err && err != -EOPNOTSUPP) {
+				mutex_unlock(&devlink->lock);
 				goto out;
+			}
 		}
+		mutex_unlock(&devlink->lock);
 	}
 out:
-	mutex_unlock(&devlink_port_mutex);
 	mutex_unlock(&devlink_mutex);
 
 	cb->args[0] = idx;
@@ -1679,6 +1694,12 @@ static int devlink_dpipe_table_put(struct sk_buff *skb,
 		       table->counters_enabled))
 		goto nla_put_failure;
 
+	if (table->resource_valid) {
+		nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID,
+				  table->resource_id, DEVLINK_ATTR_PAD);
+		nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,
+				  table->resource_units, DEVLINK_ATTR_PAD);
+	}
 	if (devlink_dpipe_matches_put(table, skb))
 		goto nla_put_failure;
 
@@ -2273,6 +2294,272 @@ static int devlink_nl_cmd_dpipe_table_counters_set(struct sk_buff *skb,
 						counters_enable);
 }
 
+struct devlink_resource *
+devlink_resource_find(struct devlink *devlink,
+		      struct devlink_resource *resource, u64 resource_id)
+{
+	struct list_head *resource_list;
+
+	if (resource)
+		resource_list = &resource->resource_list;
+	else
+		resource_list = &devlink->resource_list;
+
+	list_for_each_entry(resource, resource_list, list) {
+		struct devlink_resource *child_resource;
+
+		if (resource->id == resource_id)
+			return resource;
+
+		child_resource = devlink_resource_find(devlink, resource,
+						       resource_id);
+		if (child_resource)
+			return child_resource;
+	}
+	return NULL;
+}
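
devlink_resource_find() searches depth-first: each entry on the chosen list is tested, then its children via the nested resource_list. An illustrative hierarchy (hypothetical IDs, shaped like the mlxsw KVD layout):

	/*
	 *   devlink->resource_list
	 *     kvd              (id 1)
	 *       linear         (id 2)
	 *       hash_single    (id 3)
	 *       hash_double    (id 4)
	 *
	 * devlink_resource_find(devlink, NULL, 3) visits "kvd" first,
	 * recurses into its resource_list and returns "hash_single".
	 */
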
+
+void devlink_resource_validate_children(struct devlink_resource *resource)
+{
+	struct devlink_resource *child_resource;
+	bool size_valid = true;
+	u64 parts_size = 0;
+
+	if (list_empty(&resource->resource_list))
+		goto out;
+
+	list_for_each_entry(child_resource, &resource->resource_list, list)
+		parts_size += child_resource->size_new;
+
+	if (parts_size > resource->size)
+		size_valid = false;
+out:
+	resource->size_valid = size_valid;
+}
+
+static int devlink_nl_cmd_resource_set(struct sk_buff *skb,
+				       struct genl_info *info)
+{
+	struct devlink *devlink = info->user_ptr[0];
+	struct devlink_resource *resource;
+	u64 resource_id;
+	u64 size;
+	int err;
+
+	if (!info->attrs[DEVLINK_ATTR_RESOURCE_ID] ||
+	    !info->attrs[DEVLINK_ATTR_RESOURCE_SIZE])
+		return -EINVAL;
+	resource_id = nla_get_u64(info->attrs[DEVLINK_ATTR_RESOURCE_ID]);
+
+	resource = devlink_resource_find(devlink, NULL, resource_id);
+	if (!resource)
+		return -EINVAL;
+
+	if (!resource->resource_ops->size_validate)
+		return -EINVAL;
+
+	size = nla_get_u64(info->attrs[DEVLINK_ATTR_RESOURCE_SIZE]);
+	err = resource->resource_ops->size_validate(devlink, size,
+						    info->extack);
+	if (err)
+		return err;
+
+	resource->size_new = size;
+	devlink_resource_validate_children(resource);
+	if (resource->parent)
+		devlink_resource_validate_children(resource->parent);
+	return 0;
+}
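
Note that size_validate is effectively mandatory here: a resource registered without it cannot be resized from userspace. A driver hook might look like this sketch (the my_* names and the granularity constant are hypothetical):

	static int my_kvd_size_validate(struct devlink *devlink, u64 size,
					struct netlink_ext_ack *extack)
	{
		/* MY_KVD_GRANULARITY stands in for the device's allocation unit. */
		if (size % MY_KVD_GRANULARITY) {
			NL_SET_ERR_MSG_MOD(extack, "size is not a multiple of the granularity");
			return -EINVAL;
		}
		return 0;
	}

	static const struct devlink_resource_ops my_kvd_resource_ops = {
		.size_validate = my_kvd_size_validate,
	};

The accepted size only lands in size_new; it takes effect after a reload (in iproute2 terms, a devlink resource set followed by devlink dev reload, in versions that support those commands).
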
+
+static void
+devlink_resource_size_params_put(struct devlink_resource *resource,
+				 struct sk_buff *skb)
+{
+	struct devlink_resource_size_params *size_params;
+
+	size_params = resource->size_params;
+	nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_GRAN,
+			  size_params->size_granularity, DEVLINK_ATTR_PAD);
+	nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MAX,
+			  size_params->size_max, DEVLINK_ATTR_PAD);
+	nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MIN,
+			  size_params->size_min, DEVLINK_ATTR_PAD);
+	nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_UNIT, size_params->unit);
+}
+
+static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb,
+				struct devlink_resource *resource)
+{
+	struct devlink_resource *child_resource;
+	struct nlattr *child_resource_attr;
+	struct nlattr *resource_attr;
+
+	resource_attr = nla_nest_start(skb, DEVLINK_ATTR_RESOURCE);
+	if (!resource_attr)
+		return -EMSGSIZE;
+
+	if (nla_put_string(skb, DEVLINK_ATTR_RESOURCE_NAME, resource->name) ||
+	    nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE, resource->size,
+			      DEVLINK_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_ID, resource->id,
+			      DEVLINK_ATTR_PAD))
+		goto nla_put_failure;
+	if (resource->size != resource->size_new)
+		nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_NEW,
+				  resource->size_new, DEVLINK_ATTR_PAD);
+	if (resource->resource_ops && resource->resource_ops->occ_get)
+		nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC,
+				  resource->resource_ops->occ_get(devlink),
+				  DEVLINK_ATTR_PAD);
+	devlink_resource_size_params_put(resource, skb);
+	if (list_empty(&resource->resource_list))
+		goto out;
+
+	if (nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_SIZE_VALID,
+		       resource->size_valid))
+		goto nla_put_failure;
+
+	child_resource_attr = nla_nest_start(skb, DEVLINK_ATTR_RESOURCE_LIST);
+	if (!child_resource_attr)
+		goto nla_put_failure;
+
+	list_for_each_entry(child_resource, &resource->resource_list, list) {
+		if (devlink_resource_put(devlink, skb, child_resource))
+			goto resource_put_failure;
+	}
+
+	nla_nest_end(skb, child_resource_attr);
+out:
+	nla_nest_end(skb, resource_attr);
+	return 0;
+
+resource_put_failure:
+	nla_nest_cancel(skb, child_resource_attr);
+nla_put_failure:
+	nla_nest_cancel(skb, resource_attr);
+	return -EMSGSIZE;
+}
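
The resulting message nests attributes recursively, mirroring the tree; for the hierarchy sketched earlier the layout is roughly:

	/*
	 * DEVLINK_ATTR_RESOURCE_LIST
	 *   DEVLINK_ATTR_RESOURCE                ("kvd")
	 *     NAME / SIZE / ID / size params / OCC
	 *     DEVLINK_ATTR_RESOURCE_SIZE_VALID
	 *     DEVLINK_ATTR_RESOURCE_LIST         (children)
	 *       DEVLINK_ATTR_RESOURCE            ("linear")
	 *       DEVLINK_ATTR_RESOURCE            ("hash_single")
	 *       DEVLINK_ATTR_RESOURCE            ("hash_double")
	 */
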
+
+static int devlink_resource_fill(struct genl_info *info,
+				 enum devlink_command cmd, int flags)
+{
+	struct devlink *devlink = info->user_ptr[0];
+	struct devlink_resource *resource;
+	struct nlattr *resources_attr;
+	struct sk_buff *skb = NULL;
+	struct nlmsghdr *nlh;
+	bool incomplete;
+	void *hdr;
+	int i;
+	int err;
+
+	resource = list_first_entry(&devlink->resource_list,
+				    struct devlink_resource, list);
+start_again:
+	err = devlink_dpipe_send_and_alloc_skb(&skb, info);
+	if (err)
+		return err;
+
+	hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
+			  &devlink_nl_family, NLM_F_MULTI, cmd);
+	if (!hdr) {
+		nlmsg_free(skb);
+		return -EMSGSIZE;
+	}
+
+	if (devlink_nl_put_handle(skb, devlink))
+		goto nla_put_failure;
+
+	resources_attr = nla_nest_start(skb, DEVLINK_ATTR_RESOURCE_LIST);
+	if (!resources_attr)
+		goto nla_put_failure;
+
+	incomplete = false;
+	i = 0;
+	list_for_each_entry_from(resource, &devlink->resource_list, list) {
+		err = devlink_resource_put(devlink, skb, resource);
+		if (err) {
+			if (!i)
+				goto err_resource_put;
+			incomplete = true;
+			break;
+		}
+		i++;
+	}
+	nla_nest_end(skb, resources_attr);
+	genlmsg_end(skb, hdr);
+	if (incomplete)
+		goto start_again;
+send_done:
+	nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
+			NLMSG_DONE, 0, flags | NLM_F_MULTI);
+	if (!nlh) {
+		err = devlink_dpipe_send_and_alloc_skb(&skb, info);
+		if (err)
+			goto err_skb_send_alloc;
+		goto send_done;
+	}
+	return genlmsg_reply(skb, info);
+
+nla_put_failure:
+	err = -EMSGSIZE;
+err_resource_put:
+err_skb_send_alloc:
+	genlmsg_cancel(skb, hdr);
+	nlmsg_free(skb);
+	return err;
+}
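
devlink_resource_fill() borrows the dpipe trick for emitting a multi-part reply from a doit handler:

	/* start_again: allocate an skb, write the genl header, then dump
	 * resources resuming from where the previous skb stopped
	 * (list_for_each_entry_from).  If a resource does not fit but at
	 * least one was written, close this skb and loop; once the walk
	 * completes, append NLMSG_DONE, allocating one more skb if even
	 * the trailer does not fit.
	 */
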
+
+static int devlink_nl_cmd_resource_dump(struct sk_buff *skb,
+					struct genl_info *info)
+{
+	struct devlink *devlink = info->user_ptr[0];
+
+	if (list_empty(&devlink->resource_list))
+		return -EOPNOTSUPP;
+
+	return devlink_resource_fill(info, DEVLINK_CMD_RESOURCE_DUMP, 0);
+}
+
+static int
+devlink_resources_validate(struct devlink *devlink,
+			   struct devlink_resource *resource,
+			   struct genl_info *info)
+{
+	struct list_head *resource_list;
+	int err = 0;
+
+	if (resource)
+		resource_list = &resource->resource_list;
+	else
+		resource_list = &devlink->resource_list;
+
+	list_for_each_entry(resource, resource_list, list) {
+		if (!resource->size_valid)
+			return -EINVAL;
+		err = devlink_resources_validate(devlink, resource, info);
+		if (err)
+			return err;
+	}
+	return err;
+}
+
+static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
+{
+	struct devlink *devlink = info->user_ptr[0];
+	int err;
+
+	if (!devlink->ops->reload)
+		return -EOPNOTSUPP;
+
+	err = devlink_resources_validate(devlink, NULL, info);
+	if (err) {
+		NL_SET_ERR_MSG_MOD(info->extack, "resources size validation failed");
+		return err;
+	}
+	return devlink->ops->reload(devlink);
+}
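
A driver opting into reload wires the callback into its devlink_ops; a minimal sketch (all my_* names hypothetical, error handling elided):

	static int my_devlink_reload(struct devlink *devlink)
	{
		struct my_priv *priv = devlink_priv(devlink);

		/* Tear down and re-init; during re-init the driver reads
		 * the accepted sizes back with devlink_resource_size_get()
		 * (added further down) so the new partitioning takes effect.
		 */
		my_core_fini(priv);
		return my_core_init(priv);
	}

	static const struct devlink_ops my_devlink_ops = {
		.reload = my_devlink_reload,
	};
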
+
 static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
 	[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING },
 	[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING },
@@ -2291,6 +2578,8 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
 	[DEVLINK_ATTR_ESWITCH_ENCAP_MODE] = { .type = NLA_U8 },
 	[DEVLINK_ATTR_DPIPE_TABLE_NAME] = { .type = NLA_NUL_STRING },
 	[DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED] = { .type = NLA_U8 },
+	[DEVLINK_ATTR_RESOURCE_ID] = { .type = NLA_U64 },
+	[DEVLINK_ATTR_RESOURCE_SIZE] = { .type = NLA_U64 },
 };
 
 static const struct genl_ops devlink_nl_ops[] = {
@@ -2322,14 +2611,16 @@ static const struct genl_ops devlink_nl_ops[] = {
 		.doit = devlink_nl_cmd_port_split_doit,
 		.policy = devlink_nl_policy,
 		.flags = GENL_ADMIN_PERM,
-		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+				  DEVLINK_NL_FLAG_NO_LOCK,
 	},
 	{
 		.cmd = DEVLINK_CMD_PORT_UNSPLIT,
 		.doit = devlink_nl_cmd_port_unsplit_doit,
 		.policy = devlink_nl_policy,
 		.flags = GENL_ADMIN_PERM,
-		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+				  DEVLINK_NL_FLAG_NO_LOCK,
 	},
 	{
 		.cmd = DEVLINK_CMD_SB_GET,
@@ -2397,8 +2688,7 @@ static const struct genl_ops devlink_nl_ops[] = {
 		.policy = devlink_nl_policy,
 		.flags = GENL_ADMIN_PERM,
 		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
-				  DEVLINK_NL_FLAG_NEED_SB |
-				  DEVLINK_NL_FLAG_LOCK_PORTS,
+				  DEVLINK_NL_FLAG_NEED_SB,
 	},
 	{
 		.cmd = DEVLINK_CMD_SB_OCC_MAX_CLEAR,
@@ -2406,8 +2696,7 @@ static const struct genl_ops devlink_nl_ops[] = {
 		.policy = devlink_nl_policy,
 		.flags = GENL_ADMIN_PERM,
 		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
-				  DEVLINK_NL_FLAG_NEED_SB |
-				  DEVLINK_NL_FLAG_LOCK_PORTS,
+				  DEVLINK_NL_FLAG_NEED_SB,
 	},
 	{
 		.cmd = DEVLINK_CMD_ESWITCH_GET,
@@ -2451,6 +2740,28 @@ static const struct genl_ops devlink_nl_ops[] = {
 		.flags = GENL_ADMIN_PERM,
 		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
 	},
+	{
+		.cmd = DEVLINK_CMD_RESOURCE_SET,
+		.doit = devlink_nl_cmd_resource_set,
+		.policy = devlink_nl_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+	},
+	{
+		.cmd = DEVLINK_CMD_RESOURCE_DUMP,
+		.doit = devlink_nl_cmd_resource_dump,
+		.policy = devlink_nl_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+	},
+	{
+		.cmd = DEVLINK_CMD_RELOAD,
+		.doit = devlink_nl_cmd_reload,
+		.policy = devlink_nl_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+				  DEVLINK_NL_FLAG_NO_LOCK,
+	},
 };
 
 static struct genl_family devlink_nl_family __ro_after_init = {
@@ -2488,6 +2799,8 @@ struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size)
 	INIT_LIST_HEAD(&devlink->port_list);
 	INIT_LIST_HEAD(&devlink->sb_list);
 	INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list);
+	INIT_LIST_HEAD(&devlink->resource_list);
+	mutex_init(&devlink->lock);
 	return devlink;
 }
 EXPORT_SYMBOL_GPL(devlink_alloc);
@@ -2550,16 +2863,16 @@ int devlink_port_register(struct devlink *devlink,
 			  struct devlink_port *devlink_port,
 			  unsigned int port_index)
 {
-	mutex_lock(&devlink_port_mutex);
+	mutex_lock(&devlink->lock);
 	if (devlink_port_index_exists(devlink, port_index)) {
-		mutex_unlock(&devlink_port_mutex);
+		mutex_unlock(&devlink->lock);
 		return -EEXIST;
 	}
 	devlink_port->devlink = devlink;
 	devlink_port->index = port_index;
 	devlink_port->registered = true;
 	list_add_tail(&devlink_port->list, &devlink->port_list);
-	mutex_unlock(&devlink_port_mutex);
+	mutex_unlock(&devlink->lock);
 	devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
 	return 0;
 }
@@ -2572,10 +2885,12 @@ EXPORT_SYMBOL_GPL(devlink_port_register);
  */
 void devlink_port_unregister(struct devlink_port *devlink_port)
 {
+	struct devlink *devlink = devlink_port->devlink;
+
 	devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL);
-	mutex_lock(&devlink_port_mutex);
+	mutex_lock(&devlink->lock);
 	list_del(&devlink_port->list);
-	mutex_unlock(&devlink_port_mutex);
+	mutex_unlock(&devlink->lock);
 }
 EXPORT_SYMBOL_GPL(devlink_port_unregister);
 
@@ -2651,7 +2966,7 @@ int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
 	struct devlink_sb *devlink_sb;
 	int err = 0;
 
-	mutex_lock(&devlink_mutex);
+	mutex_lock(&devlink->lock);
 	if (devlink_sb_index_exists(devlink, sb_index)) {
 		err = -EEXIST;
 		goto unlock;
@@ -2670,7 +2985,7 @@ int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
 	devlink_sb->egress_tc_count = egress_tc_count;
 	list_add_tail(&devlink_sb->list, &devlink->sb_list);
 unlock:
-	mutex_unlock(&devlink_mutex);
+	mutex_unlock(&devlink->lock);
 	return err;
 }
 EXPORT_SYMBOL_GPL(devlink_sb_register);
@@ -2679,11 +2994,11 @@ void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index)
 {
 	struct devlink_sb *devlink_sb;
 
-	mutex_lock(&devlink_mutex);
+	mutex_lock(&devlink->lock);
 	devlink_sb = devlink_sb_get_by_index(devlink, sb_index);
 	WARN_ON(!devlink_sb);
 	list_del(&devlink_sb->list);
-	mutex_unlock(&devlink_mutex);
+	mutex_unlock(&devlink->lock);
 	kfree(devlink_sb);
 }
 EXPORT_SYMBOL_GPL(devlink_sb_unregister);
@@ -2699,9 +3014,9 @@ EXPORT_SYMBOL_GPL(devlink_sb_unregister);
 int devlink_dpipe_headers_register(struct devlink *devlink,
 				   struct devlink_dpipe_headers *dpipe_headers)
 {
-	mutex_lock(&devlink_mutex);
+	mutex_lock(&devlink->lock);
 	devlink->dpipe_headers = dpipe_headers;
-	mutex_unlock(&devlink_mutex);
+	mutex_unlock(&devlink->lock);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(devlink_dpipe_headers_register);
@@ -2715,9 +3030,9 @@ EXPORT_SYMBOL_GPL(devlink_dpipe_headers_register);
  */
 void devlink_dpipe_headers_unregister(struct devlink *devlink)
 {
-	mutex_lock(&devlink_mutex);
+	mutex_lock(&devlink->lock);
 	devlink->dpipe_headers = NULL;
-	mutex_unlock(&devlink_mutex);
+	mutex_unlock(&devlink->lock);
 }
 EXPORT_SYMBOL_GPL(devlink_dpipe_headers_unregister);
 
@@ -2783,9 +3098,9 @@ int devlink_dpipe_table_register(struct devlink *devlink,
 	table->priv = priv;
 	table->counter_control_extern = counter_control_extern;
 
-	mutex_lock(&devlink_mutex);
+	mutex_lock(&devlink->lock);
 	list_add_tail_rcu(&table->list, &devlink->dpipe_table_list);
-	mutex_unlock(&devlink_mutex);
+	mutex_unlock(&devlink->lock);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(devlink_dpipe_table_register);
@@ -2801,20 +3116,181 @@ void devlink_dpipe_table_unregister(struct devlink *devlink,
 {
 	struct devlink_dpipe_table *table;
 
-	mutex_lock(&devlink_mutex);
+	mutex_lock(&devlink->lock);
 	table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
 					 table_name);
 	if (!table)
 		goto unlock;
 	list_del_rcu(&table->list);
-	mutex_unlock(&devlink_mutex);
+	mutex_unlock(&devlink->lock);
 	kfree_rcu(table, rcu);
 	return;
 unlock:
-	mutex_unlock(&devlink_mutex);
+	mutex_unlock(&devlink->lock);
 }
 EXPORT_SYMBOL_GPL(devlink_dpipe_table_unregister);
 
+/**
+ *	devlink_resource_register - devlink resource register
+ *
+ *	@devlink: devlink
+ *	@resource_name: resource's name
+ *	@top_hierarchy: top hierarchy
+ *	@resource_size: resource's size
+ *	@resource_id: resource's id
+ *	@parent_resource_id: resource's parent id
+ *	@size_params: size parameters
+ *	@resource_ops: resource ops
+ */
+int devlink_resource_register(struct devlink *devlink,
+			      const char *resource_name,
+			      bool top_hierarchy,
+			      u64 resource_size,
+			      u64 resource_id,
+			      u64 parent_resource_id,
+			      struct devlink_resource_size_params *size_params,
+			      const struct devlink_resource_ops *resource_ops)
+{
+	struct devlink_resource *resource;
+	struct list_head *resource_list;
+	int err = 0;
+
+	mutex_lock(&devlink->lock);
+	resource = devlink_resource_find(devlink, NULL, resource_id);
+	if (resource) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	resource = kzalloc(sizeof(*resource), GFP_KERNEL);
+	if (!resource) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	if (top_hierarchy) {
+		resource_list = &devlink->resource_list;
+	} else {
+		struct devlink_resource *parent_resource;
+
+		parent_resource = devlink_resource_find(devlink, NULL,
+							parent_resource_id);
+		if (parent_resource) {
+			resource_list = &parent_resource->resource_list;
+			resource->parent = parent_resource;
+		} else {
+			err = -EINVAL;
+			goto out;
+		}
+	}
+
+	resource->name = resource_name;
+	resource->size = resource_size;
+	resource->size_new = resource_size;
+	resource->id = resource_id;
+	resource->resource_ops = resource_ops;
+	resource->size_valid = true;
+	resource->size_params = size_params;
+	INIT_LIST_HEAD(&resource->resource_list);
+	list_add_tail(&resource->list, resource_list);
+out:
+	mutex_unlock(&devlink->lock);
+	return err;
+}
+EXPORT_SYMBOL_GPL(devlink_resource_register);
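
Registration is meant for driver init time, parents before children, since the parent lookup happens at registration. A sketch for the hierarchy above (IDs, sizes, size_params and ops structures are placeholders):

	static int my_resources_register(struct devlink *devlink)
	{
		int err;

		err = devlink_resource_register(devlink, "kvd", true,
						MY_KVD_SIZE, MY_RESOURCE_KVD,
						0 /* parent id ignored for top level */,
						&my_kvd_size_params,
						&my_kvd_resource_ops);
		if (err)
			return err;

		return devlink_resource_register(devlink, "linear", false,
						 MY_KVD_LINEAR_SIZE,
						 MY_RESOURCE_KVD_LINEAR,
						 MY_RESOURCE_KVD,
						 &my_linear_size_params,
						 &my_linear_resource_ops);
	}
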
+
+/**
+ *	devlink_resources_unregister - free all resources
+ *
+ *	@devlink: devlink
+ *	@resource: resource
+ */
+void devlink_resources_unregister(struct devlink *devlink,
+				  struct devlink_resource *resource)
+{
+	struct devlink_resource *tmp, *child_resource;
+	struct list_head *resource_list;
+
+	if (resource)
+		resource_list = &resource->resource_list;
+	else
+		resource_list = &devlink->resource_list;
+
+	if (!resource)
+		mutex_lock(&devlink->lock);
+
+	list_for_each_entry_safe(child_resource, tmp, resource_list, list) {
+		devlink_resources_unregister(devlink, child_resource);
+		list_del(&child_resource->list);
+		kfree(child_resource);
+	}
+
+	if (!resource)
+		mutex_unlock(&devlink->lock);
+}
+EXPORT_SYMBOL_GPL(devlink_resources_unregister);
+
+/**
+ *	devlink_resource_size_get - get and update size
+ *
+ *	@devlink: devlink
+ *	@resource_id: the requested resource id
+ *	@p_resource_size: ptr to update
+ */
+int devlink_resource_size_get(struct devlink *devlink,
+			      u64 resource_id,
+			      u64 *p_resource_size)
+{
+	struct devlink_resource *resource;
+	int err = 0;
+
+	mutex_lock(&devlink->lock);
+	resource = devlink_resource_find(devlink, NULL, resource_id);
+	if (!resource) {
+		err = -EINVAL;
+		goto out;
+	}
+	*p_resource_size = resource->size_new;
+	resource->size = resource->size_new;
+out:
+	mutex_unlock(&devlink->lock);
+	return err;
+}
+EXPORT_SYMBOL_GPL(devlink_resource_size_get);
+
+/**
+ *	devlink_dpipe_table_resource_set - set the resource id
+ *
+ *	@devlink: devlink
+ *	@table_name: table name
+ *	@resource_id: resource id
+ *	@resource_units: number of resource units consumed per table entry
+ */
+int devlink_dpipe_table_resource_set(struct devlink *devlink,
+				     const char *table_name, u64 resource_id,
+				     u64 resource_units)
+{
+	struct devlink_dpipe_table *table;
+	int err = 0;
+
+	mutex_lock(&devlink->lock);
+	table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
+					 table_name);
+	if (!table) {
+		err = -EINVAL;
+		goto out;
+	}
+	table->resource_id = resource_id;
+	table->resource_units = resource_units;
+	table->resource_valid = true;
+out:
+	mutex_unlock(&devlink->lock);
+	return err;
+}
+EXPORT_SYMBOL_GPL(devlink_dpipe_table_resource_set);
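
With a resource registered, a driver can bill a dpipe table against it; the id/units pair is what devlink_dpipe_table_put() above emits as DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_{ID,UNITS}. Hypothetical usage, continuing the placeholder names from the earlier sketches:

	/* Each entry of this table consumes one unit of the linear section. */
	err = devlink_dpipe_table_resource_set(devlink, "my_host_table",
					       MY_RESOURCE_KVD_LINEAR, 1);
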
+
 static int __init devlink_module_init(void)
 {
 	return genl_register_family(&devlink_nl_family);
diff --git a/net/core/filter.c b/net/core/filter.c
index d4b190e..db2ee8c 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -4310,16 +4310,15 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type,
 				      si->dst_reg, si->dst_reg,
 				      offsetof(struct xdp_rxq_info, dev));
 		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
-				      bpf_target_off(struct net_device,
-						     ifindex, 4, target_size));
+				      offsetof(struct net_device, ifindex));
 		break;
 	case offsetof(struct xdp_md, rx_queue_index):
 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
 				      si->dst_reg, si->src_reg,
 				      offsetof(struct xdp_buff, rxq));
 		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
-				      bpf_target_off(struct xdp_rxq_info,
-						queue_index, 4, target_size));
+				      offsetof(struct xdp_rxq_info,
+					       queue_index));
 		break;
 	}
 
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index d1f5fe9..7b7a14a 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -532,7 +532,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
 	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
 		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
 
-	hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
+	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
 
 	if (n->parms->dead) {
 		rc = ERR_PTR(-EINVAL);
@@ -544,7 +544,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
 	     n1 != NULL;
 	     n1 = rcu_dereference_protected(n1->next,
 			lockdep_is_held(&tbl->lock))) {
-		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
+		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
 			if (want_ref)
 				neigh_hold(n1);
 			rc = n1;
@@ -2862,7 +2862,6 @@ static int neigh_stat_seq_open(struct inode *inode, struct file *file)
 };
 
 static const struct file_operations neigh_stat_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open 	 = neigh_stat_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index 615ccab..e010bb8 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -182,7 +182,6 @@ static int dev_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations dev_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open    = dev_seq_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
@@ -202,7 +201,6 @@ static int softnet_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations softnet_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open    = softnet_seq_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
@@ -306,7 +304,6 @@ static int ptype_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations ptype_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open    = ptype_seq_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
@@ -387,7 +384,6 @@ static int dev_mc_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations dev_mc_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open    = dev_mc_seq_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 2213d45..1ccb953 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -221,17 +221,26 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id);
  */
 int peernet2id_alloc(struct net *net, struct net *peer)
 {
-	bool alloc;
+	bool alloc = false, alive = false;
 	int id;
 
 	if (refcount_read(&net->count) == 0)
 		return NETNSA_NSID_NOT_ASSIGNED;
 	spin_lock_bh(&net->nsid_lock);
-	alloc = refcount_read(&peer->count) == 0 ? false : true;
+	/*
+	 * When peer is obtained from RCU lists, we may race with
+	 * its cleanup. Check whether it's alive, and this guarantees
+	 * we never hash a peer back to net->netns_ids, after it has
+	 * we never hash a peer back to net->netns_ids after it has
+	 */
+	if (maybe_get_net(peer))
+		alive = alloc = true;
 	id = __peernet2id_alloc(net, peer, &alloc);
 	spin_unlock_bh(&net->nsid_lock);
 	if (alloc && id >= 0)
 		rtnl_net_notifyid(net, RTM_NEWNSID, id);
+	if (alive)
+		put_net(peer);
 	return id;
 }
 EXPORT_SYMBOL_GPL(peernet2id_alloc);
@@ -264,11 +273,9 @@ struct net *get_net_ns_by_id(struct net *net, int id)
 		return NULL;
 
 	rcu_read_lock();
-	spin_lock_bh(&net->nsid_lock);
 	peer = idr_find(&net->netns_ids, id);
 	if (peer)
 		peer = maybe_get_net(peer);
-	spin_unlock_bh(&net->nsid_lock);
 	rcu_read_unlock();
 
 	return peer;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index b9ce241c..4fcfcb1 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -523,7 +523,6 @@ static int pgctrl_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations pktgen_fops = {
-	.owner   = THIS_MODULE,
 	.open    = pgctrl_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
@@ -1804,7 +1803,6 @@ static int pktgen_if_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations pktgen_if_fops = {
-	.owner   = THIS_MODULE,
 	.open    = pktgen_if_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
@@ -1942,7 +1940,6 @@ static int pktgen_thread_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations pktgen_thread_fops = {
-	.owner   = THIS_MODULE,
 	.open    = pktgen_thread_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
diff --git a/net/core/sock.c b/net/core/sock.c
index 72d14b2..abf4cbf 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3362,7 +3362,6 @@ static int proto_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations proto_seq_fops = {
-	.owner		= THIS_MODULE,
 	.open		= proto_seq_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 518cea1..d93e5b8 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -2320,7 +2320,6 @@ static int dn_socket_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations dn_socket_seq_fops = {
-	.owner		= THIS_MODULE,
 	.open		= dn_socket_seq_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index d1885cf..c9f5e1e 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -1389,7 +1389,6 @@ static int dn_dev_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations dn_dev_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = dn_dev_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 528119a..6e37d9e 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -597,7 +597,6 @@ static int dn_neigh_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations dn_neigh_seq_fops = {
-	.owner		= THIS_MODULE,
 	.open		= dn_neigh_seq_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 73160d4..ef20b8e 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1860,7 +1860,6 @@ static int dn_rt_cache_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations dn_rt_cache_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = dn_rt_cache_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index a8d7c5a..f28f06c 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -223,11 +223,16 @@ static bool arp_key_eq(const struct neighbour *neigh, const void *pkey)
 
 static int arp_constructor(struct neighbour *neigh)
 {
-	__be32 addr = *(__be32 *)neigh->primary_key;
+	__be32 addr;
 	struct net_device *dev = neigh->dev;
 	struct in_device *in_dev;
 	struct neigh_parms *parms;
+	u32 inaddr_any = INADDR_ANY;
 
+	if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
+		memcpy(neigh->primary_key, &inaddr_any, arp_tbl.key_len);
+
+	addr = *(__be32 *)neigh->primary_key;
 	rcu_read_lock();
 	in_dev = __in_dev_get_rcu(dev);
 	if (!in_dev) {
@@ -1420,7 +1425,6 @@ static int arp_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations arp_seq_fops = {
-	.owner		= THIS_MODULE,
 	.open           = arp_seq_open,
 	.read           = seq_read,
 	.llseek         = seq_lseek,
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 6f00e43..296d0b9 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -991,6 +991,7 @@ static int esp_init_state(struct xfrm_state *x)
 
 		switch (encap->encap_type) {
 		default:
+			err = -EINVAL;
 			goto error;
 		case UDP_ENCAP_ESPINUDP:
 			x->props.header_len += sizeof(struct udphdr);
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index c359f3c..32fbd9b 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -38,7 +38,8 @@ static struct sk_buff **esp4_gro_receive(struct sk_buff **head,
 	__be32 spi;
 	int err;
 
-	skb_pull(skb, offset);
+	if (!pskb_pull(skb, offset))
+		return NULL;
 
 	if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
 		goto out;
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 5ddc4aef..5530cd6 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2334,7 +2334,6 @@ static int fib_triestat_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations fib_triestat_fops = {
-	.owner	= THIS_MODULE,
 	.open	= fib_triestat_seq_open,
 	.read	= seq_read,
 	.llseek	= seq_lseek,
@@ -2521,7 +2520,6 @@ static int fib_trie_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations fib_trie_fops = {
-	.owner  = THIS_MODULE,
 	.open   = fib_trie_seq_open,
 	.read   = seq_read,
 	.llseek = seq_lseek,
@@ -2715,7 +2713,6 @@ static int fib_route_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations fib_route_fops = {
-	.owner  = THIS_MODULE,
 	.open   = fib_route_seq_open,
 	.read   = seq_read,
 	.llseek = seq_lseek,
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 726f6b6..02f00be 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2832,7 +2832,6 @@ static int igmp_mc_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations igmp_mc_seq_fops = {
-	.owner		=	THIS_MODULE,
 	.open		=	igmp_mc_seq_open,
 	.read		=	seq_read,
 	.llseek		=	seq_lseek,
@@ -2979,7 +2978,6 @@ static int igmp_mcf_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations igmp_mcf_seq_fops = {
-	.owner		=	THIS_MODULE,
 	.open		=	igmp_mcf_seq_open,
 	.read		=	seq_read,
 	.llseek		=	seq_lseek,
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index abdebca..e9e488e 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1322,7 +1322,6 @@ static int pnp_seq_open(struct inode *indoe, struct file *file)
 }
 
 static const struct file_operations pnp_seq_fops = {
-	.owner		= THIS_MODULE,
 	.open		= pnp_seq_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index fd5f19c..a819fab 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -3045,7 +3045,6 @@ static int ipmr_vif_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations ipmr_vif_fops = {
-	.owner	 = THIS_MODULE,
 	.open    = ipmr_vif_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
@@ -3198,7 +3197,6 @@ static int ipmr_mfc_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations ipmr_mfc_fops = {
-	.owner	 = THIS_MODULE,
 	.open    = ipmr_mfc_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 69060e3..c29a6ca 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -776,7 +776,6 @@ static ssize_t clusterip_proc_write(struct file *file, const char __user *input,
 }
 
 static const struct file_operations clusterip_proc_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = clusterip_proc_open,
 	.read	 = seq_read,
 	.write	 = clusterip_proc_write,
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 9f37c47..dc5edc8 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -83,7 +83,6 @@ static int sockstat_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations sockstat_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = sockstat_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
@@ -467,7 +466,6 @@ static int snmp_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations snmp_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = snmp_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
@@ -515,7 +513,6 @@ static int netstat_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations netstat_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = netstat_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 5e570aa..136544b 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -1119,7 +1119,6 @@ static int raw_v4_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations raw_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = raw_v4_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index f0ed031..49cc1c1 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -240,7 +240,6 @@ static int rt_cache_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations rt_cache_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = rt_cache_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
@@ -331,7 +330,6 @@ static int rt_cpu_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations rt_cpu_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = rt_cpu_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
@@ -369,7 +367,6 @@ static int rt_acct_proc_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations rt_acct_proc_fops = {
-	.owner		= THIS_MODULE,
 	.open		= rt_acct_proc_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -2762,6 +2759,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
 		if (err == 0 && rt->dst.error)
 			err = -rt->dst.error;
 	} else {
+		fl4.flowi4_iif = LOOPBACK_IFINDEX;
 		rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
 		err = 0;
 		if (IS_ERR(rt))
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5d20324..95738aa 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2358,7 +2358,6 @@ static int tcp4_seq_show(struct seq_file *seq, void *v)
 }
 
 static const struct file_operations tcp_afinfo_seq_fops = {
-	.owner   = THIS_MODULE,
 	.open    = tcp_seq_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index db72619..8533215 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2714,7 +2714,6 @@ int udp4_seq_show(struct seq_file *seq, void *v)
 }
 
 static const struct file_operations udp_afinfo_seq_fops = {
-	.owner    = THIS_MODULE,
 	.open     = udp_seq_open,
 	.read     = seq_read,
 	.llseek   = seq_lseek,
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 59f10fe..f96614e 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -75,7 +75,6 @@ static struct inet_protosw udplite4_protosw = {
 #ifdef CONFIG_PROC_FS
 
 static const struct file_operations udplite_afinfo_seq_fops = {
-	.owner    = THIS_MODULE,
 	.open     = udp_seq_open,
 	.read     = seq_read,
 	.llseek   = seq_lseek,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 2435f7a..ab99cb6 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4214,7 +4214,6 @@ static int if6_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations if6_fops = {
-	.owner		= THIS_MODULE,
 	.open		= if6_seq_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 0bbab8a..8e085cc 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -533,7 +533,6 @@ static int ac6_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations ac6_seq_fops = {
-	.owner		=	THIS_MODULE,
 	.open		=	ac6_seq_open,
 	.read		=	seq_read,
 	.llseek		=	seq_lseek,
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 7c888c6..97513f3 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -900,13 +900,12 @@ static int esp6_init_state(struct xfrm_state *x)
 			x->props.header_len += IPV4_BEET_PHMAXLEN +
 					       (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
 		break;
+	default:
 	case XFRM_MODE_TRANSPORT:
 		break;
 	case XFRM_MODE_TUNNEL:
 		x->props.header_len += sizeof(struct ipv6hdr);
 		break;
-	default:
-		goto error;
 	}
 
 	align = ALIGN(crypto_aead_blocksize(aead), 4);
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index 0bb7d54..44d109c 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -60,7 +60,8 @@ static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
 	int nhoff;
 	int err;
 
-	skb_pull(skb, offset);
+	if (!pskb_pull(skb, offset))
+		return NULL;
 
 	if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
 		goto out;
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 7f59c8f..3dab664 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -836,7 +836,6 @@ static int ip6fl_seq_release(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations ip6fl_seq_fops = {
-	.owner		=	THIS_MODULE,
 	.open		=	ip6fl_seq_open,
 	.read		=	seq_read,
 	.llseek		=	seq_lseek,
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 18547a4..a4a9445 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1215,14 +1215,16 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
 	v6_cork->tclass = ipc6->tclass;
 	if (rt->dst.flags & DST_XFRM_TUNNEL)
 		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
-		      rt->dst.dev->mtu : dst_mtu(&rt->dst);
+		      READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst);
 	else
 		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
-		      rt->dst.dev->mtu : dst_mtu(xfrm_dst_path(&rt->dst));
+			READ_ONCE(rt->dst.dev->mtu) : dst_mtu(xfrm_dst_path(&rt->dst));
 	if (np->frag_size < mtu) {
 		if (np->frag_size)
 			mtu = np->frag_size;
 	}
+	if (mtu < IPV6_MIN_MTU)
+		return -EINVAL;
 	cork->base.fragsize = mtu;
 	if (dst_allfrag(xfrm_dst_path(&rt->dst)))
 		cork->base.flags |= IPCORK_ALLFRAG;
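
The new bound fails the cork setup early rather than misbehaving later; IPV6_MIN_MTU is 1280, the protocol minimum from RFC 8200:

	/* Corking with an MTU below IPV6_MIN_MTU would later build
	 * fragments smaller than the IPv6 protocol minimum, so reject
	 * the cork setup instead of emitting invalid packets.
	 */
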
@@ -1742,6 +1744,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
 	cork.base.flags = 0;
 	cork.base.addr = 0;
 	cork.base.opt = NULL;
+	cork.base.dst = NULL;
 	v6_cork.opt = NULL;
 	err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6);
 	if (err) {
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 890f9bda..754ef84 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -477,7 +477,6 @@ static int ip6mr_vif_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations ip6mr_vif_fops = {
-	.owner	 = THIS_MODULE,
 	.open    = ip6mr_vif_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
@@ -609,7 +608,6 @@ static int ipmr_mfc_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations ip6mr_mfc_fops = {
-	.owner	 = THIS_MODULE,
 	.open    = ipmr_mfc_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 8446426..40b223a 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -2758,7 +2758,6 @@ static int igmp6_mc_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations igmp6_mc_seq_fops = {
-	.owner		=	THIS_MODULE,
 	.open		=	igmp6_mc_seq_open,
 	.read		=	seq_read,
 	.llseek		=	seq_lseek,
@@ -2913,7 +2912,6 @@ static int igmp6_mcf_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations igmp6_mcf_seq_fops = {
-	.owner		=	THIS_MODULE,
 	.open		=	igmp6_mcf_seq_open,
 	.read		=	seq_read,
 	.llseek		=	seq_lseek,
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index e88bcb8..b678142 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -58,7 +58,6 @@ static int sockstat6_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations sockstat6_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = sockstat6_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
@@ -248,7 +247,6 @@ static int snmp6_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations snmp6_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = snmp6_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
@@ -274,7 +272,6 @@ static int snmp6_dev_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations snmp6_dev_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = snmp6_dev_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 761a473..ddda7eb 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1308,7 +1308,6 @@ static int raw6_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations raw6_seq_fops = {
-	.owner =	THIS_MODULE,
 	.open =		raw6_seq_open,
 	.read =		seq_read,
 	.llseek =	seq_lseek,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c37bd95..f85da2f 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -4648,7 +4648,6 @@ static int ip6_route_dev_notify(struct notifier_block *this,
 #ifdef CONFIG_PROC_FS
 
 static const struct file_operations ipv6_route_proc_fops = {
-	.owner		= THIS_MODULE,
 	.open		= ipv6_route_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -4676,7 +4675,6 @@ static int rt6_stats_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations rt6_stats_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = rt6_stats_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index c0f7e69..a1ab29e 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1883,7 +1883,6 @@ static int tcp6_seq_show(struct seq_file *seq, void *v)
 }
 
 static const struct file_operations tcp6_afinfo_seq_fops = {
-	.owner   = THIS_MODULE,
 	.open    = tcp_seq_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index eecf9f0..52e3ea0 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1479,7 +1479,6 @@ int udp6_seq_show(struct seq_file *seq, void *v)
 }
 
 static const struct file_operations udp6_afinfo_seq_fops = {
-	.owner    = THIS_MODULE,
 	.open     = udp_seq_open,
 	.read     = seq_read,
 	.llseek   = seq_lseek,
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index 2784cc3..14ae32b 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -94,7 +94,6 @@ void udplitev6_exit(void)
 #ifdef CONFIG_PROC_FS
 
 static const struct file_operations udplite6_afinfo_seq_fops = {
-	.owner    = THIS_MODULE,
 	.open     = udp_seq_open,
 	.read     = seq_read,
 	.llseek   = seq_lseek,
diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
index 38a3d51..b9232e4 100644
--- a/net/ipx/ipx_proc.c
+++ b/net/ipx/ipx_proc.c
@@ -260,7 +260,6 @@ static int ipx_seq_socket_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations ipx_seq_interface_fops = {
-	.owner		= THIS_MODULE,
 	.open           = ipx_seq_interface_open,
 	.read           = seq_read,
 	.llseek         = seq_lseek,
@@ -268,7 +267,6 @@ static const struct file_operations ipx_seq_interface_fops = {
 };
 
 static const struct file_operations ipx_seq_route_fops = {
-	.owner		= THIS_MODULE,
 	.open           = ipx_seq_route_open,
 	.read           = seq_read,
 	.llseek         = seq_lseek,
@@ -276,7 +274,6 @@ static const struct file_operations ipx_seq_route_fops = {
 };
 
 static const struct file_operations ipx_seq_socket_fops = {
-	.owner		= THIS_MODULE,
 	.open           = ipx_seq_socket_open,
 	.read           = seq_read,
 	.llseek         = seq_lseek,
diff --git a/net/kcm/kcmproc.c b/net/kcm/kcmproc.c
index bd57233..9d5649e 100644
--- a/net/kcm/kcmproc.c
+++ b/net/kcm/kcmproc.c
@@ -247,7 +247,6 @@ static int kcm_seq_show(struct seq_file *seq, void *v)
 }
 
 static const struct file_operations kcm_seq_fops = {
-	.owner		= THIS_MODULE,
 	.open		= kcm_seq_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -397,7 +396,6 @@ static int kcm_stats_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations kcm_stats_seq_fops = {
-	.owner   = THIS_MODULE,
 	.open    = kcm_stats_seq_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 3dffb89..7e2e718 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -401,6 +401,11 @@ static int verify_address_len(const void *p)
 #endif
 	int len;
 
+	if (sp->sadb_address_len <
+	    DIV_ROUND_UP(sizeof(*sp) + offsetofend(typeof(*addr), sa_family),
+			 sizeof(uint64_t)))
+		return -EINVAL;
+
 	switch (addr->sa_family) {
 	case AF_INET:
 		len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin), sizeof(uint64_t));
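
The added length check mirrors the dereference that follows it:

	/* sadb_address_len counts 64-bit words.  The minimum accepted is
	 * DIV_ROUND_UP(sizeof(struct sadb_address) +
	 *              offsetofend(struct sockaddr, sa_family), 8),
	 * i.e. just enough to cover the header plus the sa_family field
	 * read in the switch below; shorter, attacker-controlled values
	 * previously let that read run past the end of the message.
	 */
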
@@ -511,6 +516,9 @@ static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void *
 		uint16_t ext_type;
 		int ext_len;
 
+		if (len < sizeof(*ehdr))
+			return -EINVAL;
+
 		ext_len  = ehdr->sadb_ext_len;
 		ext_len *= sizeof(uint64_t);
 		ext_type = ehdr->sadb_ext_type;
@@ -2194,8 +2202,10 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_ev
 		return PTR_ERR(out_skb);
 
 	err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
-	if (err < 0)
+	if (err < 0) {
+		kfree_skb(out_skb);
 		return err;
+	}
 
 	out_hdr = (struct sadb_msg *) out_skb->data;
 	out_hdr->sadb_msg_version = PF_KEY_V2;
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index b412fc3..59f246d 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1734,7 +1734,6 @@ static int pppol2tp_proc_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations pppol2tp_proc_fops = {
-	.owner		= THIS_MODULE,
 	.open		= pppol2tp_proc_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index 29c509c..66821e8 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -225,7 +225,6 @@ static int llc_seq_core_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations llc_seq_socket_fops = {
-	.owner		= THIS_MODULE,
 	.open		= llc_seq_socket_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -233,7 +232,6 @@ static const struct file_operations llc_seq_socket_fops = {
 };
 
 static const struct file_operations llc_seq_core_fops = {
-	.owner		= THIS_MODULE,
 	.open		= llc_seq_core_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 972bfe1..54cbf5b 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2417,7 +2417,7 @@ int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
 						   struct nlmsghdr *,
 						   struct netlink_ext_ack *))
 {
-	struct netlink_ext_ack extack = {};
+	struct netlink_ext_ack extack;
 	struct nlmsghdr *nlh;
 	int err;
 
@@ -2438,6 +2438,7 @@ int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
 		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
 			goto ack;
 
+		memset(&extack, 0, sizeof(extack));
 		err = cb(skb, nlh, &extack);
 		if (err == -EINTR)
 			goto skip;
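
The extack reset moves inside the loop because one skb may batch several netlink messages:

	/* Previously extack was zeroed once per skb, so if message N set
	 * bad_attr, an error message or a cookie, and message N+1 failed
	 * without touching extack, the stale fields were echoed back in
	 * the ack for N+1.  Re-zeroing per message restores a clean slate.
	 */
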
@@ -2637,7 +2638,6 @@ static int netlink_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations netlink_seq_fops = {
-	.owner		= THIS_MODULE,
 	.open		= netlink_seq_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 7ed9d44..9ba30c6 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1344,7 +1344,6 @@ static int nr_info_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations nr_info_fops = {
-	.owner = THIS_MODULE,
 	.open = nr_info_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 75e6ba9..b5a7dcb 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -901,7 +901,6 @@ static int nr_node_info_open(struct inode *inode, struct file *file)
 }
 
 const struct file_operations nr_nodes_fops = {
-	.owner = THIS_MODULE,
 	.open = nr_node_info_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -968,7 +967,6 @@ static int nr_neigh_info_open(struct inode *inode, struct file *file)
 }
 
 const struct file_operations nr_neigh_fops = {
-	.owner = THIS_MODULE,
 	.open = nr_neigh_info_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index bce1f78..f143908 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -49,7 +49,6 @@
 #include <net/mpls.h>
 #include <net/vxlan.h>
 #include <net/tun_proto.h>
-#include <net/erspan.h>
 
 #include "flow_netlink.h"
 
@@ -334,8 +333,7 @@ size_t ovs_tun_key_attr_size(void)
 		 * OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS and covered by it.
 		 */
 		+ nla_total_size(2)    /* OVS_TUNNEL_KEY_ATTR_TP_SRC */
-		+ nla_total_size(2)    /* OVS_TUNNEL_KEY_ATTR_TP_DST */
-		+ nla_total_size(4);   /* OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS */
+		+ nla_total_size(2);   /* OVS_TUNNEL_KEY_ATTR_TP_DST */
 }
 
 static size_t ovs_nsh_key_attr_size(void)
@@ -402,7 +400,6 @@ static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1]
 						.next = ovs_vxlan_ext_key_lens },
 	[OVS_TUNNEL_KEY_ATTR_IPV6_SRC]      = { .len = sizeof(struct in6_addr) },
 	[OVS_TUNNEL_KEY_ATTR_IPV6_DST]      = { .len = sizeof(struct in6_addr) },
-	[OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS]   = { .len = sizeof(u32) },
 };
 
 static const struct ovs_len_tbl
@@ -634,33 +631,6 @@ static int vxlan_tun_opt_from_nlattr(const struct nlattr *attr,
 	return 0;
 }
 
-static int erspan_tun_opt_from_nlattr(const struct nlattr *attr,
-				      struct sw_flow_match *match, bool is_mask,
-				      bool log)
-{
-	unsigned long opt_key_offset;
-	struct erspan_metadata opts;
-
-	BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts));
-
-	memset(&opts, 0, sizeof(opts));
-	opts.u.index = nla_get_be32(attr);
-
-	/* Index has only 20-bit */
-	if (ntohl(opts.u.index) & ~INDEX_MASK) {
-		OVS_NLERR(log, "ERSPAN index number %x too large.",
-			  ntohl(opts.u.index));
-		return -EINVAL;
-	}
-
-	SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), is_mask);
-	opt_key_offset = TUN_METADATA_OFFSET(sizeof(opts));
-	SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, &opts, sizeof(opts),
-				  is_mask);
-
-	return 0;
-}
-
 static int ip_tun_from_nlattr(const struct nlattr *attr,
 			      struct sw_flow_match *match, bool is_mask,
 			      bool log)
@@ -768,19 +738,6 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
 			break;
 		case OVS_TUNNEL_KEY_ATTR_PAD:
 			break;
-		case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
-			if (opts_type) {
-				OVS_NLERR(log, "Multiple metadata blocks provided");
-				return -EINVAL;
-			}
-
-			err = erspan_tun_opt_from_nlattr(a, match, is_mask, log);
-			if (err)
-				return err;
-
-			tun_flags |= TUNNEL_ERSPAN_OPT;
-			opts_type = type;
-			break;
 		default:
 			OVS_NLERR(log, "Unknown IP tunnel attribute %d",
 				  type);
@@ -905,10 +862,6 @@ static int __ip_tun_to_nlattr(struct sk_buff *skb,
 		else if (output->tun_flags & TUNNEL_VXLAN_OPT &&
 			 vxlan_opt_to_nlattr(skb, tun_opts, swkey_tun_opts_len))
 			return -EMSGSIZE;
-		else if (output->tun_flags & TUNNEL_ERSPAN_OPT &&
-			 nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
-				      ((struct erspan_metadata *)tun_opts)->u.index))
-			return -EMSGSIZE;
 	}
 
 	return 0;
@@ -2533,8 +2486,6 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
 			break;
 		case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
 			break;
-		case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
-			break;
 		}
 	};
 
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ee7aa0b..05d3186 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -4531,7 +4531,6 @@ static int packet_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations packet_seq_fops = {
-	.owner		= THIS_MODULE,
 	.open		= packet_seq_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 1b050dd..fa2f13a 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -635,7 +635,6 @@ static int pn_sock_open(struct inode *inode, struct file *file)
 }
 
 const struct file_operations pn_sock_seq_fops = {
-	.owner = THIS_MODULE,
 	.open = pn_sock_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -818,7 +817,6 @@ static int pn_res_open(struct inode *inode, struct file *file)
 }
 
 const struct file_operations pn_res_seq_fops = {
-	.owner = THIS_MODULE,
 	.open = pn_res_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 6a5c499..083bd25 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1461,7 +1461,6 @@ static int rose_info_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations rose_info_fops = {
-	.owner = THIS_MODULE,
 	.open = rose_info_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 8ca3124..178619d 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -1156,7 +1156,6 @@ static int rose_nodes_open(struct inode *inode, struct file *file)
 }
 
 const struct file_operations rose_nodes_fops = {
-	.owner = THIS_MODULE,
 	.open = rose_nodes_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -1240,7 +1239,6 @@ static int rose_neigh_open(struct inode *inode, struct file *file)
 }
 
 const struct file_operations rose_neigh_fops = {
-	.owner = THIS_MODULE,
 	.open = rose_neigh_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -1326,7 +1324,6 @@ static int rose_route_open(struct inode *inode, struct file *file)
 }
 
 const struct file_operations rose_routes_fops = {
-	.owner = THIS_MODULE,
 	.open = rose_route_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
index 7421656..f79f260 100644
--- a/net/rxrpc/proc.c
+++ b/net/rxrpc/proc.c
@@ -125,7 +125,6 @@ static int rxrpc_call_seq_open(struct inode *inode, struct file *file)
 }
 
 const struct file_operations rxrpc_call_seq_fops = {
-	.owner		= THIS_MODULE,
 	.open		= rxrpc_call_seq_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -217,7 +216,6 @@ static int rxrpc_connection_seq_open(struct inode *inode, struct file *file)
 }
 
 const struct file_operations rxrpc_connection_seq_fops = {
-	.owner		= THIS_MODULE,
 	.open		= rxrpc_connection_seq_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 6708b69..e500d11 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -24,6 +24,7 @@
 #include <linux/init.h>
 #include <linux/kmod.h>
 #include <linux/slab.h>
+#include <linux/idr.h>
 #include <net/net_namespace.h>
 #include <net/sock.h>
 #include <net/netlink.h>
@@ -121,8 +122,7 @@ static inline u32 tcf_auto_prio(struct tcf_proto *tp)
 }
 
 static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
-					  u32 prio, u32 parent, struct Qdisc *q,
-					  struct tcf_chain *chain)
+					  u32 prio, struct tcf_chain *chain)
 {
 	struct tcf_proto *tp;
 	int err;
@@ -156,8 +156,6 @@ static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
 	tp->classify = tp->ops->classify;
 	tp->protocol = protocol;
 	tp->prio = prio;
-	tp->classid = parent;
-	tp->q = q;
 	tp->chain = chain;
 
 	err = tp->ops->init(tp);
@@ -179,6 +177,12 @@ static void tcf_proto_destroy(struct tcf_proto *tp)
 	kfree_rcu(tp, rcu);
 }
 
+struct tcf_filter_chain_list_item {
+	struct list_head list;
+	tcf_chain_head_change_t *chain_head_change;
+	void *chain_head_change_priv;
+};
+
 static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
 					  u32 chain_index)
 {
@@ -187,6 +191,7 @@ static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
 	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
 	if (!chain)
 		return NULL;
+	INIT_LIST_HEAD(&chain->filter_chain_list);
 	list_add_tail(&chain->list, &block->chain_list);
 	chain->block = block;
 	chain->index = chain_index;
@@ -194,12 +199,19 @@ static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
 	return chain;
 }
 
+static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
+				       struct tcf_proto *tp_head)
+{
+	if (item->chain_head_change)
+		item->chain_head_change(tp_head, item->chain_head_change_priv);
+}
 static void tcf_chain_head_change(struct tcf_chain *chain,
 				  struct tcf_proto *tp_head)
 {
-	if (chain->chain_head_change)
-		chain->chain_head_change(tp_head,
-					 chain->chain_head_change_priv);
+	struct tcf_filter_chain_list_item *item;
+
+	list_for_each_entry(item, &chain->filter_chain_list, list)
+		tcf_chain_head_change_item(item, tp_head);
 }
 
 static void tcf_chain_flush(struct tcf_chain *chain)
@@ -253,47 +265,149 @@ void tcf_chain_put(struct tcf_chain *chain)
 }
 EXPORT_SYMBOL(tcf_chain_put);
 
-static void tcf_block_offload_cmd(struct tcf_block *block, struct Qdisc *q,
-				  struct tcf_block_ext_info *ei,
-				  enum tc_block_command command)
+static bool tcf_block_offload_in_use(struct tcf_block *block)
 {
-	struct net_device *dev = q->dev_queue->dev;
+	return block->offloadcnt;
+}
+
+static int tcf_block_offload_cmd(struct tcf_block *block,
+				 struct net_device *dev,
+				 struct tcf_block_ext_info *ei,
+				 enum tc_block_command command)
+{
 	struct tc_block_offload bo = {};
 
-	if (!dev->netdev_ops->ndo_setup_tc)
-		return;
 	bo.command = command;
 	bo.binder_type = ei->binder_type;
 	bo.block = block;
-	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
+	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
 }
 
-static void tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
-				   struct tcf_block_ext_info *ei)
+static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
+				  struct tcf_block_ext_info *ei)
 {
-	tcf_block_offload_cmd(block, q, ei, TC_BLOCK_BIND);
+	struct net_device *dev = q->dev_queue->dev;
+	int err;
+
+	if (!dev->netdev_ops->ndo_setup_tc)
+		goto no_offload_dev_inc;
+
+	/* If the tc offload feature is disabled and the block we try to
+	 * bind to already has some offloaded filters, refuse to bind.
+	 */
+	if (!tc_can_offload(dev) && tcf_block_offload_in_use(block))
+		return -EOPNOTSUPP;
+
+	err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND);
+	if (err == -EOPNOTSUPP)
+		goto no_offload_dev_inc;
+	return err;
+
+no_offload_dev_inc:
+	if (tcf_block_offload_in_use(block))
+		return -EOPNOTSUPP;
+	block->nooffloaddevcnt++;
+	return 0;
 }
 
 static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
 				     struct tcf_block_ext_info *ei)
 {
-	tcf_block_offload_cmd(block, q, ei, TC_BLOCK_UNBIND);
+	struct net_device *dev = q->dev_queue->dev;
+	int err;
+
+	if (!dev->netdev_ops->ndo_setup_tc)
+		goto no_offload_dev_dec;
+	err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND);
+	if (err == -EOPNOTSUPP)
+		goto no_offload_dev_dec;
+	return;
+
+no_offload_dev_dec:
+	WARN_ON(block->nooffloaddevcnt-- == 0);
 }
 
-int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
-		      struct tcf_block_ext_info *ei,
-		      struct netlink_ext_ack *extack)
+static int
+tcf_chain_head_change_cb_add(struct tcf_chain *chain,
+			     struct tcf_block_ext_info *ei,
+			     struct netlink_ext_ack *extack)
 {
-	struct tcf_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
+	struct tcf_filter_chain_list_item *item;
+
+	item = kmalloc(sizeof(*item), GFP_KERNEL);
+	if (!item) {
+		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
+		return -ENOMEM;
+	}
+	item->chain_head_change = ei->chain_head_change;
+	item->chain_head_change_priv = ei->chain_head_change_priv;
+	if (chain->filter_chain)
+		tcf_chain_head_change_item(item, chain->filter_chain);
+	list_add(&item->list, &chain->filter_chain_list);
+	return 0;
+}
+
+static void
+tcf_chain_head_change_cb_del(struct tcf_chain *chain,
+			     struct tcf_block_ext_info *ei)
+{
+	struct tcf_filter_chain_list_item *item;
+
+	list_for_each_entry(item, &chain->filter_chain_list, list) {
+		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
+		    (item->chain_head_change == ei->chain_head_change &&
+		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
+			tcf_chain_head_change_item(item, NULL);
+			list_del(&item->list);
+			kfree(item);
+			return;
+		}
+	}
+	WARN_ON(1);
+}
+
+struct tcf_net {
+	struct idr idr;
+};
+
+static unsigned int tcf_net_id;
+
+static int tcf_block_insert(struct tcf_block *block, struct net *net,
+			    u32 block_index, struct netlink_ext_ack *extack)
+{
+	struct tcf_net *tn = net_generic(net, tcf_net_id);
+	int err;
+
+	err = idr_alloc_ext(&tn->idr, block, NULL, block_index,
+			    block_index + 1, GFP_KERNEL);
+	if (err)
+		return err;
+	block->index = block_index;
+	return 0;
+}
+
+static void tcf_block_remove(struct tcf_block *block, struct net *net)
+{
+	struct tcf_net *tn = net_generic(net, tcf_net_id);
+
+	idr_remove_ext(&tn->idr, block->index);
+}
+
+static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
+					  struct netlink_ext_ack *extack)
+{
+	struct tcf_block *block;
 	struct tcf_chain *chain;
 	int err;
 
+	block = kzalloc(sizeof(*block), GFP_KERNEL);
 	if (!block) {
 		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 	}
 	INIT_LIST_HEAD(&block->chain_list);
 	INIT_LIST_HEAD(&block->cb_list);
+	INIT_LIST_HEAD(&block->owner_list);
 
 	/* Create chain 0 by default, it has to be always present. */
 	chain = tcf_chain_create(block, 0);
@@ -302,17 +416,149 @@ int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
 		err = -ENOMEM;
 		goto err_chain_create;
 	}
-	WARN_ON(!ei->chain_head_change);
-	chain->chain_head_change = ei->chain_head_change;
-	chain->chain_head_change_priv = ei->chain_head_change_priv;
+	block->refcnt = 1;
+	block->net = net;
 	block->q = q;
-	tcf_block_offload_bind(block, q, ei);
-	*p_block = block;
-	return 0;
+	return block;
 
 err_chain_create:
 	kfree(block);
+	return ERR_PTR(err);
+}
+
+static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
+{
+	struct tcf_net *tn = net_generic(net, tcf_net_id);
+
+	return idr_find_ext(&tn->idr, block_index);
+}
+
+static struct tcf_chain *tcf_block_chain_zero(struct tcf_block *block)
+{
+	return list_first_entry(&block->chain_list, struct tcf_chain, list);
+}
+
+struct tcf_block_owner_item {
+	struct list_head list;
+	struct Qdisc *q;
+	enum tcf_block_binder_type binder_type;
+};
+
+static void
+tcf_block_owner_netif_keep_dst(struct tcf_block *block,
+			       struct Qdisc *q,
+			       enum tcf_block_binder_type binder_type)
+{
+	if (block->keep_dst &&
+	    binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
+	    binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
+		netif_keep_dst(qdisc_dev(q));
+}
+
+void tcf_block_netif_keep_dst(struct tcf_block *block)
+{
+	struct tcf_block_owner_item *item;
+
+	block->keep_dst = true;
+	list_for_each_entry(item, &block->owner_list, list)
+		tcf_block_owner_netif_keep_dst(block, item->q,
+					       item->binder_type);
+}
+EXPORT_SYMBOL(tcf_block_netif_keep_dst);
+
+static int tcf_block_owner_add(struct tcf_block *block,
+			       struct Qdisc *q,
+			       enum tcf_block_binder_type binder_type)
+{
+	struct tcf_block_owner_item *item;
+
+	item = kmalloc(sizeof(*item), GFP_KERNEL);
+	if (!item)
+		return -ENOMEM;
+	item->q = q;
+	item->binder_type = binder_type;
+	list_add(&item->list, &block->owner_list);
+	return 0;
+}
+
+static void tcf_block_owner_del(struct tcf_block *block,
+				struct Qdisc *q,
+				enum tcf_block_binder_type binder_type)
+{
+	struct tcf_block_owner_item *item;
+
+	list_for_each_entry(item, &block->owner_list, list) {
+		if (item->q == q && item->binder_type == binder_type) {
+			list_del(&item->list);
+			kfree(item);
+			return;
+		}
+	}
+	WARN_ON(1);
+}
+
+int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
+		      struct tcf_block_ext_info *ei,
+		      struct netlink_ext_ack *extack)
+{
+	struct net *net = qdisc_net(q);
+	struct tcf_block *block = NULL;
+	bool created = false;
+	int err;
+
+	if (ei->block_index) {
+		/* A non-zero block_index means a shared block is requested */
+		block = tcf_block_lookup(net, ei->block_index);
+		if (block)
+			block->refcnt++;
+	}
+
+	if (!block) {
+		block = tcf_block_create(net, q, extack);
+		if (IS_ERR(block))
+			return PTR_ERR(block);
+		created = true;
+		if (ei->block_index) {
+			err = tcf_block_insert(block, net,
+					       ei->block_index, extack);
+			if (err)
+				goto err_block_insert;
+		}
+	}
+
+	err = tcf_block_owner_add(block, q, ei->binder_type);
+	if (err)
+		goto err_block_owner_add;
+
+	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);
+
+	err = tcf_chain_head_change_cb_add(tcf_block_chain_zero(block),
+					   ei, extack);
+	if (err)
+		goto err_chain_head_change_cb_add;
+
+	err = tcf_block_offload_bind(block, q, ei);
+	if (err)
+		goto err_block_offload_bind;
+
+	*p_block = block;
+	return 0;
+
+err_block_offload_bind:
+	tcf_chain_head_change_cb_del(tcf_block_chain_zero(block), ei);
+err_chain_head_change_cb_add:
+	tcf_block_owner_del(block, q, ei->binder_type);
+err_block_owner_add:
+	if (created) {
+		if (tcf_block_shared(block))
+			tcf_block_remove(block, net);
+err_block_insert:
+		kfree(tcf_block_chain_zero(block));
+		kfree(block);
+	} else {
+		block->refcnt--;
+	}
 	return err;
 }
 EXPORT_SYMBOL(tcf_block_get_ext);
@@ -346,26 +592,35 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
 {
 	struct tcf_chain *chain, *tmp;
 
-	/* Hold a refcnt for all chains, so that they don't disappear
-	 * while we are iterating.
-	 */
 	if (!block)
 		return;
-	list_for_each_entry(chain, &block->chain_list, list)
-		tcf_chain_hold(chain);
+	tcf_chain_head_change_cb_del(tcf_block_chain_zero(block), ei);
+	tcf_block_owner_del(block, q, ei->binder_type);
 
-	list_for_each_entry(chain, &block->chain_list, list)
-		tcf_chain_flush(chain);
+	if (--block->refcnt == 0) {
+		if (tcf_block_shared(block))
+			tcf_block_remove(block, block->net);
+
+		/* Hold a refcnt for all chains, so that they don't disappear
+		 * while we are iterating.
+		 */
+		list_for_each_entry(chain, &block->chain_list, list)
+			tcf_chain_hold(chain);
+
+		list_for_each_entry(chain, &block->chain_list, list)
+			tcf_chain_flush(chain);
+	}
 
 	tcf_block_offload_unbind(block, q, ei);
 
-	/* At this point, all the chains should have refcnt >= 1. */
-	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
-		tcf_chain_put(chain);
+	if (block->refcnt == 0) {
+		/* At this point, all the chains should have refcnt >= 1. */
+		list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
+			tcf_chain_put(chain);
 
-	/* Finally, put chain 0 and allow block to be freed. */
-	chain = list_first_entry(&block->chain_list, struct tcf_chain, list);
-	tcf_chain_put(chain);
+		/* Finally, put chain 0 and allow block to be freed. */
+		tcf_chain_put(tcf_block_chain_zero(block));
+	}
 }
 EXPORT_SYMBOL(tcf_block_put_ext);
 
@@ -423,9 +678,16 @@ struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
 {
 	struct tcf_block_cb *block_cb;
 
+	/* At this point, playback of previous block cb calls is not supported,
+	 * so refuse to register a callback on a block which already has some
+	 * offloaded filters present.
+	 */
+	if (tcf_block_offload_in_use(block))
+		return ERR_PTR(-EOPNOTSUPP);
+
 	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
 	if (!block_cb)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	block_cb->cb = cb;
 	block_cb->cb_ident = cb_ident;
 	block_cb->cb_priv = cb_priv;
@@ -441,7 +703,7 @@ int tcf_block_cb_register(struct tcf_block *block,
 	struct tcf_block_cb *block_cb;
 
 	block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv);
-	return block_cb ? 0 : -ENOMEM;
+	return IS_ERR(block_cb) ? PTR_ERR(block_cb) : 0;
 }
 EXPORT_SYMBOL(tcf_block_cb_register);
 
@@ -471,6 +733,10 @@ static int tcf_block_cb_call(struct tcf_block *block, enum tc_setup_type type,
 	int ok_count = 0;
 	int err;
 
+	/* Make sure all netdevs sharing this block are offload-capable. */
+	if (block->nooffloaddevcnt && err_stop)
+		return -EOPNOTSUPP;
+
 	list_for_each_entry(block_cb, &block->cb_list, list) {
 		err = block_cb->cb(type, type_data, block_cb->cb_priv);
 		if (err) {
@@ -524,8 +790,9 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 #ifdef CONFIG_NET_CLS_ACT
 reset:
 	if (unlikely(limit++ >= max_reclassify_loop)) {
-		net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n",
-				       tp->q->ops->id, tp->prio & 0xffff,
+		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
+				       tp->chain->block->index,
+				       tp->prio & 0xffff,
 				       ntohs(tp->protocol));
 		return TC_ACT_SHOT;
 	}
@@ -598,8 +865,9 @@ static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
 }
 
 static int tcf_fill_node(struct net *net, struct sk_buff *skb,
-			 struct tcf_proto *tp, struct Qdisc *q, u32 parent,
-			 void *fh, u32 portid, u32 seq, u16 flags, int event)
+			 struct tcf_proto *tp, struct tcf_block *block,
+			 struct Qdisc *q, u32 parent, void *fh,
+			 u32 portid, u32 seq, u16 flags, int event)
 {
 	struct tcmsg *tcm;
 	struct nlmsghdr  *nlh;
@@ -612,8 +880,13 @@ static int tcf_fill_node(struct net *net, struct sk_buff *skb,
 	tcm->tcm_family = AF_UNSPEC;
 	tcm->tcm__pad1 = 0;
 	tcm->tcm__pad2 = 0;
-	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
-	tcm->tcm_parent = parent;
+	if (q) {
+		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
+		tcm->tcm_parent = parent;
+	} else {
+		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
+		tcm->tcm_block_index = block->index;
+	}
 	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
 	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
 		goto nla_put_failure;
@@ -636,8 +909,8 @@ static int tcf_fill_node(struct net *net, struct sk_buff *skb,
 
 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
 			  struct nlmsghdr *n, struct tcf_proto *tp,
-			  struct Qdisc *q, u32 parent,
-			  void *fh, int event, bool unicast)
+			  struct tcf_block *block, struct Qdisc *q,
+			  u32 parent, void *fh, int event, bool unicast)
 {
 	struct sk_buff *skb;
 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
@@ -646,8 +919,8 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
 	if (!skb)
 		return -ENOBUFS;
 
-	if (tcf_fill_node(net, skb, tp, q, parent, fh, portid, n->nlmsg_seq,
-			  n->nlmsg_flags, event) <= 0) {
+	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
+			  n->nlmsg_seq, n->nlmsg_flags, event) <= 0) {
 		kfree_skb(skb);
 		return -EINVAL;
 	}
@@ -661,8 +934,8 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
 
 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
 			      struct nlmsghdr *n, struct tcf_proto *tp,
-			      struct Qdisc *q, u32 parent,
-			      void *fh, bool unicast, bool *last)
+			      struct tcf_block *block, struct Qdisc *q,
+			      u32 parent, void *fh, bool unicast, bool *last)
 {
 	struct sk_buff *skb;
 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
@@ -672,8 +945,8 @@ static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
 	if (!skb)
 		return -ENOBUFS;
 
-	if (tcf_fill_node(net, skb, tp, q, parent, fh, portid, n->nlmsg_seq,
-			  n->nlmsg_flags, RTM_DELTFILTER) <= 0) {
+	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
+			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER) <= 0) {
 		kfree_skb(skb);
 		return -EINVAL;
 	}
@@ -692,15 +965,16 @@ static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
 }
 
 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
-				 struct Qdisc *q, u32 parent,
-				 struct nlmsghdr *n,
+				 struct tcf_block *block, struct Qdisc *q,
+				 u32 parent, struct nlmsghdr *n,
 				 struct tcf_chain *chain, int event)
 {
 	struct tcf_proto *tp;
 
 	for (tp = rtnl_dereference(chain->filter_chain);
 	     tp; tp = rtnl_dereference(tp->next))
-		tfilter_notify(net, oskb, n, tp, q, parent, 0, event, false);
+		tfilter_notify(net, oskb, n, tp, block,
+			       q, parent, 0, event, false);
 }
 
 /* Add/change/delete/get a filter node */
@@ -716,13 +990,11 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 	bool prio_allocate;
 	u32 parent;
 	u32 chain_index;
-	struct net_device *dev;
-	struct Qdisc  *q;
+	struct Qdisc *q = NULL;
 	struct tcf_chain_info chain_info;
 	struct tcf_chain *chain = NULL;
 	struct tcf_block *block;
 	struct tcf_proto *tp;
-	const struct Qdisc_class_ops *cops;
 	unsigned long cl;
 	void *fh;
 	int err;
@@ -769,41 +1041,58 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 
 	/* Find head of filter chain. */
 
-	/* Find link */
-	dev = __dev_get_by_index(net, t->tcm_ifindex);
-	if (dev == NULL)
-		return -ENODEV;
-
-	/* Find qdisc */
-	if (!parent) {
-		q = dev->qdisc;
-		parent = q->handle;
+	if (t->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
+		block = tcf_block_lookup(net, t->tcm_block_index);
+		if (!block) {
+			NL_SET_ERR_MSG(extack, "Block of given index was not found");
+			err = -EINVAL;
+			goto errout;
+		}
 	} else {
-		q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
-		if (q == NULL)
+		const struct Qdisc_class_ops *cops;
+		struct net_device *dev;
+
+		/* Find link */
+		dev = __dev_get_by_index(net, t->tcm_ifindex);
+		if (!dev)
+			return -ENODEV;
+
+		/* Find qdisc */
+		if (!parent) {
+			q = dev->qdisc;
+			parent = q->handle;
+		} else {
+			q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
+			if (!q)
+				return -EINVAL;
+		}
+
+		/* Is it classful? */
+		cops = q->ops->cl_ops;
+		if (!cops)
 			return -EINVAL;
-	}
 
-	/* Is it classful? */
-	cops = q->ops->cl_ops;
-	if (!cops)
-		return -EINVAL;
+		if (!cops->tcf_block)
+			return -EOPNOTSUPP;
 
-	if (!cops->tcf_block)
-		return -EOPNOTSUPP;
+		/* Do we search for filter, attached to class? */
+		if (TC_H_MIN(parent)) {
+			cl = cops->find(q, parent);
+			if (cl == 0)
+				return -ENOENT;
+		}
 
-	/* Do we search for filter, attached to class? */
-	if (TC_H_MIN(parent)) {
-		cl = cops->find(q, parent);
-		if (cl == 0)
-			return -ENOENT;
-	}
-
-	/* And the last stroke */
-	block = cops->tcf_block(q, cl, extack);
-	if (!block) {
-		err = -EINVAL;
-		goto errout;
+		/* And the last stroke */
+		block = cops->tcf_block(q, cl, extack);
+		if (!block) {
+			err = -EINVAL;
+			goto errout;
+		}
+		if (tcf_block_shared(block)) {
+			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
+			err = -EOPNOTSUPP;
+			goto errout;
+		}
 	}
 
 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
@@ -819,7 +1108,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 	}
 
 	if (n->nlmsg_type == RTM_DELTFILTER && prio == 0) {
-		tfilter_notify_chain(net, skb, q, parent, n,
+		tfilter_notify_chain(net, skb, block, q, parent, n,
 				     chain, RTM_DELTFILTER);
 		tcf_chain_flush(chain);
 		err = 0;
@@ -851,7 +1140,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 			prio = tcf_auto_prio(tcf_chain_tp_prev(&chain_info));
 
 		tp = tcf_proto_create(nla_data(tca[TCA_KIND]),
-				      protocol, prio, parent, q, chain);
+				      protocol, prio, chain);
 		if (IS_ERR(tp)) {
 			err = PTR_ERR(tp);
 			goto errout;
@@ -867,7 +1156,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 	if (!fh) {
 		if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
 			tcf_chain_tp_remove(chain, &chain_info, tp);
-			tfilter_notify(net, skb, n, tp, q, parent, fh,
+			tfilter_notify(net, skb, n, tp, block, q, parent, fh,
 				       RTM_DELTFILTER, false);
 			tcf_proto_destroy(tp);
 			err = 0;
@@ -892,8 +1181,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 			}
 			break;
 		case RTM_DELTFILTER:
-			err = tfilter_del_notify(net, skb, n, tp, q, parent,
-						 fh, false, &last);
+			err = tfilter_del_notify(net, skb, n, tp, block,
+						 q, parent, fh, false, &last);
 			if (err)
 				goto errout;
 			if (last) {
@@ -902,8 +1191,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 			}
 			goto errout;
 		case RTM_GETTFILTER:
-			err = tfilter_notify(net, skb, n, tp, q, parent, fh,
-					     RTM_NEWTFILTER, true);
+			err = tfilter_notify(net, skb, n, tp, block, q, parent,
+					     fh, RTM_NEWTFILTER, true);
 			goto errout;
 		default:
 			err = -EINVAL;
@@ -916,7 +1205,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 	if (err == 0) {
 		if (tp_created)
 			tcf_chain_tp_insert(chain, &chain_info, tp);
-		tfilter_notify(net, skb, n, tp, q, parent, fh,
+		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
 			       RTM_NEWTFILTER, false);
 	} else {
 		if (tp_created)
@@ -936,6 +1225,7 @@ struct tcf_dump_args {
 	struct tcf_walker w;
 	struct sk_buff *skb;
 	struct netlink_callback *cb;
+	struct tcf_block *block;
 	struct Qdisc *q;
 	u32 parent;
 };
@@ -945,7 +1235,7 @@ static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
 	struct tcf_dump_args *a = (void *)arg;
 	struct net *net = sock_net(a->skb->sk);
 
-	return tcf_fill_node(net, a->skb, tp, a->q, a->parent,
+	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
 			     n, NETLINK_CB(a->cb->skb).portid,
 			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
 			     RTM_NEWTFILTER);
@@ -956,6 +1246,7 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
 			   long index_start, long *p_index)
 {
 	struct net *net = sock_net(skb->sk);
+	struct tcf_block *block = chain->block;
 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
 	struct tcf_dump_args arg;
 	struct tcf_proto *tp;
@@ -974,7 +1265,7 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
 			memset(&cb->args[1], 0,
 			       sizeof(cb->args) - sizeof(cb->args[0]));
 		if (cb->args[1] == 0) {
-			if (tcf_fill_node(net, skb, tp, q, parent, 0,
+			if (tcf_fill_node(net, skb, tp, block, q, parent, 0,
 					  NETLINK_CB(cb->skb).portid,
 					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
 					  RTM_NEWTFILTER) <= 0)
@@ -987,6 +1278,7 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
 		arg.w.fn = tcf_node_dump;
 		arg.skb = skb;
 		arg.cb = cb;
+		arg.block = block;
 		arg.q = q;
 		arg.parent = parent;
 		arg.w.stop = 0;
@@ -1005,13 +1297,10 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct net *net = sock_net(skb->sk);
 	struct nlattr *tca[TCA_MAX + 1];
-	struct net_device *dev;
-	struct Qdisc *q;
+	struct Qdisc *q = NULL;
 	struct tcf_block *block;
 	struct tcf_chain *chain;
 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
-	unsigned long cl = 0;
-	const struct Qdisc_class_ops *cops;
 	long index_start;
 	long index;
 	u32 parent;
@@ -1024,32 +1313,44 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
 	if (err)
 		return err;
 
-	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
-	if (!dev)
-		return skb->len;
-
-	parent = tcm->tcm_parent;
-	if (!parent) {
-		q = dev->qdisc;
-		parent = q->handle;
-	} else {
-		q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
-	}
-	if (!q)
-		goto out;
-	cops = q->ops->cl_ops;
-	if (!cops)
-		goto out;
-	if (!cops->tcf_block)
-		goto out;
-	if (TC_H_MIN(tcm->tcm_parent)) {
-		cl = cops->find(q, tcm->tcm_parent);
-		if (cl == 0)
+	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
+		block = tcf_block_lookup(net, tcm->tcm_block_index);
+		if (!block)
 			goto out;
+	} else {
+		const struct Qdisc_class_ops *cops;
+		struct net_device *dev;
+		unsigned long cl = 0;
+
+		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+		if (!dev)
+			return skb->len;
+
+		parent = tcm->tcm_parent;
+		if (!parent) {
+			q = dev->qdisc;
+			parent = q->handle;
+		} else {
+			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
+		}
+		if (!q)
+			goto out;
+		cops = q->ops->cl_ops;
+		if (!cops)
+			goto out;
+		if (!cops->tcf_block)
+			goto out;
+		if (TC_H_MIN(tcm->tcm_parent)) {
+			cl = cops->find(q, tcm->tcm_parent);
+			if (cl == 0)
+				goto out;
+		}
+		block = cops->tcf_block(q, cl, NULL);
+		if (!block)
+			goto out;
+		if (tcf_block_shared(block))
+			q = NULL;
 	}
-	block = cops->tcf_block(q, cl, NULL);
-	if (!block)
-		goto out;
 
 	index_start = cb->args[0];
 	index = 0;
@@ -1252,18 +1553,50 @@ int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
 }
 EXPORT_SYMBOL(tc_setup_cb_call);
 
+static __net_init int tcf_net_init(struct net *net)
+{
+	struct tcf_net *tn = net_generic(net, tcf_net_id);
+
+	idr_init(&tn->idr);
+	return 0;
+}
+
+static void __net_exit tcf_net_exit(struct net *net)
+{
+	struct tcf_net *tn = net_generic(net, tcf_net_id);
+
+	idr_destroy(&tn->idr);
+}
+
+static struct pernet_operations tcf_net_ops = {
+	.init = tcf_net_init,
+	.exit = tcf_net_exit,
+	.id   = &tcf_net_id,
+	.size = sizeof(struct tcf_net),
+};
+
 static int __init tc_filter_init(void)
 {
+	int err;
+
 	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
 	if (!tc_filter_wq)
 		return -ENOMEM;
 
+	err = register_pernet_subsys(&tcf_net_ops);
+	if (err)
+		goto err_register_pernet_subsys;
+
 	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0);
 	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0);
 	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
 		      tc_dump_tfilter, 0);
 
 	return 0;
+
+err_register_pernet_subsys:
+	destroy_workqueue(tc_filter_wq);
+	return err;
 }
 
 subsys_initcall(tc_filter_init);
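
The classifier diffs that follow (cls_bpf, cls_flower, cls_matchall, cls_u32)
replace direct TCA_CLS_FLAGS_IN_HW manipulation with calls to
tcf_block_offload_inc()/tcf_block_offload_dec(), which keep block->offloadcnt
(the counter read by tcf_block_offload_in_use() above) in sync with the
per-filter flag. The helper definitions are not part of this excerpt; a
minimal sketch of what they presumably look like, guarded so a filter is
counted at most once:

  static inline void tcf_block_offload_inc(struct tcf_block *block,
                                           u32 *flags)
  {
          if (*flags & TCA_CLS_FLAGS_IN_HW)
                  return;         /* this filter is already counted */
          *flags |= TCA_CLS_FLAGS_IN_HW;
          block->offloadcnt++;
  }

  static inline void tcf_block_offload_dec(struct tcf_block *block,
                                           u32 *flags)
  {
          if (!(*flags & TCA_CLS_FLAGS_IN_HW))
                  return;
          *flags &= ~TCA_CLS_FLAGS_IN_HW;
          block->offloadcnt--;
  }
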
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 8d78e7f..cf72aef 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -167,13 +167,16 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
 	cls_bpf.exts_integrated = obj->exts_integrated;
 	cls_bpf.gen_flags = obj->gen_flags;
 
+	if (oldprog)
+		tcf_block_offload_dec(block, &oldprog->gen_flags);
+
 	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
 	if (prog) {
 		if (err < 0) {
 			cls_bpf_offload_cmd(tp, oldprog, prog);
 			return err;
 		} else if (err > 0) {
-			prog->gen_flags |= TCA_CLS_FLAGS_IN_HW;
+			tcf_block_offload_inc(block, &prog->gen_flags);
 		}
 	}
 
@@ -392,8 +395,8 @@ static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
 	prog->bpf_name = name;
 	prog->filter = fp;
 
-	if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
-		netif_keep_dst(qdisc_dev(tp->q));
+	if (fp->dst_needed)
+		tcf_block_netif_keep_dst(tp->chain->block);
 
 	return 0;
 }
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 25c2a88..28cd6fb 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -526,7 +526,7 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 
 	timer_setup(&fnew->perturb_timer, flow_perturbation, TIMER_DEFERRABLE);
 
-	netif_keep_dst(qdisc_dev(tp->q));
+	tcf_block_netif_keep_dst(tp->chain->block);
 
 	if (tb[TCA_FLOW_KEYS]) {
 		fnew->keymask = keymask;
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 6132a73..f61df19 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -229,6 +229,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
 
 	tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
 			 &cls_flower, false);
+	tcf_block_offload_dec(block, &f->flags);
 }
 
 static int fl_hw_replace_filter(struct tcf_proto *tp,
@@ -256,7 +257,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
 		fl_hw_destroy_filter(tp, f);
 		return err;
 	} else if (err > 0) {
-		f->flags |= TCA_CLS_FLAGS_IN_HW;
+		tcf_block_offload_inc(block, &f->flags);
 	}
 
 	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 66d4e00..d0e57c8 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -81,6 +81,7 @@ static void mall_destroy_hw_filter(struct tcf_proto *tp,
 	cls_mall.cookie = cookie;
 
 	tc_setup_cb_call(block, NULL, TC_SETUP_CLSMATCHALL, &cls_mall, false);
+	tcf_block_offload_dec(block, &head->flags);
 }
 
 static int mall_replace_hw_filter(struct tcf_proto *tp,
@@ -103,7 +104,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
 		mall_destroy_hw_filter(tp, head, cookie);
 		return err;
 	} else if (err > 0) {
-		head->flags |= TCA_CLS_FLAGS_IN_HW;
+		tcf_block_offload_inc(block, &head->flags);
 	}
 
 	if (skip_sw && !(head->flags & TCA_CLS_FLAGS_IN_HW))
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index ac9a5b8..a1f2b1b 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -527,7 +527,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
 		if (f->handle < f1->handle)
 			break;
 
-	netif_keep_dst(qdisc_dev(tp->q));
+	tcf_block_netif_keep_dst(tp->chain->block);
 	rcu_assign_pointer(f->next, f1);
 	rcu_assign_pointer(*fp, f);
 
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 507859c..020d328 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -529,16 +529,17 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
 	return 0;
 }
 
-static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
+static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n)
 {
 	struct tcf_block *block = tp->chain->block;
 	struct tc_cls_u32_offload cls_u32 = {};
 
 	tc_cls_common_offload_init(&cls_u32.common, tp);
 	cls_u32.command = TC_CLSU32_DELETE_KNODE;
-	cls_u32.knode.handle = handle;
+	cls_u32.knode.handle = n->handle;
 
 	tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false);
+	tcf_block_offload_dec(block, &n->flags);
 }
 
 static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
@@ -567,10 +568,10 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
 
 	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
 	if (err < 0) {
-		u32_remove_hw_knode(tp, n->handle);
+		u32_remove_hw_knode(tp, n);
 		return err;
 	} else if (err > 0) {
-		n->flags |= TCA_CLS_FLAGS_IN_HW;
+		tcf_block_offload_inc(block, &n->flags);
 	}
 
 	if (skip_sw && !(n->flags & TCA_CLS_FLAGS_IN_HW))
@@ -589,7 +590,7 @@ static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
 			RCU_INIT_POINTER(ht->ht[h],
 					 rtnl_dereference(n->next));
 			tcf_unbind_filter(tp, &n->res);
-			u32_remove_hw_knode(tp, n->handle);
+			u32_remove_hw_knode(tp, n);
 			idr_remove_ext(&ht->handle_idr, n->handle);
 			if (tcf_exts_get_net(&n->exts))
 				call_rcu(&n->rcu, u32_delete_key_freepf_rcu);
@@ -682,7 +683,7 @@ static int u32_delete(struct tcf_proto *tp, void *arg, bool *last)
 		goto out;
 
 	if (TC_U32_KEY(ht->handle)) {
-		u32_remove_hw_knode(tp, ht->handle);
+		u32_remove_hw_knode(tp, (struct tc_u_knode *)ht);
 		ret = u32_delete_key(tp, (struct tc_u_knode *)ht);
 		goto out;
 	}
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 8a04c36..d512f49 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -791,6 +791,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
 	unsigned char *b = skb_tail_pointer(skb);
 	struct gnet_dump d;
 	struct qdisc_size_table *stab;
+	u32 block_index;
 	__u32 qlen;
 
 	cond_resched();
@@ -807,6 +808,18 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
 	tcm->tcm_info = refcount_read(&q->refcnt);
 	if (nla_put_string(skb, TCA_KIND, q->ops->id))
 		goto nla_put_failure;
+	if (q->ops->ingress_block_get) {
+		block_index = q->ops->ingress_block_get(q);
+		if (block_index &&
+		    nla_put_u32(skb, TCA_INGRESS_BLOCK, block_index))
+			goto nla_put_failure;
+	}
+	if (q->ops->egress_block_get) {
+		block_index = q->ops->egress_block_get(q);
+		if (block_index &&
+		    nla_put_u32(skb, TCA_EGRESS_BLOCK, block_index))
+			goto nla_put_failure;
+	}
 	if (q->ops->dump && q->ops->dump(q, skb) < 0)
 		goto nla_put_failure;
 	if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
@@ -994,6 +1007,40 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 	return err;
 }
 
+static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
+				   struct netlink_ext_ack *extack)
+{
+	u32 block_index;
+
+	if (tca[TCA_INGRESS_BLOCK]) {
+		block_index = nla_get_u32(tca[TCA_INGRESS_BLOCK]);
+
+		if (!block_index) {
+			NL_SET_ERR_MSG(extack, "Ingress block index cannot be 0");
+			return -EINVAL;
+		}
+		if (!sch->ops->ingress_block_set) {
+			NL_SET_ERR_MSG(extack, "Ingress block sharing is not supported");
+			return -EOPNOTSUPP;
+		}
+		sch->ops->ingress_block_set(sch, block_index);
+	}
+	if (tca[TCA_EGRESS_BLOCK]) {
+		block_index = nla_get_u32(tca[TCA_EGRESS_BLOCK]);
+
+		if (!block_index) {
+			NL_SET_ERR_MSG(extack, "Egress block index cannot be 0");
+			return -EINVAL;
+		}
+		if (!sch->ops->egress_block_set) {
+			NL_SET_ERR_MSG(extack, "Egress block sharing is not supported");
+			return -EOPNOTSUPP;
+		}
+		sch->ops->egress_block_set(sch, block_index);
+	}
+	return 0;
+}
+
 /* lockdep annotation is needed for ingress; egress gets it only for name */
 static struct lock_class_key qdisc_tx_lock;
 static struct lock_class_key qdisc_rx_lock;
@@ -1088,23 +1135,16 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
 		netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
 	}
 
+	err = qdisc_block_indexes_set(sch, tca, extack);
+	if (err)
+		goto err_out3;
+
 	if (ops->init) {
 		err = ops->init(sch, tca[TCA_OPTIONS], extack);
 		if (err != 0)
 			goto err_out5;
 	}
 
-	if (qdisc_is_percpu_stats(sch)) {
-		sch->cpu_bstats =
-			netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
-		if (!sch->cpu_bstats)
-			goto err_out4;
-
-		sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
-		if (!sch->cpu_qstats)
-			goto err_out4;
-	}
-
 	if (tca[TCA_STAB]) {
 		stab = qdisc_get_stab(tca[TCA_STAB], extack);
 		if (IS_ERR(stab)) {
@@ -1151,7 +1191,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
 		ops->destroy(sch);
 err_out3:
 	dev_put(dev);
-	kfree((char *) sch - sch->padded);
+	qdisc_free(sch);
 err_out2:
 	module_put(ops->owner);
 err_out:
@@ -1159,8 +1199,6 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
 	return NULL;
 
 err_out4:
-	free_percpu(sch->cpu_bstats);
-	free_percpu(sch->cpu_qstats);
 	/*
 	 * Any broken qdiscs that would require a ops->reset() here?
 	 * The qdisc was never in action so it shouldn't be necessary.
@@ -1182,6 +1220,10 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
 			NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc");
 			return -EINVAL;
 		}
+		if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
+			NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
+			return -EOPNOTSUPP;
+		}
 		err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);
 		if (err)
 			return err;
@@ -1907,6 +1949,11 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
 		}
 	}
 
+	if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
+		NL_SET_ERR_MSG(extack, "Shared blocks are not supported for classes");
+		return -EOPNOTSUPP;
+	}
+
 	new_cl = cl;
 	err = -EOPNOTSUPP;
 	if (cops->change)
@@ -2046,7 +2093,6 @@ static int psched_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations psched_fops = {
-	.owner = THIS_MODULE,
 	.open = psched_open,
 	.read  = seq_read,
 	.llseek = seq_lseek,
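
Note that qdisc_create() above no longer allocates the per-cpu stats itself
(both the allocation and the matching frees under err_out4 are removed). With
the new .static_flags field that sch_ingress.c below sets to TCQ_F_CPUSTATS,
the flags are known before ->init() runs, so the allocation presumably moves
into qdisc_alloc() in sch_generic.c, roughly along these lines (a sketch, not
part of this excerpt):

  sch->ops = ops;
  sch->flags = ops->static_flags;
  if (qdisc_is_percpu_stats(sch)) {
          sch->cpu_bstats =
                  netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
          if (!sch->cpu_bstats)
                  goto errout;
          sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
          if (!sch->cpu_qstats) {
                  free_percpu(sch->cpu_bstats);
                  goto errout;
          }
  }

This also explains why err_out3 now calls qdisc_free() instead of open-coding
the kfree: qdisc_free(), made non-static in sch_generic.c below, releases the
per-cpu stats together with the qdisc itself.
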
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index a883c50..ef8b4ec 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -907,7 +907,7 @@ void qdisc_reset(struct Qdisc *qdisc)
 }
 EXPORT_SYMBOL(qdisc_reset);
 
-static void qdisc_free(struct Qdisc *qdisc)
+void qdisc_free(struct Qdisc *qdisc)
 {
 	if (qdisc_is_percpu_stats(qdisc)) {
 		free_percpu(qdisc->cpu_bstats);
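
Since qdisc_free() loses its static qualifier here so that qdisc_create()'s
error path can reach it, a matching declaration presumably lands in
include/net/sch_generic.h (not shown in this excerpt):

  void qdisc_free(struct Qdisc *qdisc);
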
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index 7ca2be2..ce3f552 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -61,6 +61,20 @@ static void clsact_chain_head_change(struct tcf_proto *tp_head, void *priv)
 	struct mini_Qdisc_pair *miniqp = priv;
 
 	mini_qdisc_pair_swap(miniqp, tp_head);
+}
+
+static void ingress_ingress_block_set(struct Qdisc *sch, u32 block_index)
+{
+	struct ingress_sched_data *q = qdisc_priv(sch);
+
+	q->block_info.block_index = block_index;
+}
+
+static u32 ingress_ingress_block_get(struct Qdisc *sch)
+{
+	struct ingress_sched_data *q = qdisc_priv(sch);
+
+	return q->block_info.block_index;
 }
 
 static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
@@ -68,7 +82,6 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
 {
 	struct ingress_sched_data *q = qdisc_priv(sch);
 	struct net_device *dev = qdisc_dev(sch);
-	int err;
 
 	net_inc_ingress_queue();
 
@@ -78,13 +91,7 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
 	q->block_info.chain_head_change = clsact_chain_head_change;
 	q->block_info.chain_head_change_priv = &q->miniqp;
 
-	err = tcf_block_get_ext(&q->block, sch, &q->block_info, extack);
-	if (err)
-		return err;
-
-	sch->flags |= TCQ_F_CPUSTATS;
-
-	return 0;
+	return tcf_block_get_ext(&q->block, sch, &q->block_info, extack);
 }
 
 static void ingress_destroy(struct Qdisc *sch)
@@ -120,13 +127,16 @@ static const struct Qdisc_class_ops ingress_class_ops = {
 };
 
 static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
-	.cl_ops		=	&ingress_class_ops,
-	.id		=	"ingress",
-	.priv_size	=	sizeof(struct ingress_sched_data),
-	.init		=	ingress_init,
-	.destroy	=	ingress_destroy,
-	.dump		=	ingress_dump,
-	.owner		=	THIS_MODULE,
+	.cl_ops			=	&ingress_class_ops,
+	.id			=	"ingress",
+	.priv_size		=	sizeof(struct ingress_sched_data),
+	.static_flags		=	TCQ_F_CPUSTATS,
+	.init			=	ingress_init,
+	.destroy		=	ingress_destroy,
+	.dump			=	ingress_dump,
+	.ingress_block_set	=	ingress_ingress_block_set,
+	.ingress_block_get	=	ingress_ingress_block_get,
+	.owner			=	THIS_MODULE,
 };
 
 struct clsact_sched_data {
@@ -170,6 +180,34 @@ static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl,
 	}
 }
 
+static void clsact_ingress_block_set(struct Qdisc *sch, u32 block_index)
+{
+	struct clsact_sched_data *q = qdisc_priv(sch);
+
+	q->ingress_block_info.block_index = block_index;
+}
+
+static void clsact_egress_block_set(struct Qdisc *sch, u32 block_index)
+{
+	struct clsact_sched_data *q = qdisc_priv(sch);
+
+	q->egress_block_info.block_index = block_index;
+}
+
+static u32 clsact_ingress_block_get(struct Qdisc *sch)
+{
+	struct clsact_sched_data *q = qdisc_priv(sch);
+
+	return q->ingress_block_info.block_index;
+}
+
+static u32 clsact_egress_block_get(struct Qdisc *sch)
+{
+	struct clsact_sched_data *q = qdisc_priv(sch);
+
+	return q->egress_block_info.block_index;
+}
+
 static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
 		       struct netlink_ext_ack *extack)
 {
@@ -197,14 +235,7 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
 	q->egress_block_info.chain_head_change = clsact_chain_head_change;
 	q->egress_block_info.chain_head_change_priv = &q->miniqp_egress;
 
-	err = tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info,
-				extack);
-	if (err)
-		return err;
-
-	sch->flags |= TCQ_F_CPUSTATS;
-
-	return 0;
+	return tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info, extack);
 }
 
 static void clsact_destroy(struct Qdisc *sch)
@@ -228,13 +259,18 @@ static const struct Qdisc_class_ops clsact_class_ops = {
 };
 
 static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
-	.cl_ops		=	&clsact_class_ops,
-	.id		=	"clsact",
-	.priv_size	=	sizeof(struct clsact_sched_data),
-	.init		=	clsact_init,
-	.destroy	=	clsact_destroy,
-	.dump		=	ingress_dump,
-	.owner		=	THIS_MODULE,
+	.cl_ops			=	&clsact_class_ops,
+	.id			=	"clsact",
+	.priv_size		=	sizeof(struct clsact_sched_data),
+	.static_flags		=	TCQ_F_CPUSTATS,
+	.init			=	clsact_init,
+	.destroy		=	clsact_destroy,
+	.dump			=	ingress_dump,
+	.ingress_block_set	=	clsact_ingress_block_set,
+	.egress_block_set	=	clsact_egress_block_set,
+	.ingress_block_get	=	clsact_ingress_block_get,
+	.egress_block_get	=	clsact_egress_block_get,
+	.owner			=	THIS_MODULE,
 };
 
 static int __init ingress_module_init(void)
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 0af1c12..16644b3 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -167,6 +167,7 @@ static int red_offload(struct Qdisc *sch, bool enable)
 		opt.set.max = q->parms.qth_max >> q->parms.Wlog;
 		opt.set.probability = q->parms.max_P;
 		opt.set.is_ecn = red_use_ecn(q);
+		opt.set.qstats = &sch->qstats;
 	} else {
 		opt.command = TC_RED_DESTROY;
 	}
@@ -322,7 +323,6 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
 	};
 	int err;
 
-	sch->qstats.backlog = q->qdisc->qstats.backlog;
 	err = red_dump_offload_stats(sch, &opt);
 	if (err)
 		goto nla_put_failure;
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 3b18085e..5d4c15b 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -826,6 +826,7 @@ static int sctp_inet6_af_supported(sa_family_t family, struct sctp_sock *sp)
 	case AF_INET:
 		if (!__ipv6_only_sock(sctp_opt2sk(sp)))
 			return 1;
+		/* fallthru */
 	default:
 		return 0;
 	}
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index af9b5eb..f211b3d 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -916,9 +916,9 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 			break;
 
 		case SCTP_CID_ABORT:
-			if (sctp_test_T_bit(chunk)) {
+			if (sctp_test_T_bit(chunk))
 				packet->vtag = asoc->c.my_vtag;
-			}
+			/* fallthru */
 		/* The following chunks are "response" chunks, i.e.
 		 * they are generated in response to something we
 		 * received.  If we are sending these, then we can
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 4545bc2..537545eb 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -95,7 +95,6 @@ static int sctp_snmp_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations sctp_snmp_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = sctp_snmp_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6a54ff0..7ff444e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -85,7 +85,7 @@
 static int sctp_writeable(struct sock *sk);
 static void sctp_wfree(struct sk_buff *skb);
 static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
-				size_t msg_len, struct sock **orig_sk);
+				size_t msg_len);
 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
 static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
 static int sctp_wait_for_accept(struct sock *sk, long timeo);
@@ -351,16 +351,14 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
 	if (len < sizeof (struct sockaddr))
 		return NULL;
 
+	if (!opt->pf->af_supported(addr->sa.sa_family, opt))
+		return NULL;
+
 	/* V4 mapped address are really of AF_INET family */
 	if (addr->sa.sa_family == AF_INET6 &&
-	    ipv6_addr_v4mapped(&addr->v6.sin6_addr)) {
-		if (!opt->pf->af_supported(AF_INET, opt))
-			return NULL;
-	} else {
-		/* Does this PF support this AF? */
-		if (!opt->pf->af_supported(addr->sa.sa_family, opt))
-			return NULL;
-	}
+	    ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
+	    !opt->pf->af_supported(AF_INET, opt))
+		return NULL;
 
 	/* If we get this far, af is valid. */
 	af = sctp_get_af_specific(addr->sa.sa_family);
@@ -1900,8 +1898,14 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
 		 */
 		if (sinit) {
 			if (sinit->sinit_num_ostreams) {
-				asoc->c.sinit_num_ostreams =
-					sinit->sinit_num_ostreams;
+				__u16 outcnt = sinit->sinit_num_ostreams;
+
+				asoc->c.sinit_num_ostreams = outcnt;
+				/* outcnt has changed, so re-initialize the stream */
+				err = sctp_stream_init(&asoc->stream, outcnt, 0,
+						       GFP_KERNEL);
+				if (err)
+					goto out_free;
 			}
 			if (sinit->sinit_max_instreams) {
 				asoc->c.sinit_max_instreams =
@@ -1988,7 +1992,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 	if (!sctp_wspace(asoc)) {
 		/* sk can be changed by peel off when waiting for buf. */
-		err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len, &sk);
+		err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
 		if (err) {
 			if (err == -ESRCH) {
 				/* asoc is already dead. */
@@ -8121,12 +8125,12 @@ void sctp_sock_rfree(struct sk_buff *skb)
 
 /* Helper function to wait for space in the sndbuf.  */
 static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
-				size_t msg_len, struct sock **orig_sk)
+				size_t msg_len)
 {
 	struct sock *sk = asoc->base.sk;
-	int err = 0;
 	long current_timeo = *timeo_p;
 	DEFINE_WAIT(wait);
+	int err = 0;
 
 	pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
 		 *timeo_p, msg_len);
@@ -8155,17 +8159,13 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
 		release_sock(sk);
 		current_timeo = schedule_timeout(current_timeo);
 		lock_sock(sk);
-		if (sk != asoc->base.sk) {
-			release_sock(sk);
-			sk = asoc->base.sk;
-			lock_sock(sk);
-		}
+		if (sk != asoc->base.sk)
+			goto do_error;
 
 		*timeo_p = current_timeo;
 	}
 
 out:
-	*orig_sk = sk;
 	finish_wait(&asoc->wait, &wait);
 
 	/* Release the association's refcnt.  */
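
The sendmsg() change above re-runs sctp_stream_init() when SCTP_INIT
ancillary data supplies an outbound stream count, since the stream array was
already sized when the association was created. For reference, a hedged
userspace sketch (assuming lksctp-style headers; nothing below is taken from
this patch) of the cmsg that exercises this path:

  #include <string.h>
  #include <sys/socket.h>
  #include <netinet/in.h>
  #include <netinet/sctp.h>

  struct sctp_initmsg init = { .sinit_num_ostreams = 64 };
  char cbuf[CMSG_SPACE(sizeof(init))];
  struct msghdr msg = {
          .msg_control    = cbuf,
          .msg_controllen = sizeof(cbuf),
  };
  struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

  cmsg->cmsg_level = IPPROTO_SCTP;
  cmsg->cmsg_type  = SCTP_INIT;
  cmsg->cmsg_len   = CMSG_LEN(sizeof(init));
  memcpy(CMSG_DATA(cmsg), &init, sizeof(init));
  /* fill msg.msg_iov/msg_iovlen, then sendmsg(fd, &msg, 0) */
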
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 507017f..9036d87 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1880,36 +1880,38 @@ int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
 
 	if (strcmp(name, tipc_bclink_name) == 0) {
 		err = tipc_nl_add_bc_link(net, &msg);
-		if (err) {
-			nlmsg_free(msg.skb);
-			return err;
-		}
+		if (err)
+			goto err_free;
 	} else {
 		int bearer_id;
 		struct tipc_node *node;
 		struct tipc_link *link;
 
 		node = tipc_node_find_by_name(net, name, &bearer_id);
-		if (!node)
-			return -EINVAL;
+		if (!node) {
+			err = -EINVAL;
+			goto err_free;
+		}
 
 		tipc_node_read_lock(node);
 		link = node->links[bearer_id].link;
 		if (!link) {
 			tipc_node_read_unlock(node);
-			nlmsg_free(msg.skb);
-			return -EINVAL;
+			err = -EINVAL;
+			goto err_free;
 		}
 
 		err = __tipc_nl_add_link(net, &msg, link, 0);
 		tipc_node_read_unlock(node);
-		if (err) {
-			nlmsg_free(msg.skb);
-			return err;
-		}
+		if (err)
+			goto err_free;
 	}
 
 	return genlmsg_reply(msg.skb, info);
+
+err_free:
+	nlmsg_free(msg.skb);
+	return err;
 }
 
 int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
diff --git a/net/tipc/server.c b/net/tipc/server.c
index 8ee5e86..c0d331f 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -132,10 +132,11 @@ static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid)
 
 	spin_lock_bh(&s->idr_lock);
 	con = idr_find(&s->conn_idr, conid);
-	if (con && test_bit(CF_CONNECTED, &con->flags))
-		conn_get(con);
-	else
-		con = NULL;
+	if (con) {
+		if (!test_bit(CF_CONNECTED, &con->flags) ||
+		    !kref_get_unless_zero(&con->kref))
+			con = NULL;
+	}
 	spin_unlock_bh(&s->idr_lock);
 	return con;
 }
@@ -183,35 +184,28 @@ static void tipc_register_callbacks(struct socket *sock, struct tipc_conn *con)
 	write_unlock_bh(&sk->sk_callback_lock);
 }
 
-static void tipc_unregister_callbacks(struct tipc_conn *con)
-{
-	struct sock *sk = con->sock->sk;
-
-	write_lock_bh(&sk->sk_callback_lock);
-	sk->sk_user_data = NULL;
-	write_unlock_bh(&sk->sk_callback_lock);
-}
-
 static void tipc_close_conn(struct tipc_conn *con)
 {
 	struct tipc_server *s = con->server;
+	struct sock *sk = con->sock->sk;
+	bool disconnect = false;
 
-	if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
-		if (con->sock)
-			tipc_unregister_callbacks(con);
-
+	write_lock_bh(&sk->sk_callback_lock);
+	disconnect = test_and_clear_bit(CF_CONNECTED, &con->flags);
+	if (disconnect) {
+		sk->sk_user_data = NULL;
 		if (con->conid)
 			s->tipc_conn_release(con->conid, con->usr_data);
-
-		/* We shouldn't flush pending works as we may be in the
-		 * thread. In fact the races with pending rx/tx work structs
-		 * are harmless for us here as we have already deleted this
-		 * connection from server connection list.
-		 */
-		if (con->sock)
-			kernel_sock_shutdown(con->sock, SHUT_RDWR);
-		conn_put(con);
 	}
+	write_unlock_bh(&sk->sk_callback_lock);
+
+	/* Handle concurrent calls from sending and receiving threads */
+	if (!disconnect)
+		return;
+
+	/* Don't flush pending works; just let them expire */
+	kernel_sock_shutdown(con->sock, SHUT_RDWR);
+	conn_put(con);
 }
 
 static struct tipc_conn *tipc_alloc_conn(struct tipc_server *s)
@@ -248,9 +242,10 @@ static struct tipc_conn *tipc_alloc_conn(struct tipc_server *s)
 
 static int tipc_receive_from_sock(struct tipc_conn *con)
 {
-	struct msghdr msg = {};
 	struct tipc_server *s = con->server;
+	struct sock *sk = con->sock->sk;
 	struct sockaddr_tipc addr;
+	struct msghdr msg = {};
 	struct kvec iov;
 	void *buf;
 	int ret;
@@ -271,12 +266,15 @@ static int tipc_receive_from_sock(struct tipc_conn *con)
 		goto out_close;
 	}
 
-	s->tipc_conn_recvmsg(sock_net(con->sock->sk), con->conid, &addr,
-			     con->usr_data, buf, ret);
-
+	read_lock_bh(&sk->sk_callback_lock);
+	if (test_bit(CF_CONNECTED, &con->flags))
+		ret = s->tipc_conn_recvmsg(sock_net(con->sock->sk), con->conid,
+					   &addr, con->usr_data, buf, ret);
+	read_unlock_bh(&sk->sk_callback_lock);
 	kmem_cache_free(s->rcvbuf_cache, buf);
-
-	return 0;
+	if (ret < 0)
+		tipc_conn_terminate(s, con->conid);
+	return ret;
 
 out_close:
 	if (ret != -EWOULDBLOCK)
@@ -525,11 +523,17 @@ bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower,
 void tipc_topsrv_kern_unsubscr(struct net *net, int conid)
 {
 	struct tipc_conn *con;
+	struct tipc_server *srv;
 
 	con = tipc_conn_lookup(tipc_topsrv(net), conid);
 	if (!con)
 		return;
-	tipc_close_conn(con);
+
+	test_and_clear_bit(CF_CONNECTED, &con->flags);
+	srv = con->server;
+	if (con->conid)
+		srv->tipc_conn_release(con->conid, con->usr_data);
+	conn_put(con);
 	conn_put(con);
 }
 
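
The tipc_conn_lookup() change above swaps the unconditional conn_get() for
kref_get_unless_zero(), so a connection whose refcount already hit zero (with
release in flight) can no longer be resurrected from the IDR. This is the
standard lookup-under-lock idiom; a generic sketch with hypothetical names:

  struct obj {
          struct kref kref;
          /* ... */
  };

  static struct obj *obj_lookup(struct idr *idr, spinlock_t *lock, int id)
  {
          struct obj *o;

          spin_lock_bh(lock);
          o = idr_find(idr, id);
          /* Taking a plain reference here could revive an object whose
           * release function is already running; fail the lookup instead.
           */
          if (o && !kref_get_unless_zero(&o->kref))
                  o = NULL;
          spin_unlock_bh(lock);
          return o;
  }
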
diff --git a/net/tipc/server.h b/net/tipc/server.h
index 17f49ee..64df751 100644
--- a/net/tipc/server.h
+++ b/net/tipc/server.h
@@ -74,9 +74,9 @@ struct tipc_server {
 	int max_rcvbuf_size;
 	void *(*tipc_conn_new)(int conid);
 	void (*tipc_conn_release)(int conid, void *usr_data);
-	void (*tipc_conn_recvmsg)(struct net *net, int conid,
-				  struct sockaddr_tipc *addr, void *usr_data,
-				  void *buf, size_t len);
+	int (*tipc_conn_recvmsg)(struct net *net, int conid,
+				 struct sockaddr_tipc *addr, void *usr_data,
+				 void *buf, size_t len);
 	struct sockaddr_tipc *saddr;
 	char name[TIPC_SERVER_NAME_LEN];
 	int imp;
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 44df528..68e2647 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -289,17 +289,16 @@ static struct tipc_subscription *tipc_subscrp_create(struct net *net,
 	return sub;
 }
 
-static void tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s,
-				   struct tipc_subscriber *subscriber, int swap,
-				   bool status)
+static int tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s,
+				  struct tipc_subscriber *subscriber, int swap,
+				  bool status)
 {
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	struct tipc_subscription *sub = NULL;
 	u32 timeout;
 
 	sub = tipc_subscrp_create(net, s, swap);
 	if (!sub)
-		return tipc_conn_terminate(tn->topsrv, subscriber->conid);
+		return -1;
 
 	spin_lock_bh(&subscriber->lock);
 	list_add(&sub->subscrp_list, &subscriber->subscrp_list);
@@ -313,6 +312,7 @@ static void tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s,
 
 	if (timeout != TIPC_WAIT_FOREVER)
 		mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
+	return 0;
 }
 
 /* Handle one termination request for the subscriber */
@@ -322,9 +322,9 @@ static void tipc_subscrb_release_cb(int conid, void *usr_data)
 }
 
 /* Handle one request to create a new subscription for the subscriber */
-static void tipc_subscrb_rcv_cb(struct net *net, int conid,
-				struct sockaddr_tipc *addr, void *usr_data,
-				void *buf, size_t len)
+static int tipc_subscrb_rcv_cb(struct net *net, int conid,
+			       struct sockaddr_tipc *addr, void *usr_data,
+			       void *buf, size_t len)
 {
 	struct tipc_subscriber *subscriber = usr_data;
 	struct tipc_subscr *s = (struct tipc_subscr *)buf;
@@ -338,10 +338,11 @@ static void tipc_subscrb_rcv_cb(struct net *net, int conid,
 	/* Detect & process a subscription cancellation request */
 	if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
 		s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
-		return tipc_subscrp_cancel(s, subscriber);
+		tipc_subscrp_cancel(s, subscriber);
+		return 0;
 	}
 	status = !(s->filter & htohl(TIPC_SUB_NO_STATUS, swap));
-	tipc_subscrp_subscribe(net, s, subscriber, swap, status);
+	return tipc_subscrp_subscribe(net, s, subscriber, swap, status);
 }
 
 /* Handle one request to establish a new subscriber */
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 73d1921..9773571 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -391,7 +391,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 
 	while (msg_data_left(msg)) {
 		if (sk->sk_err) {
-			ret = sk->sk_err;
+			ret = -sk->sk_err;
 			goto send_end;
 		}
 
@@ -544,7 +544,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
 		size_t copy, required_size;
 
 		if (sk->sk_err) {
-			ret = sk->sk_err;
+			ret = -sk->sk_err;
 			goto sendpage_end;
 		}
 
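
The sign flip in tls_sw.c is not cosmetic: sk->sk_err stores a positive errno
value, while sendmsg()/sendpage() must return a negative errno to the caller.
A minimal illustration of the convention (illustrative, not from this patch):

  /* the error producer stores a positive errno: */
  sk->sk_err = EPIPE;
  sk->sk_error_report(sk);

  /* consumers negate it before returning to userspace: */
  if (sk->sk_err)
          return -sk->sk_err;     /* -EPIPE, not EPIPE */
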
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index a9ee634..90a3784 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2869,7 +2869,6 @@ static int unix_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations unix_seq_fops = {
-	.owner		= THIS_MODULE,
 	.open		= unix_seq_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/net/wireless/core.c b/net/wireless/core.c
index fdde0d9..a6f3cac 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -439,6 +439,8 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
 		if (rv)
 			goto use_default_name;
 	} else {
+		int rv;
+
 use_default_name:
 		/* NOTE:  This is *probably* safe w/out holding rtnl because of
 		 * the restrictions on phy names.  Probably this call could
@@ -446,7 +448,11 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
 		 * phyX.  But, might should add some locking and check return
 		 * value, and use a different name if this one exists?
 		 */
-		dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx);
+		rv = dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx);
+		if (rv < 0) {
+			kfree(rdev);
+			return NULL;
+		}
 	}
 
 	INIT_LIST_HEAD(&rdev->wiphy.wdev_list);
diff --git a/net/wireless/core.h b/net/wireless/core.h
index d2f7e8b..eaff636 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -507,8 +507,6 @@ void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
 void cfg80211_stop_nan(struct cfg80211_registered_device *rdev,
 		       struct wireless_dev *wdev);
 
-#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
-
 #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
 #define CFG80211_DEV_WARN_ON(cond)	WARN_ON(cond)
 #else
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index c084dd2..b48eb6d 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2642,12 +2642,13 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
 		const u8 *ssid_ie;
 		if (!wdev->current_bss)
 			break;
+		rcu_read_lock();
 		ssid_ie = ieee80211_bss_get_ie(&wdev->current_bss->pub,
 					       WLAN_EID_SSID);
-		if (!ssid_ie)
-			break;
-		if (nla_put(msg, NL80211_ATTR_SSID, ssid_ie[1], ssid_ie + 2))
-			goto nla_put_failure_locked;
+		if (ssid_ie &&
+		    nla_put(msg, NL80211_ATTR_SSID, ssid_ie[1], ssid_ie + 2))
+			goto nla_put_failure_rcu_locked;
+		rcu_read_unlock();
 		break;
 		}
 	default:
@@ -2659,6 +2660,8 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
 	genlmsg_end(msg, hdr);
 	return 0;
 
+ nla_put_failure_rcu_locked:
+	rcu_read_unlock();
  nla_put_failure_locked:
 	wdev_unlock(wdev);
  nla_put_failure:
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 78e71b0..7b42f0b 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1769,8 +1769,7 @@ static void handle_reg_beacon(struct wiphy *wiphy, unsigned int chan_idx,
 	if (wiphy->regulatory_flags & REGULATORY_DISABLE_BEACON_HINTS)
 		return;
 
-	chan_before.center_freq = chan->center_freq;
-	chan_before.flags = chan->flags;
+	chan_before = *chan;
 
 	if (chan->flags & IEEE80211_CHAN_NO_IR) {
 		chan->flags &= ~IEEE80211_CHAN_NO_IR;
diff --git a/net/wireless/wext-proc.c b/net/wireless/wext-proc.c
index e98a01c..5511f989 100644
--- a/net/wireless/wext-proc.c
+++ b/net/wireless/wext-proc.c
@@ -133,7 +133,6 @@ static int seq_open_wireless(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations wireless_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open    = seq_open_wireless,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 26b10eb..1472c08 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -517,7 +517,7 @@ int xfrm_trans_queue(struct sk_buff *skb,
 		return -ENOBUFS;
 
 	XFRM_TRANS_SKB_CB(skb)->finish = finish;
-	skb_queue_tail(&trans->queue, skb);
+	__skb_queue_tail(&trans->queue, skb);
 	tasklet_schedule(&trans->tasklet);
 	return 0;
 }
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index d8a8129..7a23078 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -609,7 +609,8 @@ static void xfrm_hash_rebuild(struct work_struct *work)
 
 	/* re-insert all policies by order of creation */
 	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
-		if (xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
+		if (policy->walk.dead ||
+		    xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
 			/* skip socket policies */
 			continue;
 		}
@@ -974,8 +975,6 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
 	}
 	if (!cnt)
 		err = -ESRCH;
-	else
-		xfrm_policy_cache_flush();
 out:
 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
 	return err;
@@ -1744,6 +1743,8 @@ void xfrm_policy_cache_flush(void)
 	bool found = 0;
 	int cpu;
 
+	might_sleep();
+
 	local_bh_disable();
 	rcu_read_lock();
 	for_each_possible_cpu(cpu) {
@@ -2064,8 +2065,11 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
 	if (num_xfrms <= 0)
 		goto make_dummy_bundle;
 
+	local_bh_disable();
 	xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
-						  xflo->dst_orig);
+					      xflo->dst_orig);
+	local_bh_enable();
+
 	if (IS_ERR(xdst)) {
 		err = PTR_ERR(xdst);
 		if (err != -EAGAIN)
@@ -2152,9 +2156,12 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
 				goto no_transform;
 			}
 
+			local_bh_disable();
 			xdst = xfrm_resolve_and_create_bundle(
 					pols, num_pols, fl,
 					family, dst_orig);
+			local_bh_enable();
+
 			if (IS_ERR(xdst)) {
 				xfrm_pols_put(pols, num_pols);
 				err = PTR_ERR(xdst);
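
The bundle resolver keeps per-CPU state that softirqs may also touch, so
process-context callers now keep BHs disabled around it; the companion hunk
adds might_sleep() to xfrm_policy_cache_flush() so that atomic callers get a
debug warning. A sketch of the per-CPU-plus-BH pattern, with a hypothetical
bundle type and build_bundle() helper:

	#include <linux/percpu.h>
	#include <linux/bottom_half.h>

	struct bundle;					/* hypothetical */
	extern struct bundle *build_bundle(void);	/* hypothetical */

	static DEFINE_PER_CPU(struct bundle *, bundle_cache);

	static struct bundle *bundle_get(void)
	{
		struct bundle *b;

		local_bh_disable();	/* keep softirqs off this CPU's slot */
		b = this_cpu_read(bundle_cache);
		if (!b) {
			b = build_bundle();
			this_cpu_write(bundle_cache, b);
		}
		local_bh_enable();
		return b;
	}
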
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c
index ba2b539..6d5f85f 100644
--- a/net/xfrm/xfrm_proc.c
+++ b/net/xfrm/xfrm_proc.c
@@ -71,7 +71,6 @@ static int xfrm_statistics_seq_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations xfrm_statistics_seq_fops = {
-	.owner	 = THIS_MODULE,
 	.open	 = xfrm_statistics_seq_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index cc4c519..20b1e41 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -313,13 +313,14 @@ xfrm_get_type_offload(u8 proto, unsigned short family, bool try_load)
 	if ((type && !try_module_get(type->owner)))
 		type = NULL;
 
+	rcu_read_unlock();
+
 	if (!type && try_load) {
 		request_module("xfrm-offload-%d-%d", family, proto);
 		try_load = 0;
 		goto retry;
 	}
 
-	rcu_read_unlock();
 	return type;
 }
 
@@ -1534,8 +1535,12 @@ int xfrm_state_update(struct xfrm_state *x)
 	err = -EINVAL;
 	spin_lock_bh(&x1->lock);
 	if (likely(x1->km.state == XFRM_STATE_VALID)) {
-		if (x->encap && x1->encap)
+		if (x->encap && x1->encap &&
+		    x->encap->encap_type == x1->encap->encap_type)
 			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
+		else if (x->encap || x1->encap)
+			goto fail;
+
 		if (x->coaddr && x1->coaddr) {
 			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
 		}
@@ -1552,6 +1557,8 @@ int xfrm_state_update(struct xfrm_state *x)
 		x->km.state = XFRM_STATE_DEAD;
 		__xfrm_state_put(x);
 	}
+
+fail:
 	spin_unlock_bh(&x1->lock);
 
 	xfrm_state_put(x1);
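
In the first xfrm_state.c hunk above, request_module() may sleep waiting for
the usermode helper, and sleeping inside an RCU read-side critical section is
illegal, so the unlock now happens before the module load and the lookup is
retried afterwards. The shape of that fix, with a hypothetical find_thing()
lookup (note the hunk above takes a module reference before unlocking so the
result stays valid):

	#include <linux/rcupdate.h>
	#include <linux/kmod.h>

	struct thing;					/* hypothetical */
	extern const struct thing *find_thing(int id);	/* hypothetical RCU lookup */

	static const struct thing *get_thing(int id)
	{
		const struct thing *t;
		bool try_load = true;

	retry:
		rcu_read_lock();
		t = find_thing(id);
		rcu_read_unlock();

		if (!t && try_load) {
			request_module("thing-%d", id);	/* may sleep */
			try_load = false;
			goto retry;
		}
		return t;
	}
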
diff --git a/samples/bpf/xdp_monitor_kern.c b/samples/bpf/xdp_monitor_kern.c
index 2fe2f76..c969141 100644
--- a/samples/bpf/xdp_monitor_kern.c
+++ b/samples/bpf/xdp_monitor_kern.c
@@ -104,7 +104,7 @@ struct xdp_exception_ctx {
 SEC("tracepoint/xdp/xdp_exception")
 int trace_xdp_exception(struct xdp_exception_ctx *ctx)
 {
-	u64 *cnt;;
+	u64 *cnt;
 	u32 key;
 
 	key = ctx->act;
diff --git a/scripts/genksyms/.gitignore b/scripts/genksyms/.gitignore
index 86dc07a..e7836b4 100644
--- a/scripts/genksyms/.gitignore
+++ b/scripts/genksyms/.gitignore
@@ -1,4 +1,3 @@
-*.hash.c
 *.lex.c
 *.tab.c
 *.tab.h
diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
index cbf4996..8cee597 100644
--- a/scripts/kconfig/expr.c
+++ b/scripts/kconfig/expr.c
@@ -893,7 +893,10 @@ static enum string_value_kind expr_parse_string(const char *str,
 	switch (type) {
 	case S_BOOLEAN:
 	case S_TRISTATE:
-		return k_string;
+		val->s = !strcmp(str, "n") ? 0 :
+			 !strcmp(str, "m") ? 1 :
+			 !strcmp(str, "y") ? 2 : -1;
+		return k_signed;
 	case S_INT:
 		val->s = strtoll(str, &tail, 10);
 		kind = k_signed;
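
With this change, relational operators on bool/tristate symbols compare by
value in the order n < m < y instead of comparing the strings, where
"m" < "n" < "y" lexicographically and a condition like FOO > n misbehaves
for m. The mapping, mirrored as a standalone helper:

	#include <string.h>

	static long long tristate_value(const char *str)
	{
		if (!strcmp(str, "n"))
			return 0;
		if (!strcmp(str, "m"))
			return 1;
		if (!strcmp(str, "y"))
			return 2;
		return -1;	/* not a tristate literal */
	}
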
diff --git a/security/Kconfig b/security/Kconfig
index 3d4debd..b0cb9a5 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -63,7 +63,7 @@
 	  ensuring that the majority of kernel addresses are not mapped
 	  into userspace.
 
-	  See Documentation/x86/pagetable-isolation.txt for more details.
+	  See Documentation/x86/pti.txt for more details.
 
 config SECURITY_INFINIBAND
 	bool "Infiniband Security Hooks"
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index 04ba9d0..6a54d2f 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -330,10 +330,7 @@ static struct aa_profile *__attach_match(const char *name,
 			continue;
 
 		if (profile->xmatch) {
-			if (profile->xmatch_len == len) {
-				conflict = true;
-				continue;
-			} else if (profile->xmatch_len > len) {
+			if (profile->xmatch_len >= len) {
 				unsigned int state;
 				u32 perm;
 
@@ -342,6 +339,10 @@ static struct aa_profile *__attach_match(const char *name,
 				perm = dfa_user_allow(profile->xmatch, state);
 				/* any accepting state means a valid match. */
 				if (perm & MAY_EXEC) {
+					if (profile->xmatch_len == len) {
+						conflict = true;
+						continue;
+					}
 					candidate = profile;
 					len = profile->xmatch_len;
 					conflict = false;
diff --git a/security/apparmor/include/perms.h b/security/apparmor/include/perms.h
index 2b27bb7..d7b7e71 100644
--- a/security/apparmor/include/perms.h
+++ b/security/apparmor/include/perms.h
@@ -133,6 +133,9 @@ extern struct aa_perms allperms;
 #define xcheck_labels_profiles(L1, L2, FN, args...)		\
 	xcheck_ns_labels((L1), (L2), xcheck_ns_profile_label, (FN), args)
 
+#define xcheck_labels(L1, L2, P, FN1, FN2)			\
+	xcheck(fn_for_each((L1), (P), (FN1)), fn_for_each((L2), (P), (FN2)))
+
 
 void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask);
 void aa_audit_perm_names(struct audit_buffer *ab, const char **names, u32 mask);
diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c
index 7ca0032..b40678f 100644
--- a/security/apparmor/ipc.c
+++ b/security/apparmor/ipc.c
@@ -64,40 +64,48 @@ static void audit_ptrace_cb(struct audit_buffer *ab, void *va)
 			FLAGS_NONE, GFP_ATOMIC);
 }
 
+/* assumes check for PROFILE_MEDIATES is already done */
 /* TODO: conditionals */
 static int profile_ptrace_perm(struct aa_profile *profile,
-			       struct aa_profile *peer, u32 request,
-			       struct common_audit_data *sa)
+			     struct aa_label *peer, u32 request,
+			     struct common_audit_data *sa)
 {
 	struct aa_perms perms = { };
 
-	/* need because of peer in cross check */
-	if (profile_unconfined(profile) ||
-	    !PROFILE_MEDIATES(profile, AA_CLASS_PTRACE))
-		return 0;
-
-	aad(sa)->peer = &peer->label;
-	aa_profile_match_label(profile, &peer->label, AA_CLASS_PTRACE, request,
+	aad(sa)->peer = peer;
+	aa_profile_match_label(profile, peer, AA_CLASS_PTRACE, request,
 			       &perms);
 	aa_apply_modes_to_perms(profile, &perms);
 	return aa_check_perms(profile, &perms, request, sa, audit_ptrace_cb);
 }
 
-static int cross_ptrace_perm(struct aa_profile *tracer,
-			     struct aa_profile *tracee, u32 request,
-			     struct common_audit_data *sa)
+static int profile_tracee_perm(struct aa_profile *tracee,
+			       struct aa_label *tracer, u32 request,
+			       struct common_audit_data *sa)
 {
+	if (profile_unconfined(tracee) || unconfined(tracer) ||
+	    !PROFILE_MEDIATES(tracee, AA_CLASS_PTRACE))
+		return 0;
+
+	return profile_ptrace_perm(tracee, tracer, request, sa);
+}
+
+static int profile_tracer_perm(struct aa_profile *tracer,
+			       struct aa_label *tracee, u32 request,
+			       struct common_audit_data *sa)
+{
+	if (profile_unconfined(tracer))
+		return 0;
+
 	if (PROFILE_MEDIATES(tracer, AA_CLASS_PTRACE))
-		return xcheck(profile_ptrace_perm(tracer, tracee, request, sa),
-			      profile_ptrace_perm(tracee, tracer,
-						  request << PTRACE_PERM_SHIFT,
-						  sa));
-	/* policy uses the old style capability check for ptrace */
-	if (profile_unconfined(tracer) || tracer == tracee)
+		return profile_ptrace_perm(tracer, tracee, request, sa);
+
+	/* profile uses the old style capability check for ptrace */
+	if (&tracer->label == tracee)
 		return 0;
 
 	aad(sa)->label = &tracer->label;
-	aad(sa)->peer = &tracee->label;
+	aad(sa)->peer = tracee;
 	aad(sa)->request = 0;
 	aad(sa)->error = aa_capable(&tracer->label, CAP_SYS_PTRACE, 1);
 
@@ -115,10 +123,13 @@ static int cross_ptrace_perm(struct aa_profile *tracer,
 int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee,
 		  u32 request)
 {
+	struct aa_profile *profile;
+	u32 xrequest = request << PTRACE_PERM_SHIFT;
 	DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, OP_PTRACE);
 
-	return xcheck_labels_profiles(tracer, tracee, cross_ptrace_perm,
-				      request, &sa);
+	return xcheck_labels(tracer, tracee, profile,
+			profile_tracer_perm(profile, tracee, request, &sa),
+			profile_tracee_perm(profile, tracer, xrequest, &sa));
 }
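
The rewrite above splits the old symmetric cross_ptrace_perm() into one check
per direction: every profile in the tracer's label must be allowed to trace
the tracee, and every profile in the tracee's label must accept being traced
(the same request shifted by PTRACE_PERM_SHIFT). Roughly, with hypothetical
tracer_label_perm()/tracee_label_perm() helpers standing in for the
fn_for_each()/xcheck() machinery:

	static int may_ptrace_sketch(struct aa_label *tracer,
				     struct aa_label *tracee, u32 request)
	{
		int err;

		/* tracer side: permission to trace the tracee */
		err = tracer_label_perm(tracer, tracee, request);
		if (err)
			return err;

		/* tracee side: permission to be traced by the tracer */
		return tracee_label_perm(tracee, tracer,
					 request << PTRACE_PERM_SHIFT);
	}
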
 
 
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index db7894b..faa6786 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -560,7 +560,6 @@ static inline unsigned int muldiv32(unsigned int a, unsigned int b,
 {
 	u_int64_t n = (u_int64_t) a * b;
 	if (c == 0) {
-		snd_BUG_ON(!n);
 		*r = 0;
 		return UINT_MAX;
 	}
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 6e22eea..d019134 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -221,6 +221,7 @@ static struct snd_seq_client *seq_create_client1(int client_index, int poolsize)
 	rwlock_init(&client->ports_lock);
 	mutex_init(&client->ports_mutex);
 	INIT_LIST_HEAD(&client->ports_list_head);
+	mutex_init(&client->ioctl_mutex);
 
 	/* find free slot in the client table */
 	spin_lock_irqsave(&clients_lock, flags);
@@ -2130,7 +2131,9 @@ static long snd_seq_ioctl(struct file *file, unsigned int cmd,
 			return -EFAULT;
 	}
 
+	mutex_lock(&client->ioctl_mutex);
 	err = handler->func(client, &buf);
+	mutex_unlock(&client->ioctl_mutex);
 	if (err >= 0) {
 		/* Some commands include a bug in the 'dir' field. */
 		if (handler->cmd == SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT ||
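
The new per-client ioctl_mutex serialises concurrent ioctls against the same
sequencer client, closing races between handlers that were previously free to
interleave. The shape of the fix, with a hypothetical client type and
dispatch() standing in for the per-command handler table:

	#include <linux/mutex.h>

	struct client {
		struct mutex ioctl_mutex;	/* mutex_init() at client creation */
		/* ... */
	};

	extern long dispatch(struct client *c, unsigned int cmd, void *arg);

	static long client_ioctl(struct client *c, unsigned int cmd, void *arg)
	{
		long err;

		mutex_lock(&c->ioctl_mutex);
		err = dispatch(c, cmd, arg);	/* whole handler runs locked */
		mutex_unlock(&c->ioctl_mutex);
		return err;
	}
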
diff --git a/sound/core/seq/seq_clientmgr.h b/sound/core/seq/seq_clientmgr.h
index c661425..0611e1e 100644
--- a/sound/core/seq/seq_clientmgr.h
+++ b/sound/core/seq/seq_clientmgr.h
@@ -61,6 +61,7 @@ struct snd_seq_client {
 	struct list_head ports_list_head;
 	rwlock_t ports_lock;
 	struct mutex ports_mutex;
+	struct mutex ioctl_mutex;
 	int convert32;		/* convert 32->64bit */
 
 	/* output pool */
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 80bbadc..d6e079f 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -408,6 +408,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = {
 	/*SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),*/
 
 	/* codec SSID */
+	SND_PCI_QUIRK(0x106b, 0x0600, "iMac 14,1", CS420X_IMAC27_122),
 	SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81),
 	SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122),
 	SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 8fd2d9c..9aafc6c 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6196,6 +6196,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
 	SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
 	SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
+	SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index 2237bc4..26901ec 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -39,7 +39,7 @@
 
 CFLAGS += -O2
 CFLAGS += -W -Wall -Wextra -Wno-unused-parameter -Wshadow
-CFLAGS += -D__EXPORTED_HEADERS__ -I$(srctree)/tools/include/uapi -I$(srctree)/tools/include -I$(srctree)/tools/lib/bpf -I$(srctree)/kernel/bpf/
+CFLAGS += -DPACKAGE='"bpftool"' -D__EXPORTED_HEADERS__ -I$(srctree)/tools/include/uapi -I$(srctree)/tools/include -I$(srctree)/tools/lib/bpf -I$(srctree)/kernel/bpf/
 CFLAGS += -DBPFTOOL_VERSION='"$(BPFTOOL_VERSION)"'
 LIBS = -lelf -lbfd -lopcodes $(LIBBPF)
 
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index c6a28be..099e21cf 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -66,6 +66,7 @@ static const char * const prog_type_name[] = {
 	[BPF_PROG_TYPE_LWT_XMIT]	= "lwt_xmit",
 	[BPF_PROG_TYPE_SOCK_OPS]	= "sock_ops",
 	[BPF_PROG_TYPE_SK_SKB]		= "sk_skb",
+	[BPF_PROG_TYPE_CGROUP_DEVICE]	= "cgroup_device",
 };
 
 static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 17f2c73..bc715f6 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -190,7 +190,7 @@
 	$(BUILD) -DPACKAGE='"perf"' -lbfd -lz -liberty -ldl
 
 $(OUTPUT)test-disassembler-four-args.bin:
-	$(BUILD) -lbfd -lopcodes
+	$(BUILD) -DPACKAGE='"perf"' -lbfd -lopcodes
 
 $(OUTPUT)test-liberty.bin:
 	$(CC) $(CFLAGS) -Wall -Werror -o $@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty
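
The -DPACKAGE additions here and in the bpftool Makefile above are needed
because recent binutils ship a bfd.h that refuses to be included outside a
configured binutils tree; the guard looks roughly like this, so defining
PACKAGE to any value is enough:

	#if !defined(PACKAGE) && !defined(PACKAGE_VERSION)
	#error config.h must be included before this header
	#endif
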
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 4e8c60a..69f96af 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -245,6 +245,7 @@ union bpf_attr {
 					 * BPF_F_NUMA_NODE is set).
 					 */
 		char	map_name[BPF_OBJ_NAME_LEN];
+		__u32	map_ifindex;	/* ifindex of netdev to create on */
 	};
 
 	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index 8ed43ae..83714ca 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -93,7 +93,6 @@
 # Shell quotes
 libdir_SQ = $(subst ','\'',$(libdir))
 libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
-plugin_dir_SQ = $(subst ','\'',$(plugin_dir))
 
 LIB_FILE = libbpf.a libbpf.so
 
@@ -150,7 +149,7 @@
 
 TARGETS = $(CMD_TARGETS)
 
-all: fixdep $(VERSION_FILES) all_cmd
+all: fixdep all_cmd
 
 all_cmd: $(CMD_TARGETS)
 
@@ -169,21 +168,11 @@
 $(OUTPUT)libbpf.a: $(BPF_IN)
 	$(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
 
-define update_dir
-  (echo $1 > $@.tmp;				\
-   if [ -r $@ ] && cmp -s $@ $@.tmp; then	\
-     rm -f $@.tmp;				\
-   else						\
-     echo '  UPDATE                 $@';	\
-     mv -f $@.tmp $@;				\
-   fi);
-endef
-
 define do_install
 	if [ ! -d '$(DESTDIR_SQ)$2' ]; then		\
 		$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2';	\
 	fi;						\
-	$(INSTALL) $1 '$(DESTDIR_SQ)$2'
+	$(INSTALL) $1 $(if $3,-m $3,) '$(DESTDIR_SQ)$2'
 endef
 
 install_lib: all_cmd
@@ -192,7 +181,8 @@
 
 install_headers:
 	$(call QUIET_INSTALL, headers) \
-		$(call do_install,bpf.h,$(prefix)/include/bpf,644)
+		$(call do_install,bpf.h,$(prefix)/include/bpf,644); \
+		$(call do_install,libbpf.h,$(prefix)/include/bpf,644);
 
 install: install_lib
 
@@ -203,7 +193,7 @@
 	$(Q)$(MAKE) -C $(srctree)/tools/build/feature/ clean >/dev/null
 
 clean:
-	$(call QUIET_CLEAN, libbpf) $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d .*.cmd \
+	$(call QUIET_CLEAN, libbpf) $(RM) *.o *~ $(TARGETS) *.a *.so .*.d .*.cmd \
 		$(RM) LIBBPF-CFLAGS
 	$(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf
 
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index e9c4b7c..30c7763 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -1803,7 +1803,7 @@ BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
 BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
 BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
 
-#define BPF_PROG_SEC(string, type) { string, sizeof(string), type }
+#define BPF_PROG_SEC(string, type) { string, sizeof(string) - 1, type }
 static const struct {
 	const char *sec;
 	size_t len;
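
sizeof on a string literal counts the terminating NUL, so the old length made
strncmp() insist that the candidate section name end exactly at the prefix;
subtracting one restores true prefix matching (e.g. "kprobe/" matching
"kprobe/sys_write"). A standalone illustration:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char *sec = "kprobe/sys_write";

		/* length 8 includes '\0': compares "kprobe/\0" -> no match (0) */
		printf("%d\n", strncmp(sec, "kprobe/", sizeof("kprobe/")) == 0);
		/* length 7: genuine prefix match (1) */
		printf("%d\n", strncmp(sec, "kprobe/", sizeof("kprobe/") - 1) == 0);
		return 0;
	}
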
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index ae0272f..e6acc28 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -46,7 +46,7 @@
 	@$(MAKE) $(build)=objtool
 
 $(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN)
-	@./sync-check.sh
+	@$(CONFIG_SHELL) ./sync-check.sh
 	$(QUIET_LINK)$(CC) $(OBJTOOL_IN) $(LDFLAGS) -o $@
 
 
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 9b341584..f40d46e 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -428,6 +428,40 @@ static void add_ignores(struct objtool_file *file)
 }
 
 /*
+ * FIXME: For now, just ignore any alternatives which add retpolines.  This is
+ * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
+ * But it at least allows objtool to understand the control flow *around* the
+ * retpoline.
+ */
+static int add_nospec_ignores(struct objtool_file *file)
+{
+	struct section *sec;
+	struct rela *rela;
+	struct instruction *insn;
+
+	sec = find_section_by_name(file->elf, ".rela.discard.nospec");
+	if (!sec)
+		return 0;
+
+	list_for_each_entry(rela, &sec->rela_list, list) {
+		if (rela->sym->type != STT_SECTION) {
+			WARN("unexpected relocation symbol type in %s", sec->name);
+			return -1;
+		}
+
+		insn = find_insn(file, rela->sym->sec, rela->addend);
+		if (!insn) {
+			WARN("bad .discard.nospec entry");
+			return -1;
+		}
+
+		insn->ignore_alts = true;
+	}
+
+	return 0;
+}
+
+/*
  * Find the destination instructions for all jumps.
  */
 static int add_jump_destinations(struct objtool_file *file)
@@ -456,6 +490,13 @@ static int add_jump_destinations(struct objtool_file *file)
 		} else if (rela->sym->sec->idx) {
 			dest_sec = rela->sym->sec;
 			dest_off = rela->sym->sym.st_value + rela->addend + 4;
+		} else if (strstr(rela->sym->name, "_indirect_thunk_")) {
+			/*
+			 * Retpoline jumps are really dynamic jumps in
+			 * disguise, so convert them accordingly.
+			 */
+			insn->type = INSN_JUMP_DYNAMIC;
+			continue;
 		} else {
 			/* sibling call */
 			insn->jump_dest = 0;
@@ -502,11 +543,18 @@ static int add_call_destinations(struct objtool_file *file)
 			dest_off = insn->offset + insn->len + insn->immediate;
 			insn->call_dest = find_symbol_by_offset(insn->sec,
 								dest_off);
+			/*
+			 * FIXME: Thanks to retpolines, it's now considered
+			 * normal for a function to call within itself.  So
+			 * disable this warning for now.
+			 */
+#if 0
 			if (!insn->call_dest) {
 				WARN_FUNC("can't find call dest symbol at offset 0x%lx",
 					  insn->sec, insn->offset, dest_off);
 				return -1;
 			}
+#endif
 		} else if (rela->sym->type == STT_SECTION) {
 			insn->call_dest = find_symbol_by_offset(rela->sym->sec,
 								rela->addend+4);
@@ -671,12 +719,6 @@ static int add_special_section_alts(struct objtool_file *file)
 		return ret;
 
 	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
-		alt = malloc(sizeof(*alt));
-		if (!alt) {
-			WARN("malloc failed");
-			ret = -1;
-			goto out;
-		}
 
 		orig_insn = find_insn(file, special_alt->orig_sec,
 				      special_alt->orig_off);
@@ -687,6 +729,10 @@ static int add_special_section_alts(struct objtool_file *file)
 			goto out;
 		}
 
+		/* Ignore retpoline alternatives. */
+		if (orig_insn->ignore_alts)
+			continue;
+
 		new_insn = NULL;
 		if (!special_alt->group || special_alt->new_len) {
 			new_insn = find_insn(file, special_alt->new_sec,
@@ -712,6 +758,13 @@ static int add_special_section_alts(struct objtool_file *file)
 				goto out;
 		}
 
+		alt = malloc(sizeof(*alt));
+		if (!alt) {
+			WARN("malloc failed");
+			ret = -1;
+			goto out;
+		}
+
 		alt->insn = new_insn;
 		list_add_tail(&alt->list, &orig_insn->alts);
 
@@ -1028,6 +1081,10 @@ static int decode_sections(struct objtool_file *file)
 
 	add_ignores(file);
 
+	ret = add_nospec_ignores(file);
+	if (ret)
+		return ret;
+
 	ret = add_jump_destinations(file);
 	if (ret)
 		return ret;
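
The _indirect_thunk_ symbols matched above are the retpoline thunks; a jump or
call to one ultimately transfers control to the address held in a register,
which is why objtool retypes such jumps as INSN_JUMP_DYNAMIC. Roughly what a
thunk looks like (the real ones are the __x86_indirect_thunk_<reg> symbols in
arch/x86/lib/retpoline.S; the name below is a stand-in):

	__asm__(
		".globl my_indirect_thunk_rax\n"
		"my_indirect_thunk_rax:\n"
		"	call 2f\n"
		"1:	pause\n"		/* speculation trap */
		"	lfence\n"
		"	jmp 1b\n"
		"2:	mov %rax, (%rsp)\n"	/* overwrite the return address */
		"	ret\n"			/* 'returns' to the target in %rax */
	);
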
diff --git a/tools/objtool/check.h b/tools/objtool/check.h
index 47d9ea7..dbadb30 100644
--- a/tools/objtool/check.h
+++ b/tools/objtool/check.h
@@ -44,7 +44,7 @@ struct instruction {
 	unsigned int len;
 	unsigned char type;
 	unsigned long immediate;
-	bool alt_group, visited, dead_end, ignore, hint, save, restore;
+	bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts;
 	struct symbol *call_dest;
 	struct instruction *jump_dest;
 	struct list_head alts;
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 5438479..9601798 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -274,6 +274,46 @@ static struct bpf_test tests[] = {
 		.result = REJECT,
 	},
 	{
+		"arsh32 on imm",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "BPF_ARSH not supported for 32 bit ALU",
+	},
+	{
+		"arsh32 on reg",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_MOV64_IMM(BPF_REG_1, 5),
+			BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "BPF_ARSH not supported for 32 bit ALU",
+	},
+	{
+		"arsh64 on imm",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+	},
+	{
+		"arsh64 on reg",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_MOV64_IMM(BPF_REG_1, 5),
+			BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+	},
+	{
 		"no bpf_exit",
 		.insns = {
 			BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index 939a337..5d4f10a 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -7,7 +7,7 @@
 
 TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall test_mremap_vdso \
 			check_initial_reg_state sigreturn ldt_gdt iopl mpx-mini-test ioperm \
-			protection_keys test_vdso
+			protection_keys test_vdso test_vsyscall
 TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \
 			test_FCMOV test_FCOMI test_FISTTP \
 			vdso_restorer
diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c
new file mode 100644
index 0000000..7a744fa
--- /dev/null
+++ b/tools/testing/selftests/x86/test_vsyscall.c
@@ -0,0 +1,500 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <sys/time.h>
+#include <time.h>
+#include <stdlib.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#include <dlfcn.h>
+#include <string.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <sys/ucontext.h>
+#include <errno.h>
+#include <err.h>
+#include <sched.h>
+#include <stdbool.h>
+#include <setjmp.h>
+
+#ifdef __x86_64__
+# define VSYS(x) (x)
+#else
+# define VSYS(x) 0
+#endif
+
+#ifndef SYS_getcpu
+# ifdef __x86_64__
+#  define SYS_getcpu 309
+# else
+#  define SYS_getcpu 318
+# endif
+#endif
+
+static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
+		       int flags)
+{
+	struct sigaction sa;
+	memset(&sa, 0, sizeof(sa));
+	sa.sa_sigaction = handler;
+	sa.sa_flags = SA_SIGINFO | flags;
+	sigemptyset(&sa.sa_mask);
+	if (sigaction(sig, &sa, 0))
+		err(1, "sigaction");
+}
+
+/* vsyscalls and vDSO */
+bool should_read_vsyscall = false;
+
+typedef long (*gtod_t)(struct timeval *tv, struct timezone *tz);
+gtod_t vgtod = (gtod_t)VSYS(0xffffffffff600000);
+gtod_t vdso_gtod;
+
+typedef int (*vgettime_t)(clockid_t, struct timespec *);
+vgettime_t vdso_gettime;
+
+typedef long (*time_func_t)(time_t *t);
+time_func_t vtime = (time_func_t)VSYS(0xffffffffff600400);
+time_func_t vdso_time;
+
+typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
+getcpu_t vgetcpu = (getcpu_t)VSYS(0xffffffffff600800);
+getcpu_t vdso_getcpu;
+
+static void init_vdso(void)
+{
+	void *vdso = dlopen("linux-vdso.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
+	if (!vdso)
+		vdso = dlopen("linux-gate.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
+	if (!vdso) {
+		printf("[WARN]\tfailed to find vDSO\n");
+		return;
+	}
+
+	vdso_gtod = (gtod_t)dlsym(vdso, "__vdso_gettimeofday");
+	if (!vdso_gtod)
+		printf("[WARN]\tfailed to find gettimeofday in vDSO\n");
+
+	vdso_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime");
+	if (!vdso_gettime)
+		printf("[WARN]\tfailed to find clock_gettime in vDSO\n");
+
+	vdso_time = (time_func_t)dlsym(vdso, "__vdso_time");
+	if (!vdso_time)
+		printf("[WARN]\tfailed to find time in vDSO\n");
+
+	vdso_getcpu = (getcpu_t)dlsym(vdso, "__vdso_getcpu");
+	if (!vdso_getcpu) {
+		/* getcpu() was never wired up in the 32-bit vDSO. */
+		printf("[%s]\tfailed to find getcpu in vDSO\n",
+		       sizeof(long) == 8 ? "WARN" : "NOTE");
+	}
+}
+
+static int init_vsys(void)
+{
+#ifdef __x86_64__
+	int nerrs = 0;
+	FILE *maps;
+	char line[128];
+	bool found = false;
+
+	maps = fopen("/proc/self/maps", "r");
+	if (!maps) {
+		printf("[WARN]\tCould not open /proc/self/maps -- assuming vsyscall is r-x\n");
+		should_read_vsyscall = true;
+		return 0;
+	}
+
+	while (fgets(line, sizeof(line), maps)) {
+		char r, x;
+		void *start, *end;
+		char name[128];
+		if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s",
+			   &start, &end, &r, &x, name) != 5)
+			continue;
+
+		if (strcmp(name, "[vsyscall]"))
+			continue;
+
+		printf("\tvsyscall map: %s", line);
+
+		if (start != (void *)0xffffffffff600000 ||
+		    end != (void *)0xffffffffff601000) {
+			printf("[FAIL]\taddress range is nonsense\n");
+			nerrs++;
+		}
+
+		printf("\tvsyscall permissions are %c-%c\n", r, x);
+		should_read_vsyscall = (r == 'r');
+		if (x != 'x') {
+			vgtod = NULL;
+			vtime = NULL;
+			vgetcpu = NULL;
+		}
+
+		found = true;
+		break;
+	}
+
+	fclose(maps);
+
+	if (!found) {
+		printf("\tno vsyscall map in /proc/self/maps\n");
+		should_read_vsyscall = false;
+		vgtod = NULL;
+		vtime = NULL;
+		vgetcpu = NULL;
+	}
+
+	return nerrs;
+#else
+	return 0;
+#endif
+}
+
+/* syscalls */
+static inline long sys_gtod(struct timeval *tv, struct timezone *tz)
+{
+	return syscall(SYS_gettimeofday, tv, tz);
+}
+
+static inline int sys_clock_gettime(clockid_t id, struct timespec *ts)
+{
+	return syscall(SYS_clock_gettime, id, ts);
+}
+
+static inline long sys_time(time_t *t)
+{
+	return syscall(SYS_time, t);
+}
+
+static inline long sys_getcpu(unsigned *cpu, unsigned *node,
+			      void *cache)
+{
+	return syscall(SYS_getcpu, cpu, node, cache);
+}
+
+static jmp_buf jmpbuf;
+
+static void sigsegv(int sig, siginfo_t *info, void *ctx_void)
+{
+	siglongjmp(jmpbuf, 1);
+}
+
+static double tv_diff(const struct timeval *a, const struct timeval *b)
+{
+	return (double)(a->tv_sec - b->tv_sec) +
+		(double)((int)a->tv_usec - (int)b->tv_usec) * 1e-6;
+}
+
+static int check_gtod(const struct timeval *tv_sys1,
+		      const struct timeval *tv_sys2,
+		      const struct timezone *tz_sys,
+		      const char *which,
+		      const struct timeval *tv_other,
+		      const struct timezone *tz_other)
+{
+	int nerrs = 0;
+	double d1, d2;
+
+	if (tz_other && (tz_sys->tz_minuteswest != tz_other->tz_minuteswest || tz_sys->tz_dsttime != tz_other->tz_dsttime)) {
+		printf("[FAIL] %s tz mismatch\n", which);
+		nerrs++;
+	}
+
+	d1 = tv_diff(tv_other, tv_sys1);
+	d2 = tv_diff(tv_sys2, tv_other);
+	printf("\t%s time offsets: %lf %lf\n", which, d1, d2);
+
+	if (d1 < 0 || d2 < 0) {
+		printf("[FAIL]\t%s time was inconsistent with the syscall\n", which);
+		nerrs++;
+	} else {
+		printf("[OK]\t%s gettimeofday()'s timeval was okay\n", which);
+	}
+
+	return nerrs;
+}
+
+static int test_gtod(void)
+{
+	struct timeval tv_sys1, tv_sys2, tv_vdso, tv_vsys;
+	struct timezone tz_sys, tz_vdso, tz_vsys;
+	long ret_vdso = -1;
+	long ret_vsys = -1;
+	int nerrs = 0;
+
+	printf("[RUN]\ttest gettimeofday()\n");
+
+	if (sys_gtod(&tv_sys1, &tz_sys) != 0)
+		err(1, "syscall gettimeofday");
+	if (vdso_gtod)
+		ret_vdso = vdso_gtod(&tv_vdso, &tz_vdso);
+	if (vgtod)
+		ret_vsys = vgtod(&tv_vsys, &tz_vsys);
+	if (sys_gtod(&tv_sys2, &tz_sys) != 0)
+		err(1, "syscall gettimeofday");
+
+	if (vdso_gtod) {
+		if (ret_vdso == 0) {
+			nerrs += check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vDSO", &tv_vdso, &tz_vdso);
+		} else {
+			printf("[FAIL]\tvDSO gettimeofday() failed: %ld\n", ret_vdso);
+			nerrs++;
+		}
+	}
+
+	if (vgtod) {
+		if (ret_vsys == 0) {
+			nerrs += check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vsyscall", &tv_vsys, &tz_vsys);
+		} else {
+			printf("[FAIL]\tvsys gettimeofday() failed: %ld\n", ret_vsys);
+			nerrs++;
+		}
+	}
+
+	return nerrs;
+}
+
+static int test_time(void)
+{
+	int nerrs = 0;
+
+	printf("[RUN]\ttest time()\n");
+	long t_sys1, t_sys2, t_vdso = 0, t_vsys = 0;
+	long t2_sys1 = -1, t2_sys2 = -1, t2_vdso = -1, t2_vsys = -1;
+	t_sys1 = sys_time(&t2_sys1);
+	if (vdso_time)
+		t_vdso = vdso_time(&t2_vdso);
+	if (vtime)
+		t_vsys = vtime(&t2_vsys);
+	t_sys2 = sys_time(&t2_sys2);
+	if (t_sys1 < 0 || t_sys1 != t2_sys1 || t_sys2 < 0 || t_sys2 != t2_sys2) {
+		printf("[FAIL]\tsyscall failed (ret1:%ld output1:%ld ret2:%ld output2:%ld)\n", t_sys1, t2_sys1, t_sys2, t2_sys2);
+		nerrs++;
+		return nerrs;
+	}
+
+	if (vdso_time) {
+		if (t_vdso < 0 || t_vdso != t2_vdso) {
+			printf("[FAIL]\tvDSO failed (ret:%ld output:%ld)\n", t_vdso, t2_vdso);
+			nerrs++;
+		} else if (t_vdso < t_sys1 || t_vdso > t_sys2) {
+			printf("[FAIL]\tvDSO returned the wrong time (%ld %ld %ld)\n", t_sys1, t_vdso, t_sys2);
+			nerrs++;
+		} else {
+			printf("[OK]\tvDSO time() is okay\n");
+		}
+	}
+
+	if (vtime) {
+		if (t_vsys < 0 || t_vsys != t2_vsys) {
+			printf("[FAIL]\tvsyscall failed (ret:%ld output:%ld)\n", t_vsys, t2_vsys);
+			nerrs++;
+		} else if (t_vsys < t_sys1 || t_vsys > t_sys2) {
+			printf("[FAIL]\tvsyscall returned the wrong time (%ld %ld %ld)\n", t_sys1, t_vsys, t_sys2);
+			nerrs++;
+		} else {
+			printf("[OK]\tvsyscall time() is okay\n");
+		}
+	}
+
+	return nerrs;
+}
+
+static int test_getcpu(int cpu)
+{
+	int nerrs = 0;
+	long ret_sys, ret_vdso = -1, ret_vsys = -1;
+
+	printf("[RUN]\tgetcpu() on CPU %d\n", cpu);
+
+	cpu_set_t cpuset;
+	CPU_ZERO(&cpuset);
+	CPU_SET(cpu, &cpuset);
+	if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0) {
+		printf("[SKIP]\tfailed to force CPU %d\n", cpu);
+		return nerrs;
+	}
+
+	unsigned cpu_sys, cpu_vdso, cpu_vsys, node_sys, node_vdso, node_vsys;
+	unsigned node = 0;
+	bool have_node = false;
+	ret_sys = sys_getcpu(&cpu_sys, &node_sys, 0);
+	if (vdso_getcpu)
+		ret_vdso = vdso_getcpu(&cpu_vdso, &node_vdso, 0);
+	if (vgetcpu)
+		ret_vsys = vgetcpu(&cpu_vsys, &node_vsys, 0);
+
+	if (ret_sys == 0) {
+		if (cpu_sys != cpu) {
+			printf("[FAIL]\tsyscall reported CPU %u but should be %d\n", cpu_sys, cpu);
+			nerrs++;
+		}
+
+		have_node = true;
+		node = node_sys;
+	}
+
+	if (vdso_getcpu) {
+		if (ret_vdso) {
+			printf("[FAIL]\tvDSO getcpu() failed\n");
+			nerrs++;
+		} else {
+			if (!have_node) {
+				have_node = true;
+				node = node_vdso;
+			}
+
+			if (cpu_vdso != cpu) {
+				printf("[FAIL]\tvDSO reported CPU %u but should be %d\n", cpu_vdso, cpu);
+				nerrs++;
+			} else {
+				printf("[OK]\tvDSO reported correct CPU\n");
+			}
+
+			if (node_vdso != node) {
+				printf("[FAIL]\tvDSO reported node %u but should be %u\n", node_vdso, node);
+				nerrs++;
+			} else {
+				printf("[OK]\tvDSO reported correct node\n");
+			}
+		}
+	}
+
+	if (vgetcpu) {
+		if (ret_vsys) {
+			printf("[FAIL]\tvsyscall getcpu() failed\n");
+			nerrs++;
+		} else {
+			if (!have_node) {
+				have_node = true;
+				node = node_vsys;
+			}
+
+			if (cpu_vsys != cpu) {
+				printf("[FAIL]\tvsyscall reported CPU %u but should be %d\n", cpu_vsys, cpu);
+				nerrs++;
+			} else {
+				printf("[OK]\tvsyscall reported correct CPU\n");
+			}
+
+			if (node_vsys != node) {
+				printf("[FAIL]\tvsyscall reported node %u but should be %u\n", node_vsys, node);
+				nerrs++;
+			} else {
+				printf("[OK]\tvsyscall reported correct node\n");
+			}
+		}
+	}
+
+	return nerrs;
+}
+
+static int test_vsys_r(void)
+{
+#ifdef __x86_64__
+	printf("[RUN]\tChecking read access to the vsyscall page\n");
+	bool can_read;
+	if (sigsetjmp(jmpbuf, 1) == 0) {
+		*(volatile int *)0xffffffffff600000;
+		can_read = true;
+	} else {
+		can_read = false;
+	}
+
+	if (can_read && !should_read_vsyscall) {
+		printf("[FAIL]\tWe have read access, but we shouldn't\n");
+		return 1;
+	} else if (!can_read && should_read_vsyscall) {
+		printf("[FAIL]\tWe don't have read access, but we should\n");
+		return 1;
+	} else {
+		printf("[OK]\tgot expected result\n");
+	}
+#endif
+
+	return 0;
+}
+
+
+#ifdef __x86_64__
+#define X86_EFLAGS_TF (1UL << 8)
+static volatile sig_atomic_t num_vsyscall_traps;
+
+static unsigned long get_eflags(void)
+{
+	unsigned long eflags;
+	asm volatile ("pushfq\n\tpopq %0" : "=rm" (eflags));
+	return eflags;
+}
+
+static void set_eflags(unsigned long eflags)
+{
+	asm volatile ("pushq %0\n\tpopfq" : : "rm" (eflags) : "flags");
+}
+
+static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
+{
+	ucontext_t *ctx = (ucontext_t *)ctx_void;
+	unsigned long ip = ctx->uc_mcontext.gregs[REG_RIP];
+
+	if (((ip ^ 0xffffffffff600000UL) & ~0xfffUL) == 0)
+		num_vsyscall_traps++;
+}
+
+static int test_native_vsyscall(void)
+{
+	time_t tmp;
+	bool is_native;
+
+	if (!vtime)
+		return 0;
+
+	printf("[RUN]\tchecking for native vsyscall\n");
+	sethandler(SIGTRAP, sigtrap, 0);
+	set_eflags(get_eflags() | X86_EFLAGS_TF);
+	vtime(&tmp);
+	set_eflags(get_eflags() & ~X86_EFLAGS_TF);
+
+	/*
+	 * If vsyscalls are emulated, we expect a single trap in the
+	 * vsyscall page -- the call instruction will trap with RIP
+	 * pointing to the entry point before emulation takes over.
+	 * In native mode, we expect two traps, since whatever code
+	 * the vsyscall page contains will be more than just a ret
+	 * instruction.
+	 */
+	is_native = (num_vsyscall_traps > 1);
+
+	printf("\tvsyscalls are %s (%d instructions in vsyscall page)\n",
+	       (is_native ? "native" : "emulated"),
+	       (int)num_vsyscall_traps);
+
+	return 0;
+}
+#endif
+
+int main(int argc, char **argv)
+{
+	int nerrs = 0;
+
+	init_vdso();
+	nerrs += init_vsys();
+
+	nerrs += test_gtod();
+	nerrs += test_time();
+	nerrs += test_getcpu(0);
+	nerrs += test_getcpu(1);
+
+	sethandler(SIGSEGV, sigsegv, 0);
+	nerrs += test_vsys_r();
+
+#ifdef __x86_64__
+	nerrs += test_native_vsyscall();
+#endif
+
+	return nerrs ? 1 : 0;
+}