qed*: Utilize Firmware 8.15.3.0

This patch advances the qed* drivers to the newer firmware, which
resolves several firmware bugs, mostly related [but not limited to]
init/deinit issues in the various offloaded protocols.

It also introduces a major 4-Cached SGE change in firmware, which can be
seen in the storage drivers' changes.

In addition, this firmware is required for supporting the new QL41xxx
series of adapters; while this patch doesn't add the actual support,
the firmware contains the necessary initialization & firmware logic to
operate such adapters [actual support would be added later on].

Changes from previous versions:
-------------------------------
 - V2 - Fix kbuild test robot warnings

Signed-off-by: Tomer Tayar <Tomer.Tayar@cavium.com>
Signed-off-by: Ram Amrani <Ram.Amrani@cavium.com>
Signed-off-by: Manish Rangankar <Manish.Rangankar@cavium.com>
Signed-off-by: Chad Dupuis <Chad.Dupuis@cavium.com>
Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 00c17fa..be99092 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -51,7 +51,7 @@
 #include "qed_hsi.h"
 
 extern const struct qed_common_ops qed_common_ops_pass;
-#define DRV_MODULE_VERSION "8.10.10.20"
+#define DRV_MODULE_VERSION "8.10.10.21"
 
 #define MAX_HWFNS_PER_DEVICE    (4)
 #define NAME_SIZE 16
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index d42d03d..89d210a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -71,8 +71,7 @@
 #define TM_ALIGN        BIT(TM_SHIFT)
 #define TM_ELEM_SIZE    4
 
-/* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */
-#define ILT_DEFAULT_HW_P_SIZE	(IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3)
+#define ILT_DEFAULT_HW_P_SIZE	4
 
 #define ILT_PAGE_IN_BYTES(hw_p_size)	(1U << ((hw_p_size) + 12))
 #define ILT_CFG_REG(cli, reg)	PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
@@ -1126,7 +1125,7 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
 	clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
 	clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
 	clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
-	/* default ILT page size for all clients is 32K */
+	/* default ILT page size for all clients is 64K */
 	for (i = 0; i < ILT_CLI_MAX; i++)
 		p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 68f19ca..5e81e8a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -17,7 +17,6 @@
 
 /* Chip IDs enum */
 enum chip_ids {
-	CHIP_RESERVED,
 	CHIP_BB_B0,
 	CHIP_K2,
 	MAX_CHIP_IDS
@@ -40,6 +39,7 @@ enum mem_groups {
 	MEM_GROUP_BTB_RAM,
 	MEM_GROUP_RDIF_CTX,
 	MEM_GROUP_TDIF_CTX,
+	MEM_GROUP_CFC_MEM,
 	MEM_GROUP_CONN_CFC_MEM,
 	MEM_GROUP_TASK_CFC_MEM,
 	MEM_GROUP_CAU_PI,
@@ -72,6 +72,7 @@ static const char * const s_mem_group_names[] = {
 	"BTB_RAM",
 	"RDIF_CTX",
 	"TDIF_CTX",
+	"CFC_MEM",
 	"CONN_CFC_MEM",
 	"TASK_CFC_MEM",
 	"CAU_PI",
@@ -185,13 +186,16 @@ struct dbg_array {
 	u32 size_in_dwords;
 };
 
+struct chip_platform_defs {
+	u8 num_ports;
+	u8 num_pfs;
+	u8 num_vfs;
+};
+
 /* Chip constant definitions */
 struct chip_defs {
 	const char *name;
-	struct {
-		u8 num_ports;
-		u8 num_pfs;
-	} per_platform[MAX_PLATFORM_IDS];
+	struct chip_platform_defs per_platform[MAX_PLATFORM_IDS];
 };
 
 /* Platform constant definitions */
@@ -405,22 +409,23 @@ struct phy_defs {
 /***************************** Constant Arrays *******************************/
 
 /* Debug arrays */
-static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
+static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} };
 
 /* Chip constant definitions array */
 static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
-	{ "reserved", { {0, 0}, {0, 0}, {0, 0}, {0, 0} } },
 	{ "bb_b0",
-	  { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB}, {0, 0}, {0, 0}, {0, 0} } },
-	{ "k2", { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2}, {0, 0}, {0, 0}, {0, 0} } }
+	  { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB}, {0, 0, 0},
+	    {0, 0, 0}, {0, 0, 0} } },
+	{ "k2",
+	  { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2}, {0, 0, 0},
+	    {0, 0, 0}, {0, 0, 0} } }
 };
 
 /* Storm constant definitions array */
 static struct storm_defs s_storm_defs[] = {
 	/* Tstorm */
 	{'T', BLOCK_TSEM,
-	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
-	  DBG_BUS_CLIENT_RBCT}, true,
+	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true,
 	 TSEM_REG_FAST_MEMORY,
 	 TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
 	 TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
@@ -432,8 +437,7 @@ static struct storm_defs s_storm_defs[] = {
 	 4, TCM_REG_SM_TASK_CTX},
 	/* Mstorm */
 	{'M', BLOCK_MSEM,
-	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
-	  DBG_BUS_CLIENT_RBCM}, false,
+	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false,
 	 MSEM_REG_FAST_MEMORY,
 	 MSEM_REG_DBG_FRAME_MODE, MSEM_REG_SLOW_DBG_ACTIVE,
 	 MSEM_REG_SLOW_DBG_MODE, MSEM_REG_DBG_MODE1_CFG,
@@ -445,8 +449,7 @@ static struct storm_defs s_storm_defs[] = {
 	 7, MCM_REG_SM_TASK_CTX},
 	/* Ustorm */
 	{'U', BLOCK_USEM,
-	 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
-	  DBG_BUS_CLIENT_RBCU}, false,
+	 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false,
 	 USEM_REG_FAST_MEMORY,
 	 USEM_REG_DBG_FRAME_MODE, USEM_REG_SLOW_DBG_ACTIVE,
 	 USEM_REG_SLOW_DBG_MODE, USEM_REG_DBG_MODE1_CFG,
@@ -458,8 +461,7 @@ static struct storm_defs s_storm_defs[] = {
 	 3, UCM_REG_SM_TASK_CTX},
 	/* Xstorm */
 	{'X', BLOCK_XSEM,
-	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
-	  DBG_BUS_CLIENT_RBCX}, false,
+	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false,
 	 XSEM_REG_FAST_MEMORY,
 	 XSEM_REG_DBG_FRAME_MODE, XSEM_REG_SLOW_DBG_ACTIVE,
 	 XSEM_REG_SLOW_DBG_MODE, XSEM_REG_DBG_MODE1_CFG,
@@ -471,8 +473,7 @@ static struct storm_defs s_storm_defs[] = {
 	 0, 0},
 	/* Ystorm */
 	{'Y', BLOCK_YSEM,
-	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
-	  DBG_BUS_CLIENT_RBCY}, false,
+	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false,
 	 YSEM_REG_FAST_MEMORY,
 	 YSEM_REG_DBG_FRAME_MODE, YSEM_REG_SLOW_DBG_ACTIVE,
 	 YSEM_REG_SLOW_DBG_MODE, YSEM_REG_DBG_MODE1_CFG,
@@ -484,8 +485,7 @@ static struct storm_defs s_storm_defs[] = {
 	 12, YCM_REG_SM_TASK_CTX},
 	/* Pstorm */
 	{'P', BLOCK_PSEM,
-	 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
-	  DBG_BUS_CLIENT_RBCS}, true,
+	 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true,
 	 PSEM_REG_FAST_MEMORY,
 	 PSEM_REG_DBG_FRAME_MODE, PSEM_REG_SLOW_DBG_ACTIVE,
 	 PSEM_REG_SLOW_DBG_MODE, PSEM_REG_DBG_MODE1_CFG,
@@ -499,8 +499,9 @@ static struct storm_defs s_storm_defs[] = {
 
 /* Block definitions array */
 static struct block_defs block_grc_defs = {
-	"grc", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+	"grc",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
 	GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
 	GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
 	GRC_REG_DBG_FORCE_FRAME,
@@ -508,29 +509,30 @@ static struct block_defs block_grc_defs = {
 };
 
 static struct block_defs block_miscs_defs = {
-	"miscs", {false, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"miscs", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_misc_defs = {
-	"misc", {false, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"misc", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_dbu_defs = {
-	"dbu", {false, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"dbu", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_pglue_b_defs = {
-	"pglue_b", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
+	"pglue_b",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
 	PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
 	PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
 	PGLUE_B_REG_DBG_FORCE_FRAME,
@@ -538,8 +540,9 @@ static struct block_defs block_pglue_b_defs = {
 };
 
 static struct block_defs block_cnig_defs = {
-	"cnig", {false, false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+	"cnig",
+	{false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
 	CNIG_REG_DBG_SELECT_K2, CNIG_REG_DBG_DWORD_ENABLE_K2,
 	CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2,
 	CNIG_REG_DBG_FORCE_FRAME_K2,
@@ -547,15 +550,16 @@ static struct block_defs block_cnig_defs = {
 };
 
 static struct block_defs block_cpmu_defs = {
-	"cpmu", {false, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"cpmu", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	true, false, DBG_RESET_REG_MISCS_PL_HV, 8
 };
 
 static struct block_defs block_ncsi_defs = {
-	"ncsi", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+	"ncsi",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
 	NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
 	NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
 	NCSI_REG_DBG_FORCE_FRAME,
@@ -563,15 +567,16 @@ static struct block_defs block_ncsi_defs = {
 };
 
 static struct block_defs block_opte_defs = {
-	"opte", {false, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"opte", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	true, false, DBG_RESET_REG_MISCS_PL_HV, 4
 };
 
 static struct block_defs block_bmb_defs = {
-	"bmb", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
+	"bmb",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
 	BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
 	BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
 	BMB_REG_DBG_FORCE_FRAME,
@@ -579,8 +584,9 @@ static struct block_defs block_bmb_defs = {
 };
 
 static struct block_defs block_pcie_defs = {
-	"pcie", {false, false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+	"pcie",
+	{false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
 	PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
 	PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
 	PCIE_REG_DBG_COMMON_FORCE_FRAME,
@@ -588,15 +594,16 @@ static struct block_defs block_pcie_defs = {
 };
 
 static struct block_defs block_mcp_defs = {
-	"mcp", {false, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"mcp", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_mcp2_defs = {
-	"mcp2", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+	"mcp2",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
 	MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
 	MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
 	MCP2_REG_DBG_FORCE_FRAME,
@@ -604,8 +611,9 @@ static struct block_defs block_mcp2_defs = {
 };
 
 static struct block_defs block_pswhst_defs = {
-	"pswhst", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	"pswhst",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
 	PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
 	PSWHST_REG_DBG_FORCE_FRAME,
@@ -613,8 +621,9 @@ static struct block_defs block_pswhst_defs = {
 };
 
 static struct block_defs block_pswhst2_defs = {
-	"pswhst2", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	"pswhst2",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
 	PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
 	PSWHST2_REG_DBG_FORCE_FRAME,
@@ -622,8 +631,9 @@ static struct block_defs block_pswhst2_defs = {
 };
 
 static struct block_defs block_pswrd_defs = {
-	"pswrd", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	"pswrd",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
 	PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
 	PSWRD_REG_DBG_FORCE_FRAME,
@@ -631,8 +641,9 @@ static struct block_defs block_pswrd_defs = {
 };
 
 static struct block_defs block_pswrd2_defs = {
-	"pswrd2", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	"pswrd2",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
 	PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
 	PSWRD2_REG_DBG_FORCE_FRAME,
@@ -640,8 +651,9 @@ static struct block_defs block_pswrd2_defs = {
 };
 
 static struct block_defs block_pswwr_defs = {
-	"pswwr", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	"pswwr",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
 	PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
 	PSWWR_REG_DBG_FORCE_FRAME,
@@ -649,15 +661,16 @@ static struct block_defs block_pswwr_defs = {
 };
 
 static struct block_defs block_pswwr2_defs = {
-	"pswwr2", {false, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"pswwr2", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	true, false, DBG_RESET_REG_MISC_PL_HV, 3
 };
 
 static struct block_defs block_pswrq_defs = {
-	"pswrq", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	"pswrq",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
 	PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
 	PSWRQ_REG_DBG_FORCE_FRAME,
@@ -665,8 +678,9 @@ static struct block_defs block_pswrq_defs = {
 };
 
 static struct block_defs block_pswrq2_defs = {
-	"pswrq2", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	"pswrq2",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
 	PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
 	PSWRQ2_REG_DBG_FORCE_FRAME,
@@ -674,8 +688,9 @@ static struct block_defs block_pswrq2_defs = {
 };
 
 static struct block_defs block_pglcs_defs = {
-	"pglcs", {false, false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+	"pglcs",
+	{false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
 	PGLCS_REG_DBG_SELECT, PGLCS_REG_DBG_DWORD_ENABLE,
 	PGLCS_REG_DBG_SHIFT, PGLCS_REG_DBG_FORCE_VALID,
 	PGLCS_REG_DBG_FORCE_FRAME,
@@ -683,8 +698,9 @@ static struct block_defs block_pglcs_defs = {
 };
 
 static struct block_defs block_ptu_defs = {
-	"ptu", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	"ptu",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
 	PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
 	PTU_REG_DBG_FORCE_FRAME,
@@ -692,8 +708,9 @@ static struct block_defs block_ptu_defs = {
 };
 
 static struct block_defs block_dmae_defs = {
-	"dmae", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	"dmae",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
 	DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
 	DMAE_REG_DBG_FORCE_FRAME,
@@ -701,8 +718,9 @@ static struct block_defs block_dmae_defs = {
 };
 
 static struct block_defs block_tcm_defs = {
-	"tcm", {true, true, true}, true, DBG_TSTORM_ID,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+	"tcm",
+	{true, true}, true, DBG_TSTORM_ID,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
 	TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
 	TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
 	TCM_REG_DBG_FORCE_FRAME,
@@ -710,8 +728,9 @@ static struct block_defs block_tcm_defs = {
 };
 
 static struct block_defs block_mcm_defs = {
-	"mcm", {true, true, true}, true, DBG_MSTORM_ID,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+	"mcm",
+	{true, true}, true, DBG_MSTORM_ID,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
 	MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
 	MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
 	MCM_REG_DBG_FORCE_FRAME,
@@ -719,8 +738,9 @@ static struct block_defs block_mcm_defs = {
 };
 
 static struct block_defs block_ucm_defs = {
-	"ucm", {true, true, true}, true, DBG_USTORM_ID,
-	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+	"ucm",
+	{true, true}, true, DBG_USTORM_ID,
+	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
 	UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
 	UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
 	UCM_REG_DBG_FORCE_FRAME,
@@ -728,8 +748,9 @@ static struct block_defs block_ucm_defs = {
 };
 
 static struct block_defs block_xcm_defs = {
-	"xcm", {true, true, true}, true, DBG_XSTORM_ID,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+	"xcm",
+	{true, true}, true, DBG_XSTORM_ID,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
 	XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
 	XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
 	XCM_REG_DBG_FORCE_FRAME,
@@ -737,8 +758,9 @@ static struct block_defs block_xcm_defs = {
 };
 
 static struct block_defs block_ycm_defs = {
-	"ycm", {true, true, true}, true, DBG_YSTORM_ID,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+	"ycm",
+	{true, true}, true, DBG_YSTORM_ID,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
 	YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
 	YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
 	YCM_REG_DBG_FORCE_FRAME,
@@ -746,8 +768,9 @@ static struct block_defs block_ycm_defs = {
 };
 
 static struct block_defs block_pcm_defs = {
-	"pcm", {true, true, true}, true, DBG_PSTORM_ID,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+	"pcm",
+	{true, true}, true, DBG_PSTORM_ID,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
 	PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
 	PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
 	PCM_REG_DBG_FORCE_FRAME,
@@ -755,8 +778,9 @@ static struct block_defs block_pcm_defs = {
 };
 
 static struct block_defs block_qm_defs = {
-	"qm", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
+	"qm",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
 	QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
 	QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
 	QM_REG_DBG_FORCE_FRAME,
@@ -764,8 +788,9 @@ static struct block_defs block_qm_defs = {
 };
 
 static struct block_defs block_tm_defs = {
-	"tm", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+	"tm",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
 	TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
 	TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
 	TM_REG_DBG_FORCE_FRAME,
@@ -773,8 +798,9 @@ static struct block_defs block_tm_defs = {
 };
 
 static struct block_defs block_dorq_defs = {
-	"dorq", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+	"dorq",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
 	DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
 	DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
 	DORQ_REG_DBG_FORCE_FRAME,
@@ -782,8 +808,9 @@ static struct block_defs block_dorq_defs = {
 };
 
 static struct block_defs block_brb_defs = {
-	"brb", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+	"brb",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
 	BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
 	BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
 	BRB_REG_DBG_FORCE_FRAME,
@@ -791,8 +818,9 @@ static struct block_defs block_brb_defs = {
 };
 
 static struct block_defs block_src_defs = {
-	"src", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+	"src",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
 	SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
 	SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
 	SRC_REG_DBG_FORCE_FRAME,
@@ -800,8 +828,9 @@ static struct block_defs block_src_defs = {
 };
 
 static struct block_defs block_prs_defs = {
-	"prs", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+	"prs",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
 	PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
 	PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
 	PRS_REG_DBG_FORCE_FRAME,
@@ -809,8 +838,9 @@ static struct block_defs block_prs_defs = {
 };
 
 static struct block_defs block_tsdm_defs = {
-	"tsdm", {true, true, true}, true, DBG_TSTORM_ID,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+	"tsdm",
+	{true, true}, true, DBG_TSTORM_ID,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
 	TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
 	TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
 	TSDM_REG_DBG_FORCE_FRAME,
@@ -818,8 +848,9 @@ static struct block_defs block_tsdm_defs = {
 };
 
 static struct block_defs block_msdm_defs = {
-	"msdm", {true, true, true}, true, DBG_MSTORM_ID,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+	"msdm",
+	{true, true}, true, DBG_MSTORM_ID,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
 	MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
 	MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
 	MSDM_REG_DBG_FORCE_FRAME,
@@ -827,8 +858,9 @@ static struct block_defs block_msdm_defs = {
 };
 
 static struct block_defs block_usdm_defs = {
-	"usdm", {true, true, true}, true, DBG_USTORM_ID,
-	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+	"usdm",
+	{true, true}, true, DBG_USTORM_ID,
+	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
 	USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
 	USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
 	USDM_REG_DBG_FORCE_FRAME,
@@ -836,8 +868,9 @@ static struct block_defs block_usdm_defs = {
 };
 
 static struct block_defs block_xsdm_defs = {
-	"xsdm", {true, true, true}, true, DBG_XSTORM_ID,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+	"xsdm",
+	{true, true}, true, DBG_XSTORM_ID,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
 	XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
 	XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
 	XSDM_REG_DBG_FORCE_FRAME,
@@ -845,8 +878,9 @@ static struct block_defs block_xsdm_defs = {
 };
 
 static struct block_defs block_ysdm_defs = {
-	"ysdm", {true, true, true}, true, DBG_YSTORM_ID,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+	"ysdm",
+	{true, true}, true, DBG_YSTORM_ID,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
 	YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
 	YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
 	YSDM_REG_DBG_FORCE_FRAME,
@@ -854,8 +888,9 @@ static struct block_defs block_ysdm_defs = {
 };
 
 static struct block_defs block_psdm_defs = {
-	"psdm", {true, true, true}, true, DBG_PSTORM_ID,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+	"psdm",
+	{true, true}, true, DBG_PSTORM_ID,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
 	PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
 	PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
 	PSDM_REG_DBG_FORCE_FRAME,
@@ -863,8 +898,9 @@ static struct block_defs block_psdm_defs = {
 };
 
 static struct block_defs block_tsem_defs = {
-	"tsem", {true, true, true}, true, DBG_TSTORM_ID,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+	"tsem",
+	{true, true}, true, DBG_TSTORM_ID,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
 	TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
 	TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
 	TSEM_REG_DBG_FORCE_FRAME,
@@ -872,8 +908,9 @@ static struct block_defs block_tsem_defs = {
 };
 
 static struct block_defs block_msem_defs = {
-	"msem", {true, true, true}, true, DBG_MSTORM_ID,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+	"msem",
+	{true, true}, true, DBG_MSTORM_ID,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
 	MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
 	MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
 	MSEM_REG_DBG_FORCE_FRAME,
@@ -881,8 +918,9 @@ static struct block_defs block_msem_defs = {
 };
 
 static struct block_defs block_usem_defs = {
-	"usem", {true, true, true}, true, DBG_USTORM_ID,
-	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+	"usem",
+	{true, true}, true, DBG_USTORM_ID,
+	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
 	USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
 	USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
 	USEM_REG_DBG_FORCE_FRAME,
@@ -890,8 +928,9 @@ static struct block_defs block_usem_defs = {
 };
 
 static struct block_defs block_xsem_defs = {
-	"xsem", {true, true, true}, true, DBG_XSTORM_ID,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+	"xsem",
+	{true, true}, true, DBG_XSTORM_ID,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
 	XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
 	XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
 	XSEM_REG_DBG_FORCE_FRAME,
@@ -899,8 +938,9 @@ static struct block_defs block_xsem_defs = {
 };
 
 static struct block_defs block_ysem_defs = {
-	"ysem", {true, true, true}, true, DBG_YSTORM_ID,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+	"ysem",
+	{true, true}, true, DBG_YSTORM_ID,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
 	YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
 	YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
 	YSEM_REG_DBG_FORCE_FRAME,
@@ -908,8 +948,9 @@ static struct block_defs block_ysem_defs = {
 };
 
 static struct block_defs block_psem_defs = {
-	"psem", {true, true, true}, true, DBG_PSTORM_ID,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+	"psem",
+	{true, true}, true, DBG_PSTORM_ID,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
 	PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
 	PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
 	PSEM_REG_DBG_FORCE_FRAME,
@@ -917,8 +958,9 @@ static struct block_defs block_psem_defs = {
 };
 
 static struct block_defs block_rss_defs = {
-	"rss", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+	"rss",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
 	RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
 	RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
 	RSS_REG_DBG_FORCE_FRAME,
@@ -926,8 +968,9 @@ static struct block_defs block_rss_defs = {
 };
 
 static struct block_defs block_tmld_defs = {
-	"tmld", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+	"tmld",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
 	TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
 	TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
 	TMLD_REG_DBG_FORCE_FRAME,
@@ -935,8 +978,9 @@ static struct block_defs block_tmld_defs = {
 };
 
 static struct block_defs block_muld_defs = {
-	"muld", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+	"muld",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
 	MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
 	MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
 	MULD_REG_DBG_FORCE_FRAME,
@@ -944,8 +988,9 @@ static struct block_defs block_muld_defs = {
 };
 
 static struct block_defs block_yuld_defs = {
-	"yuld", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+	"yuld",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
 	YULD_REG_DBG_SELECT, YULD_REG_DBG_DWORD_ENABLE,
 	YULD_REG_DBG_SHIFT, YULD_REG_DBG_FORCE_VALID,
 	YULD_REG_DBG_FORCE_FRAME,
@@ -953,8 +998,9 @@ static struct block_defs block_yuld_defs = {
 };
 
 static struct block_defs block_xyld_defs = {
-	"xyld", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+	"xyld",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
 	XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
 	XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
 	XYLD_REG_DBG_FORCE_FRAME,
@@ -962,8 +1008,9 @@ static struct block_defs block_xyld_defs = {
 };
 
 static struct block_defs block_prm_defs = {
-	"prm", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+	"prm",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
 	PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
 	PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
 	PRM_REG_DBG_FORCE_FRAME,
@@ -971,8 +1018,9 @@ static struct block_defs block_prm_defs = {
 };
 
 static struct block_defs block_pbf_pb1_defs = {
-	"pbf_pb1", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+	"pbf_pb1",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
 	PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
 	PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
 	PBF_PB1_REG_DBG_FORCE_FRAME,
@@ -981,8 +1029,9 @@ static struct block_defs block_pbf_pb1_defs = {
 };
 
 static struct block_defs block_pbf_pb2_defs = {
-	"pbf_pb2", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+	"pbf_pb2",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
 	PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
 	PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
 	PBF_PB2_REG_DBG_FORCE_FRAME,
@@ -991,8 +1040,9 @@ static struct block_defs block_pbf_pb2_defs = {
 };
 
 static struct block_defs block_rpb_defs = {
-	"rpb", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+	"rpb",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
 	RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
 	RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
 	RPB_REG_DBG_FORCE_FRAME,
@@ -1000,8 +1050,9 @@ static struct block_defs block_rpb_defs = {
 };
 
 static struct block_defs block_btb_defs = {
-	"btb", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
+	"btb",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
 	BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
 	BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
 	BTB_REG_DBG_FORCE_FRAME,
@@ -1009,8 +1060,9 @@ static struct block_defs block_btb_defs = {
 };
 
 static struct block_defs block_pbf_defs = {
-	"pbf", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+	"pbf",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
 	PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
 	PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
 	PBF_REG_DBG_FORCE_FRAME,
@@ -1018,8 +1070,9 @@ static struct block_defs block_pbf_defs = {
 };
 
 static struct block_defs block_rdif_defs = {
-	"rdif", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+	"rdif",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
 	RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
 	RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
 	RDIF_REG_DBG_FORCE_FRAME,
@@ -1027,8 +1080,9 @@ static struct block_defs block_rdif_defs = {
 };
 
 static struct block_defs block_tdif_defs = {
-	"tdif", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+	"tdif",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
 	TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
 	TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
 	TDIF_REG_DBG_FORCE_FRAME,
@@ -1036,8 +1090,9 @@ static struct block_defs block_tdif_defs = {
 };
 
 static struct block_defs block_cdu_defs = {
-	"cdu", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+	"cdu",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
 	CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
 	CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
 	CDU_REG_DBG_FORCE_FRAME,
@@ -1045,8 +1100,9 @@ static struct block_defs block_cdu_defs = {
 };
 
 static struct block_defs block_ccfc_defs = {
-	"ccfc", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+	"ccfc",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
 	CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
 	CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
 	CCFC_REG_DBG_FORCE_FRAME,
@@ -1054,8 +1110,9 @@ static struct block_defs block_ccfc_defs = {
 };
 
 static struct block_defs block_tcfc_defs = {
-	"tcfc", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+	"tcfc",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
 	TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
 	TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
 	TCFC_REG_DBG_FORCE_FRAME,
@@ -1063,8 +1120,9 @@ static struct block_defs block_tcfc_defs = {
 };
 
 static struct block_defs block_igu_defs = {
-	"igu", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	"igu",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
 	IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
 	IGU_REG_DBG_FORCE_FRAME,
@@ -1072,8 +1130,9 @@ static struct block_defs block_igu_defs = {
 };
 
 static struct block_defs block_cau_defs = {
-	"cau", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	"cau",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
 	CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
 	CAU_REG_DBG_FORCE_FRAME,
@@ -1081,8 +1140,9 @@ static struct block_defs block_cau_defs = {
 };
 
 static struct block_defs block_umac_defs = {
-	"umac", {false, false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+	"umac",
+	{false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
 	UMAC_REG_DBG_SELECT, UMAC_REG_DBG_DWORD_ENABLE,
 	UMAC_REG_DBG_SHIFT, UMAC_REG_DBG_FORCE_VALID,
 	UMAC_REG_DBG_FORCE_FRAME,
@@ -1090,22 +1150,23 @@ static struct block_defs block_umac_defs = {
 };
 
 static struct block_defs block_xmac_defs = {
-	"xmac", {false, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"xmac", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_dbg_defs = {
-	"dbg", {false, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"dbg", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
 };
 
 static struct block_defs block_nig_defs = {
-	"nig", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+	"nig",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
 	NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
 	NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
 	NIG_REG_DBG_FORCE_FRAME,
@@ -1113,8 +1174,9 @@ static struct block_defs block_nig_defs = {
 };
 
 static struct block_defs block_wol_defs = {
-	"wol", {false, false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+	"wol",
+	{false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
 	WOL_REG_DBG_SELECT, WOL_REG_DBG_DWORD_ENABLE,
 	WOL_REG_DBG_SHIFT, WOL_REG_DBG_FORCE_VALID,
 	WOL_REG_DBG_FORCE_FRAME,
@@ -1122,8 +1184,9 @@ static struct block_defs block_wol_defs = {
 };
 
 static struct block_defs block_bmbn_defs = {
-	"bmbn", {false, false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
+	"bmbn",
+	{false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
 	BMBN_REG_DBG_SELECT, BMBN_REG_DBG_DWORD_ENABLE,
 	BMBN_REG_DBG_SHIFT, BMBN_REG_DBG_FORCE_VALID,
 	BMBN_REG_DBG_FORCE_FRAME,
@@ -1131,15 +1194,16 @@ static struct block_defs block_bmbn_defs = {
 };
 
 static struct block_defs block_ipc_defs = {
-	"ipc", {false, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"ipc", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	true, false, DBG_RESET_REG_MISCS_PL_UA, 8
 };
 
 static struct block_defs block_nwm_defs = {
-	"nwm", {false, false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+	"nwm",
+	{false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
 	NWM_REG_DBG_SELECT, NWM_REG_DBG_DWORD_ENABLE,
 	NWM_REG_DBG_SHIFT, NWM_REG_DBG_FORCE_VALID,
 	NWM_REG_DBG_FORCE_FRAME,
@@ -1147,22 +1211,29 @@ static struct block_defs block_nwm_defs = {
 };
 
 static struct block_defs block_nws_defs = {
-	"nws", {false, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-	0, 0, 0, 0, 0,
+	"nws",
+	{false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+	NWS_REG_DBG_SELECT, NWS_REG_DBG_DWORD_ENABLE,
+	NWS_REG_DBG_SHIFT, NWS_REG_DBG_FORCE_VALID,
+	NWS_REG_DBG_FORCE_FRAME,
 	true, false, DBG_RESET_REG_MISCS_PL_HV, 12
 };
 
 static struct block_defs block_ms_defs = {
-	"ms", {false, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-	0, 0, 0, 0, 0,
+	"ms",
+	{false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+	MS_REG_DBG_SELECT, MS_REG_DBG_DWORD_ENABLE,
+	MS_REG_DBG_SHIFT, MS_REG_DBG_FORCE_VALID,
+	MS_REG_DBG_FORCE_FRAME,
 	true, false, DBG_RESET_REG_MISCS_PL_HV, 13
 };
 
 static struct block_defs block_phy_pcie_defs = {
-	"phy_pcie", {false, false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+	"phy_pcie",
+	{false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
 	PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
 	PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
 	PCIE_REG_DBG_COMMON_FORCE_FRAME,
@@ -1170,22 +1241,57 @@ static struct block_defs block_phy_pcie_defs = {
 };
 
 static struct block_defs block_led_defs = {
-	"led", {false, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"led", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
-	true, true, DBG_RESET_REG_MISCS_PL_HV, 14
+	true, false, DBG_RESET_REG_MISCS_PL_HV, 14
+};
+
+static struct block_defs block_avs_wrap_defs = {
+	"avs_wrap", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	0, 0, 0, 0, 0,
+	true, false, DBG_RESET_REG_MISCS_PL_UA, 11
+};
+
+static struct block_defs block_rgfs_defs = {
+	"rgfs", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	0, 0, 0, 0, 0,
+	false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_tgfs_defs = {
+	"tgfs", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	0, 0, 0, 0, 0,
+	false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_ptld_defs = {
+	"ptld", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	0, 0, 0, 0, 0,
+	false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_ypld_defs = {
+	"ypld", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	0, 0, 0, 0, 0,
+	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_misc_aeu_defs = {
-	"misc_aeu", {false, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"misc_aeu", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_bar0_map_defs = {
-	"bar0_map", {false, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"bar0_map", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
@@ -1269,6 +1375,11 @@ static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
 	&block_ms_defs,
 	&block_phy_pcie_defs,
 	&block_led_defs,
+	&block_avs_wrap_defs,
+	&block_rgfs_defs,
+	&block_tgfs_defs,
+	&block_ptld_defs,
+	&block_ypld_defs,
 	&block_misc_aeu_defs,
 	&block_bar0_map_defs,
 };
@@ -1281,65 +1392,67 @@ static struct platform_defs s_platform_defs[] = {
 };
 
 static struct grc_param_defs s_grc_param_defs[] = {
-	{{1, 1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_TSTORM */
-	{{1, 1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_MSTORM */
-	{{1, 1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_USTORM */
-	{{1, 1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_XSTORM */
-	{{1, 1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_YSTORM */
-	{{1, 1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_PSTORM */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_REGS */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_RAM */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_PBUF */
-	{{0, 0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_IOR */
-	{{0, 0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_VFC */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_CM_CTX */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_ILT */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_RSS */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_CAU */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_QM */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_MCP */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_RESERVED */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_CFC */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_IGU */
-	{{0, 0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_BRB */
-	{{0, 0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_BTB */
-	{{0, 0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_BMB */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_NIG */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_MULD */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_PRS */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_DMAE */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_TM */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_SDM */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_DIF */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_STATIC */
-	{{0, 0, 0}, 0, 1, false, 0, 0},	/* DBG_GRC_PARAM_UNSTALL */
-	{{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
+	{{1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_TSTORM */
+	{{1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_MSTORM */
+	{{1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_USTORM */
+	{{1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_XSTORM */
+	{{1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_YSTORM */
+	{{1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_PSTORM */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_REGS */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_RAM */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_PBUF */
+	{{0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_IOR */
+	{{0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_VFC */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_CM_CTX */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_ILT */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_RSS */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_CAU */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_QM */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_MCP */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_RESERVED */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_CFC */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_IGU */
+	{{0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_BRB */
+	{{0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_BTB */
+	{{0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_BMB */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_NIG */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_MULD */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_PRS */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_DMAE */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_TM */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_SDM */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_DIF */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_STATIC */
+	{{0, 0}, 0, 1, false, 0, 0},	/* DBG_GRC_PARAM_UNSTALL */
+	{{MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
 	 MAX_LCIDS},			/* DBG_GRC_PARAM_NUM_LCIDS */
-	{{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
+	{{MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
 	 MAX_LTIDS},			/* DBG_GRC_PARAM_NUM_LTIDS */
-	{{0, 0, 0}, 0, 1, true, 0, 0},	/* DBG_GRC_PARAM_EXCLUDE_ALL */
-	{{0, 0, 0}, 0, 1, true, 0, 0},	/* DBG_GRC_PARAM_CRASH */
-	{{0, 0, 0}, 0, 1, false, 1, 0},	/* DBG_GRC_PARAM_PARITY_SAFE */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_CM */
-	{{1, 1, 1}, 0, 1, false, 0, 1}	/* DBG_GRC_PARAM_DUMP_PHY */
+	{{0, 0}, 0, 1, true, 0, 0},	/* DBG_GRC_PARAM_EXCLUDE_ALL */
+	{{0, 0}, 0, 1, true, 0, 0},	/* DBG_GRC_PARAM_CRASH */
+	{{0, 0}, 0, 1, false, 1, 0},	/* DBG_GRC_PARAM_PARITY_SAFE */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_CM */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_PHY */
+	{{0, 0}, 0, 1, false, 0, 0},	/* DBG_GRC_PARAM_NO_MCP */
+	{{0, 0}, 0, 1, false, 0, 0}	/* DBG_GRC_PARAM_NO_FW_VER */
 };
 
 static struct rss_mem_defs s_rss_mem_defs[] = {
 	{ "rss_mem_cid", "rss_cid", 0,
-	  {256, 256, 320},
-	  {32, 32, 32} },
+	  {256, 320},
+	  {32, 32} },
 	{ "rss_mem_key_msb", "rss_key", 1024,
-	  {128, 128, 208},
-	  {256, 256, 256} },
+	  {128, 208},
+	  {256, 256} },
 	{ "rss_mem_key_lsb", "rss_key", 2048,
-	  {128, 128, 208},
-	  {64, 64, 64} },
+	  {128, 208},
+	  {64, 64} },
 	{ "rss_mem_info", "rss_info", 3072,
-	  {128, 128, 208},
-	  {16, 16, 16} },
+	  {128, 208},
+	  {16, 16} },
 	{ "rss_mem_ind", "rss_ind", 4096,
-	  {(128 * 128), (128 * 128), (128 * 208)},
-	  {16, 16, 16} }
+	  {(128 * 128), (128 * 208)},
+	  {16, 16} }
 };
 
 static struct vfc_ram_defs s_vfc_ram_defs[] = {
@@ -1352,32 +1465,32 @@ static struct vfc_ram_defs s_vfc_ram_defs[] = {
 static struct big_ram_defs s_big_ram_defs[] = {
 	{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
 	  BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
-	  {4800, 4800, 5632} },
+	  {4800, 5632} },
 	{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
 	  BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
-	  {2880, 2880, 3680} },
+	  {2880, 3680} },
 	{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
 	  BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
-	  {1152, 1152, 1152} }
+	  {1152, 1152} }
 };
 
 static struct reset_reg_defs s_reset_regs_defs[] = {
 	{ MISCS_REG_RESET_PL_UA, 0x0,
-	  {true, true, true} },		/* DBG_RESET_REG_MISCS_PL_UA */
+	  {true, true} },		/* DBG_RESET_REG_MISCS_PL_UA */
 	{ MISCS_REG_RESET_PL_HV, 0x0,
-	  {true, true, true} },		/* DBG_RESET_REG_MISCS_PL_HV */
+	  {true, true} },		/* DBG_RESET_REG_MISCS_PL_HV */
 	{ MISCS_REG_RESET_PL_HV_2, 0x0,
-	  {false, false, true} },	/* DBG_RESET_REG_MISCS_PL_HV_2 */
+	  {false, true} },	/* DBG_RESET_REG_MISCS_PL_HV_2 */
 	{ MISC_REG_RESET_PL_UA, 0x0,
-	  {true, true, true} },		/* DBG_RESET_REG_MISC_PL_UA */
+	  {true, true} },		/* DBG_RESET_REG_MISC_PL_UA */
 	{ MISC_REG_RESET_PL_HV, 0x0,
-	  {true, true, true} },		/* DBG_RESET_REG_MISC_PL_HV */
+	  {true, true} },		/* DBG_RESET_REG_MISC_PL_HV */
 	{ MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
-	  {true, true, true} },		/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
+	  {true, true} },		/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
 	{ MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
-	  {true, true, true} },		/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
+	  {true, true} },		/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
 	{ MISC_REG_RESET_PL_PDA_VAUX, 0x2,
-	  {true, true, true} },		/* DBG_RESET_REG_MISC_PL_PDA_VAUX */
+	  {true, true} },		/* DBG_RESET_REG_MISC_PL_PDA_VAUX */
 };
 
 static struct phy_defs s_phy_defs[] = {
@@ -1410,6 +1523,26 @@ static u32 qed_read_unaligned_dword(u8 *buf)
 	return dword;
 }
 
+/* Returns the value of the specified GRC param */
+static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
+			     enum dbg_grc_params grc_param)
+{
+	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+	return dev_data->grc.param_val[grc_param];
+}
+
+/* Initializes the GRC parameters */
+static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
+{
+	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+	if (!dev_data->grc.params_initialized) {
+		qed_dbg_grc_set_params_default(p_hwfn);
+		dev_data->grc.params_initialized = 1;
+	}
+}
+
 /* Initializes debug data for the specified device */
 static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
 					struct qed_ptt *p_ptt)
@@ -1431,6 +1564,10 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
 
 	dev_data->platform_id = PLATFORM_ASIC;
 	dev_data->mode_enable[MODE_ASIC] = 1;
+
+	/* Initializes the GRC parameters */
+	qed_dbg_grc_init_params(p_hwfn);
+
 	dev_data->initialized = true;
 	return DBG_STATUS_OK;
 }
@@ -1561,7 +1698,7 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
 	int printed_chars;
 	u32 offset = 0;
 
-	if (dump) {
+	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
 		/* Read FW image/version from PRAM in a non-reset SEMI */
 		bool found = false;
 		u8 storm_id;
@@ -1622,7 +1759,7 @@ static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
 {
 	char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
 
-	if (dump) {
+	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
 		u32 global_section_offsize, global_section_addr, mfw_ver;
 		u32 public_data_addr, global_section_offsize_addr;
 		int printed_chars;
@@ -1683,15 +1820,13 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
 					 bool dump,
 					 u8 num_specific_global_params)
 {
+	u8 num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
 	u32 offset = 0;
 
 	/* Find platform string and dump global params section header */
 	offset += qed_dump_section_hdr(dump_buf + offset,
-				       dump,
-				       "global_params",
-				       NUM_COMMON_GLOBAL_PARAMS +
-				       num_specific_global_params);
+				       dump, "global_params", num_params);
 
 	/* Store params */
 	offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
@@ -1815,37 +1950,6 @@ static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
 	}
 }
 
-/* Returns the value of the specified GRC param */
-static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
-			     enum dbg_grc_params grc_param)
-{
-	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
-
-	return dev_data->grc.param_val[grc_param];
-}
-
-/* Clear all GRC params */
-static void qed_dbg_grc_clear_params(struct qed_hwfn *p_hwfn)
-{
-	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
-	u32 i;
-
-	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
-		dev_data->grc.param_set_by_user[i] = 0;
-}
-
-/* Assign default GRC param values */
-static void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
-{
-	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
-	u32 i;
-
-	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
-		if (!dev_data->grc.param_set_by_user[i])
-			dev_data->grc.param_val[i] =
-			    s_grc_param_defs[i].default_val[dev_data->chip_id];
-}
-
 /* Returns true if the specified entity (indicated by GRC param) should be
  * included in the dump, false otherwise.
  */
@@ -1971,7 +2075,7 @@ static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
 	}
 }
 
-/* Returns the attention name offsets of the specified block */
+/* Returns the attention block data of the specified block */
 static const struct dbg_attn_block_type_data *
 qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
 {
@@ -2040,7 +2144,7 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
  * The following parameters are dumped:
  * - 'count' = num_dumped_entries
  * - 'split' = split_type
- * - 'id'i = split_id (dumped only if split_id >= 0)
+ * - 'id' = split_id (dumped only if split_id >= 0)
  * - 'param_name' = param_val (user param, dumped only if param_name != NULL and
  *	param_val != NULL)
  */
@@ -2069,21 +2173,81 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
 	return offset;
 }
 
-/* Dumps GRC register/memory. Returns the dumped size in dwords. */
+/* Dumps the GRC registers in the specified address range.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
+				   struct qed_ptt *p_ptt, u32 *dump_buf,
+				   bool dump, u32 addr, u32 len)
+{
+	u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i;
+
+	if (dump)
+		for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
+			*(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
+	else
+		offset += len;
+	return offset;
+}
+
+/* Dumps GRC registers sequence header. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf, bool dump, u32 addr,
+				      u32 len)
+{
+	if (dump)
+		*dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
+	return 1;
+}
+
+/* Dumps GRC registers sequence. Returns the dumped size in dwords. */
 static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
 				  struct qed_ptt *p_ptt, u32 *dump_buf,
 				  bool dump, u32 addr, u32 len)
 {
-	u32 offset = 0, i;
+	u32 offset = 0;
 
+	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
+	offset += qed_grc_dump_addr_range(p_hwfn,
+					  p_ptt,
+					  dump_buf + offset, dump, addr, len);
+	return offset;
+}
+
+/* Dumps GRC registers sequence with skip cycle.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
+				       struct qed_ptt *p_ptt, u32 *dump_buf,
+				       bool dump, u32 addr, u32 total_len,
+				       u32 read_len, u32 skip_len)
+{
+	u32 offset = 0, reg_offset = 0;
+
+	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
 	if (dump) {
-		*(dump_buf + offset++) = addr | (len << REG_DUMP_LEN_SHIFT);
-		for (i = 0; i < len; i++, addr++, offset++)
-			*(dump_buf + offset) = qed_rd(p_hwfn,
-						      p_ptt,
-						      DWORDS_TO_BYTES(addr));
+		while (reg_offset < total_len) {
+			u32 curr_len = min_t(u32,
+					     read_len,
+					     total_len - reg_offset);
+			offset += qed_grc_dump_addr_range(p_hwfn,
+							  p_ptt,
+							  dump_buf + offset,
+							  dump, addr, curr_len);
+			reg_offset += curr_len;
+			addr += curr_len;
+			if (reg_offset < total_len) {
+				curr_len = min_t(u32,
+						 skip_len,
+						 total_len - skip_len);
+				memset(dump_buf + offset, 0,
+				       DWORDS_TO_BYTES(curr_len));
+				offset += curr_len;
+				reg_offset += curr_len;
+				addr += curr_len;
+			}
+		}
 	} else {
-		offset += len + 1;
+		offset += total_len;
 	}
 
 	return offset;
@@ -2124,14 +2288,17 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
 				const struct dbg_dump_reg *reg =
 				    (const struct dbg_dump_reg *)
 				    &input_regs_arr.ptr[input_offset];
+				u32 addr, len;
 
+				addr = GET_FIELD(reg->data,
+						 DBG_DUMP_REG_ADDRESS);
+				len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
 				offset +=
-					qed_grc_dump_reg_entry(p_hwfn, p_ptt,
-						    dump_buf + offset, dump,
-						    GET_FIELD(reg->data,
-							DBG_DUMP_REG_ADDRESS),
-						    GET_FIELD(reg->data,
-							DBG_DUMP_REG_LENGTH));
+				    qed_grc_dump_reg_entry(p_hwfn, p_ptt,
+							   dump_buf + offset,
+							   dump,
+							   addr,
+							   len);
 				(*num_dumped_reg_entries)++;
 			}
 		} else {
@@ -2194,8 +2361,14 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
 				  const char *param_name, const char *param_val)
 {
 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+	struct chip_platform_defs *p_platform_defs;
 	u32 offset = 0, input_offset = 0;
-	u8 port_id, pf_id;
+	struct chip_defs *p_chip_defs;
+	u8 port_id, pf_id, vf_id;
+	u16 fid;
+
+	p_chip_defs = &s_chip_defs[dev_data->chip_id];
+	p_platform_defs = &p_chip_defs->per_platform[dev_data->platform_id];
 
 	if (dump)
 		DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n");
@@ -2214,7 +2387,6 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
 
 		switch (split_type_id) {
 		case SPLIT_TYPE_NONE:
-		case SPLIT_TYPE_VF:
 			offset += qed_grc_dump_split_data(p_hwfn,
 							  p_ptt,
 							  curr_input_regs_arr,
@@ -2227,10 +2399,7 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
 							  param_val);
 			break;
 		case SPLIT_TYPE_PORT:
-			for (port_id = 0;
-			     port_id <
-			     s_chip_defs[dev_data->chip_id].
-			     per_platform[dev_data->platform_id].num_ports;
+			for (port_id = 0; port_id < p_platform_defs->num_ports;
 			     port_id++) {
 				if (dump)
 					qed_port_pretend(p_hwfn, p_ptt,
@@ -2247,20 +2416,48 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
 			break;
 		case SPLIT_TYPE_PF:
 		case SPLIT_TYPE_PORT_PF:
-			for (pf_id = 0;
-			     pf_id <
-			     s_chip_defs[dev_data->chip_id].
-			     per_platform[dev_data->platform_id].num_pfs;
+			for (pf_id = 0; pf_id < p_platform_defs->num_pfs;
 			     pf_id++) {
-				if (dump)
-					qed_fid_pretend(p_hwfn, p_ptt, pf_id);
-				offset += qed_grc_dump_split_data(p_hwfn,
-							p_ptt,
-							curr_input_regs_arr,
-							dump_buf + offset,
-							dump, block_enable,
-							"pf", pf_id, param_name,
-							param_val);
+				u8 pfid_shift =
+					PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
+
+				if (dump) {
+					fid = pf_id << pfid_shift;
+					qed_fid_pretend(p_hwfn, p_ptt, fid);
+				}
+
+				offset +=
+				    qed_grc_dump_split_data(p_hwfn, p_ptt,
+							    curr_input_regs_arr,
+							    dump_buf + offset,
+							    dump, block_enable,
+							    "pf", pf_id,
+							    param_name,
+							    param_val);
+			}
+			break;
+		case SPLIT_TYPE_VF:
+			for (vf_id = 0; vf_id < p_platform_defs->num_vfs;
+			     vf_id++) {
+				u8 vfvalid_shift =
+					PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT;
+				u8 vfid_shift =
+					PXP_PRETEND_CONCRETE_FID_VFID_SHIFT;
+
+				if (dump) {
+					fid = BIT(vfvalid_shift) |
+					      (vf_id << vfid_shift);
+					qed_fid_pretend(p_hwfn, p_ptt, fid);
+				}
+
+				offset +=
+				    qed_grc_dump_split_data(p_hwfn, p_ptt,
+							    curr_input_regs_arr,
+							    dump_buf + offset,
+							    dump, block_enable,
+							    "vf", vf_id,
+							    param_name,
+							    param_val);
 			}
 			break;
 		default:
@@ -2271,8 +2468,11 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
 	}
 
 	/* Pretend to original PF */
-	if (dump)
-		qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+	if (dump) {
+		fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
+		qed_fid_pretend(p_hwfn, p_ptt, fid);
+	}
+
 	return offset;
 }
 
@@ -2291,13 +2491,14 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
 	/* Write reset registers */
 	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
 		if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
+			u32 addr = BYTES_TO_DWORDS(s_reset_regs_defs[i].addr);
+
 			offset += qed_grc_dump_reg_entry(p_hwfn,
 							 p_ptt,
 							 dump_buf + offset,
 							 dump,
-							 BYTES_TO_DWORDS
-							 (s_reset_regs_defs
-							  [i].addr), 1);
+							 addr,
+							 1);
 			num_regs++;
 		}
 	}
@@ -2339,6 +2540,7 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
 				&attn_reg_arr[reg_idx];
 			u16 modes_buf_offset;
 			bool eval_mode;
+			u32 addr;
 
 			/* Check mode */
 			eval_mode = GET_FIELD(reg_data->mode.data,
@@ -2349,19 +2551,23 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
 			if (!eval_mode ||
 			    qed_is_mode_match(p_hwfn, &modes_buf_offset)) {
 				/* Mode match - read and dump registers */
-				offset += qed_grc_dump_reg_entry(p_hwfn,
-							p_ptt,
-							dump_buf + offset,
-							dump,
-							reg_data->mask_address,
-							1);
-				offset += qed_grc_dump_reg_entry(p_hwfn,
-						p_ptt,
-						dump_buf + offset,
-						dump,
-						GET_FIELD(reg_data->data,
-						    DBG_ATTN_REG_STS_ADDRESS),
-						1);
+				addr = reg_data->mask_address;
+				offset +=
+				    qed_grc_dump_reg_entry(p_hwfn,
+							   p_ptt,
+							   dump_buf + offset,
+							   dump,
+							   addr,
+							   1);
+				addr = GET_FIELD(reg_data->data,
+						 DBG_ATTN_REG_STS_ADDRESS);
+				offset +=
+				    qed_grc_dump_reg_entry(p_hwfn,
+							   p_ptt,
+							   dump_buf + offset,
+							   dump,
+							   addr,
+							   1);
 				num_reg_entries += 2;
 			}
 		}
@@ -2369,18 +2575,21 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
 
 	/* Write storm stall status registers */
 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+		u32 addr;
+
 		if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id] &&
 		    dump)
 			continue;
 
+		addr =
+		    BYTES_TO_DWORDS(s_storm_defs[storm_id].sem_fast_mem_addr +
+				    SEM_FAST_REG_STALLED);
 		offset += qed_grc_dump_reg_entry(p_hwfn,
-					p_ptt,
-					dump_buf + offset,
-					dump,
-					BYTES_TO_DWORDS(s_storm_defs[storm_id].
-							sem_fast_mem_addr +
-							SEM_FAST_REG_STALLED),
-					1);
+						 p_ptt,
+						 dump_buf + offset,
+						 dump,
+						 addr,
+						 1);
 		num_reg_entries++;
 	}
 
@@ -2392,11 +2601,47 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
 	return offset;
 }
 
+/* Dumps registers that can't be represented in the debug arrays */
+static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
+				     struct qed_ptt *p_ptt,
+				     u32 *dump_buf, bool dump)
+{
+	u32 offset = 0, addr;
+
+	offset += qed_grc_dump_regs_hdr(dump_buf,
+					dump, 2, "eng", -1, NULL, NULL);
+
+	/* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8th register should be
+	 * skipped).
+	 */
+	addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
+	offset += qed_grc_dump_reg_entry_skip(p_hwfn,
+					      p_ptt,
+					      dump_buf + offset,
+					      dump,
+					      addr,
+					      RDIF_REG_DEBUG_ERROR_INFO_SIZE,
+					      7,
+					      1);
+	addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
+	offset +=
+	    qed_grc_dump_reg_entry_skip(p_hwfn,
+					p_ptt,
+					dump_buf + offset,
+					dump,
+					addr,
+					TDIF_REG_DEBUG_ERROR_INFO_SIZE,
+					7,
+					1);
+
+	return offset;
+}
+
 /* Dumps a GRC memory header (section and params).
  * The following parameters are dumped:
  * name - name is dumped only if it's not NULL.
- * addr - byte_addr is dumped only if name is NULL.
- * len - dword_len is always dumped.
+ * addr - addr is dumped only if name is NULL.
+ * len - len is always dumped.
  * width - bit_width is dumped if it's not zero.
  * packed - packed=1 is dumped if it's not false.
  * mem_group - mem_group is always dumped.
@@ -2408,8 +2653,8 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
 				u32 *dump_buf,
 				bool dump,
 				const char *name,
-				u32 byte_addr,
-				u32 dword_len,
+				u32 addr,
+				u32 len,
 				u32 bit_width,
 				bool packed,
 				const char *mem_group,
@@ -2419,7 +2664,7 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
 	u32 offset = 0;
 	char buf[64];
 
-	if (!dword_len)
+	if (!len)
 		DP_NOTICE(p_hwfn,
 			  "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
 	if (bit_width)
@@ -2446,20 +2691,21 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
 			DP_VERBOSE(p_hwfn,
 				   QED_MSG_DEBUG,
 				   "Dumping %d registers from %s...\n",
-				   dword_len, buf);
+				   len, buf);
 	} else {
 		/* Dump address */
 		offset += qed_dump_num_param(dump_buf + offset,
-					     dump, "addr", byte_addr);
-		if (dump && dword_len > 64)
+					     dump, "addr",
+					     DWORDS_TO_BYTES(addr));
+		if (dump && len > 64)
 			DP_VERBOSE(p_hwfn,
 				   QED_MSG_DEBUG,
 				   "Dumping %d registers from address 0x%x...\n",
-				   dword_len, byte_addr);
+				   len, (u32)DWORDS_TO_BYTES(addr));
 	}
 
 	/* Dump len */
-	offset += qed_dump_num_param(dump_buf + offset, dump, "len", dword_len);
+	offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
 
 	/* Dump bit width */
 	if (bit_width)
@@ -2492,8 +2738,8 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
 			    u32 *dump_buf,
 			    bool dump,
 			    const char *name,
-			    u32 byte_addr,
-			    u32 dword_len,
+			    u32 addr,
+			    u32 len,
 			    u32 bit_width,
 			    bool packed,
 			    const char *mem_group,
@@ -2505,21 +2751,14 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
 				       dump_buf + offset,
 				       dump,
 				       name,
-				       byte_addr,
-				       dword_len,
+				       addr,
+				       len,
 				       bit_width,
 				       packed,
 				       mem_group, is_storm, storm_letter);
-	if (dump) {
-		u32 i;
-
-		for (i = 0; i < dword_len;
-		     i++, byte_addr += BYTES_IN_DWORD, offset++)
-			*(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
-	} else {
-		offset += dword_len;
-	}
-
+	offset += qed_grc_dump_addr_range(p_hwfn,
+					  p_ptt,
+					  dump_buf + offset, dump, addr, len);
 	return offset;
 }
 
@@ -2575,25 +2814,41 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
 			if (qed_grc_is_mem_included(p_hwfn,
 					(enum block_id)cond_hdr->block_id,
 					mem_group_id)) {
-				u32 mem_byte_addr =
-					DWORDS_TO_BYTES(GET_FIELD(mem->dword0,
-							DBG_DUMP_MEM_ADDRESS));
+				u32 mem_addr = GET_FIELD(mem->dword0,
+							 DBG_DUMP_MEM_ADDRESS);
 				u32 mem_len = GET_FIELD(mem->dword1,
 							DBG_DUMP_MEM_LENGTH);
+				enum dbg_grc_params grc_param;
 				char storm_letter = 'a';
 				bool is_storm = false;
 
 				/* Update memory length for CCFC/TCFC memories
 				 * according to number of LCIDs/LTIDs.
 				 */
-				if (mem_group_id == MEM_GROUP_CONN_CFC_MEM)
+				if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
+					if (mem_len % MAX_LCIDS != 0) {
+						DP_NOTICE(p_hwfn,
+							  "Invalid CCFC connection memory size\n");
+						return 0;
+					}
+
+					grc_param = DBG_GRC_PARAM_NUM_LCIDS;
 					mem_len = qed_grc_get_param(p_hwfn,
-							DBG_GRC_PARAM_NUM_LCIDS)
-							* (mem_len / MAX_LCIDS);
-				else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM)
+								    grc_param) *
+						  (mem_len / MAX_LCIDS);
+				} else if (mem_group_id ==
+					   MEM_GROUP_TASK_CFC_MEM) {
+					if (mem_len % MAX_LTIDS != 0) {
+						DP_NOTICE(p_hwfn,
+							  "Invalid TCFC task memory size\n");
+						return 0;
+					}
+
+					grc_param = DBG_GRC_PARAM_NUM_LTIDS;
 					mem_len = qed_grc_get_param(p_hwfn,
-							DBG_GRC_PARAM_NUM_LTIDS)
-							* (mem_len / MAX_LTIDS);
+								    grc_param) *
+						  (mem_len / MAX_LTIDS);
+				}
 
 				/* If memory is associated with Storm, update
 				 * Storm details.
@@ -2610,7 +2865,7 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
 				/* Dump memory */
 				offset += qed_grc_dump_mem(p_hwfn, p_ptt,
 						dump_buf + offset, dump, NULL,
-						mem_byte_addr, mem_len, 0,
+						mem_addr, mem_len, 0,
 						false,
 						s_mem_group_names[mem_group_id],
 						is_storm, storm_letter);
@@ -2799,29 +3054,31 @@ static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
 	u32 offset = 0;
 
 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
-		if (qed_grc_is_storm_included(p_hwfn,
-					      (enum dbg_storms)storm_id)) {
-			for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
-				u32 addr =
-				    s_storm_defs[storm_id].sem_fast_mem_addr +
-				    SEM_FAST_REG_STORM_REG_FILE +
-				    DWORDS_TO_BYTES(IOR_SET_OFFSET(set_id));
+		struct storm_defs *storm = &s_storm_defs[storm_id];
 
-				buf[strlen(buf) - 1] = '0' + set_id;
-				offset += qed_grc_dump_mem(p_hwfn,
-							   p_ptt,
-							   dump_buf + offset,
-							   dump,
-							   buf,
-							   addr,
-							   IORS_PER_SET,
-							   32,
-							   false,
-							   "ior",
-							   true,
-							   s_storm_defs
-							   [storm_id].letter);
-			}
+		if (!qed_grc_is_storm_included(p_hwfn,
+					       (enum dbg_storms)storm_id))
+			continue;
+
+		for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
+			u32 dwords, addr;
+
+			dwords = storm->sem_fast_mem_addr +
+				 SEM_FAST_REG_STORM_REG_FILE;
+			addr = BYTES_TO_DWORDS(dwords) + IOR_SET_OFFSET(set_id);
+			buf[strlen(buf) - 1] = '0' + set_id;
+			offset += qed_grc_dump_mem(p_hwfn,
+						   p_ptt,
+						   dump_buf + offset,
+						   dump,
+						   buf,
+						   addr,
+						   IORS_PER_SET,
+						   32,
+						   false,
+						   "ior",
+						   true,
+						   storm->letter);
 		}
 	}
 
@@ -2990,34 +3247,39 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
 		struct rss_mem_defs *rss_defs = &s_rss_mem_defs[rss_mem_id];
 		u32 num_entries = rss_defs->num_entries[dev_data->chip_id];
 		u32 entry_width = rss_defs->entry_width[dev_data->chip_id];
-		u32 total_size = (num_entries * entry_width) / 32;
+		u32 total_dwords = (num_entries * entry_width) / 32;
+		u32 size = RSS_REG_RSS_RAM_DATA_SIZE;
 		bool packed = (entry_width == 16);
-		u32 addr = rss_defs->addr;
-		u32 i, j;
+		u32 rss_addr = rss_defs->addr;
+		u32 i, addr;
 
 		offset += qed_grc_dump_mem_hdr(p_hwfn,
 					       dump_buf + offset,
 					       dump,
 					       rss_defs->mem_name,
-					       addr,
-					       total_size,
+					       0,
+					       total_dwords,
 					       entry_width,
 					       packed,
 					       rss_defs->type_name, false, 0);
 
 		if (!dump) {
-			offset += total_size;
+			offset += total_dwords;
 			continue;
 		}
 
 		/* Dump RSS data */
-		for (i = 0; i < BYTES_TO_DWORDS(total_size); i++, addr++) {
-			qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, addr);
-			for (j = 0; j < BYTES_IN_DWORD; j++, offset++)
-				*(dump_buf + offset) =
-					qed_rd(p_hwfn, p_ptt,
-					       RSS_REG_RSS_RAM_DATA +
-					       DWORDS_TO_BYTES(j));
+		for (i = 0; i < total_dwords;
+		     i += RSS_REG_RSS_RAM_DATA_SIZE, rss_addr++) {
+			addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
+			qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
+			offset += qed_grc_dump_addr_range(p_hwfn,
+							  p_ptt,
+							  dump_buf +
+							  offset,
+							  dump,
+							  addr,
+							  size);
 		}
 	}
 
@@ -3030,19 +3292,19 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
 				u32 *dump_buf, bool dump, u8 big_ram_id)
 {
 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+	u32 total_blocks, ram_size, offset = 0, i;
 	char mem_name[12] = "???_BIG_RAM";
 	char type_name[8] = "???_RAM";
-	u32 ram_size, total_blocks;
-	u32 offset = 0, i, j;
+	struct big_ram_defs *big_ram;
 
-	total_blocks =
-		s_big_ram_defs[big_ram_id].num_of_blocks[dev_data->chip_id];
+	big_ram = &s_big_ram_defs[big_ram_id];
+	total_blocks = big_ram->num_of_blocks[dev_data->chip_id];
 	ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;
 
-	strncpy(type_name, s_big_ram_defs[big_ram_id].instance_name,
-		strlen(s_big_ram_defs[big_ram_id].instance_name));
-	strncpy(mem_name, s_big_ram_defs[big_ram_id].instance_name,
-		strlen(s_big_ram_defs[big_ram_id].instance_name));
+	strncpy(type_name, big_ram->instance_name,
+		strlen(big_ram->instance_name));
+	strncpy(mem_name, big_ram->instance_name,
+		strlen(big_ram->instance_name));
 
 	/* Dump memory header */
 	offset += qed_grc_dump_mem_hdr(p_hwfn,
@@ -3059,13 +3321,17 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
 
 	/* Read and dump Big RAM data */
 	for (i = 0; i < total_blocks / 2; i++) {
-		qed_wr(p_hwfn, p_ptt, s_big_ram_defs[big_ram_id].addr_reg_addr,
-		       i);
-		for (j = 0; j < 2 * BIG_RAM_BLOCK_SIZE_DWORDS; j++, offset++)
-			*(dump_buf + offset) = qed_rd(p_hwfn, p_ptt,
-						s_big_ram_defs[big_ram_id].
-							data_reg_addr +
-						DWORDS_TO_BYTES(j));
+		u32 addr, len;
+
+		qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
+		addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
+		len = 2 * BIG_RAM_BLOCK_SIZE_DWORDS;
+		offset += qed_grc_dump_addr_range(p_hwfn,
+						  p_ptt,
+						  dump_buf + offset,
+						  dump,
+						  addr,
+						  len);
 	}
 
 	return offset;
@@ -3075,11 +3341,11 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
 {
 	bool block_enable[MAX_BLOCK_ID] = { 0 };
+	u32 offset = 0, addr;
 	bool halted = false;
-	u32 offset = 0;
 
 	/* Halt MCP */
-	if (dump) {
+	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
 		halted = !qed_mcp_halt(p_hwfn, p_ptt);
 		if (!halted)
 			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
@@ -3091,7 +3357,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
 				   dump_buf + offset,
 				   dump,
 				   NULL,
-				   MCP_REG_SCRATCH,
+				   BYTES_TO_DWORDS(MCP_REG_SCRATCH),
 				   MCP_REG_SCRATCH_SIZE,
 				   0, false, "MCP", false, 0);
 
@@ -3101,7 +3367,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
 				   dump_buf + offset,
 				   dump,
 				   NULL,
-				   MCP_REG_CPU_REG_FILE,
+				   BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
 				   MCP_REG_CPU_REG_FILE_SIZE,
 				   0, false, "MCP", false, 0);
 
@@ -3115,12 +3381,13 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
 	/* Dump required non-MCP registers */
 	offset += qed_grc_dump_regs_hdr(dump_buf + offset,
 					dump, 1, "eng", -1, "block", "MCP");
+	addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
 	offset += qed_grc_dump_reg_entry(p_hwfn,
 					 p_ptt,
 					 dump_buf + offset,
 					 dump,
-					 BYTES_TO_DWORDS
-					 (MISC_REG_SHARED_MEM_ADDR), 1);
+					 addr,
+					 1);
 
 	/* Release MCP */
 	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
@@ -3212,7 +3479,7 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
 {
 	u32 block_dwords = NUM_DBG_BUS_LINES * STATIC_DEBUG_LINE_DWORDS;
 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
-	u32 offset = 0, block_id, line_id, addr, i;
+	u32 offset = 0, block_id, line_id;
 	struct block_defs *p_block_defs;
 
 	if (dump) {
@@ -3255,6 +3522,8 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
 		if (dump && !dev_data->block_in_reset[block_id]) {
 			u8 dbg_client_id =
 				p_block_defs->dbg_client_id[dev_data->chip_id];
+			u32 addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
+			u32 len = STATIC_DEBUG_LINE_DWORDS;
 
 			/* Enable block's client */
 			qed_bus_enable_clients(p_hwfn, p_ptt,
@@ -3270,11 +3539,13 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
 						    0xf, 0, 0, 0);
 
 				/* Read debug line info */
-				for (i = 0, addr = DBG_REG_CALENDAR_OUT_DATA;
-				     i < STATIC_DEBUG_LINE_DWORDS;
-				     i++, offset++, addr += BYTES_IN_DWORD)
-					dump_buf[offset] = qed_rd(p_hwfn, p_ptt,
-								  addr);
+				offset +=
+				    qed_grc_dump_addr_range(p_hwfn,
+							    p_ptt,
+							    dump_buf + offset,
+							    dump,
+							    addr,
+							    len);
 			}
 
 			/* Disable block's client and debug output */
@@ -3311,14 +3582,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
 	u8 i, port_mode = 0;
 	u32 offset = 0;
 
-	/* Check if emulation platform */
 	*num_dumped_dwords = 0;
 
-	/* Fill GRC parameters that were not set by the user with their default
-	 * value.
-	 */
-	qed_dbg_grc_set_params_default(p_hwfn);
-
 	/* Find port mode */
 	if (dump) {
 		switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
@@ -3370,15 +3635,14 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
 	}
 
 	/* Disable all parities using MFW command */
-	if (dump) {
+	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
 		parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
 		if (!parities_masked) {
+			DP_NOTICE(p_hwfn,
+				  "Failed to mask parities using MFW\n");
 			if (qed_grc_get_param
 			    (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
 				return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
-			else
-				DP_NOTICE(p_hwfn,
-					  "Failed to mask parities using MFW\n");
 		}
 	}
 
@@ -3409,6 +3673,11 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
 						 offset,
 						 dump,
 						 block_enable, NULL, NULL);
+
+		/* Dump special registers */
+		offset += qed_grc_dump_special_regs(p_hwfn,
+						    p_ptt,
+						    dump_buf + offset, dump);
 	}
 
 	/* Dump memories */
@@ -3583,9 +3852,9 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
 			}
 
 			if (mode_match) {
-				u32 grc_addr =
-					DWORDS_TO_BYTES(GET_FIELD(reg->data,
-						DBG_IDLE_CHK_INFO_REG_ADDRESS));
+				u32 addr =
+				    GET_FIELD(reg->data,
+					      DBG_IDLE_CHK_INFO_REG_ADDRESS);
 
 				/* Write register header */
 				struct dbg_idle_chk_result_reg_hdr *reg_hdr =
@@ -3597,16 +3866,19 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
 				memset(reg_hdr, 0, sizeof(*reg_hdr));
 				reg_hdr->size = reg->size;
 				SET_FIELD(reg_hdr->data,
-					DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
-					rule->num_cond_regs + reg_id);
+					  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
+					  rule->num_cond_regs + reg_id);
 
 				/* Write register values */
-				for (i = 0; i < reg->size;
-				     i++, offset++, grc_addr += 4)
-					dump_buf[offset] =
-						qed_rd(p_hwfn, p_ptt, grc_addr);
-				}
+				offset +=
+				    qed_grc_dump_addr_range(p_hwfn,
+							    p_ptt,
+							    dump_buf + offset,
+							    dump,
+							    addr,
+							    reg->size);
 			}
+		}
 	}
 
 	return offset;
@@ -3621,7 +3893,7 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 {
 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
 	u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
-	u32 i, j, offset = 0;
+	u32 i, offset = 0;
 	u16 entry_id;
 	u8 reg_id;
 
@@ -3664,73 +3936,83 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 		if (!check_rule && dump)
 			continue;
 
+		if (!dump) {
+			u32 entry_dump_size =
+				qed_idle_chk_dump_failure(p_hwfn,
+							  p_ptt,
+							  dump_buf + offset,
+							  false,
+							  rule->rule_id,
+							  rule,
+							  0,
+							  NULL);
+
+			offset += num_reg_entries * entry_dump_size;
+			(*num_failing_rules) += num_reg_entries;
+			continue;
+		}
+
 		/* Go over all register entries (number of entries is the same
 		 * for all condition registers).
 		 */
 		for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
 			/* Read current entry of all condition registers */
-			if (dump) {
-				u32 next_reg_offset = 0;
+			u32 next_reg_offset = 0;
 
-				for (reg_id = 0;
-				     reg_id < rule->num_cond_regs;
-				     reg_id++) {
-					const struct dbg_idle_chk_cond_reg
-						*reg = &cond_regs[reg_id];
+			for (reg_id = 0; reg_id < rule->num_cond_regs;
+			     reg_id++) {
+				const struct dbg_idle_chk_cond_reg *reg =
+					&cond_regs[reg_id];
 
-					/* Find GRC address (if it's a memory,
-					 * the address of the specific entry is
-					 * calculated).
-					 */
-					u32 grc_addr =
-					   DWORDS_TO_BYTES(
-						GET_FIELD(reg->data,
-						    DBG_IDLE_CHK_COND_REG_ADDRESS));
+				/* Find GRC address (if it's a memory, the
+				 * address of the specific entry is calculated).
+				 */
+				u32 addr =
+				    GET_FIELD(reg->data,
+					      DBG_IDLE_CHK_COND_REG_ADDRESS);
 
-					if (reg->num_entries > 1 ||
-					    reg->start_entry > 0) {
-						u32 padded_entry_size =
-							reg->entry_size > 1 ?
-							roundup_pow_of_two
-							(reg->entry_size) : 1;
+				if (reg->num_entries > 1 ||
+				    reg->start_entry > 0) {
+					u32 padded_entry_size =
+					   reg->entry_size > 1 ?
+					   roundup_pow_of_two(reg->entry_size) :
+					   1;
 
-						grc_addr +=
-							DWORDS_TO_BYTES(
-								(reg->start_entry +
-								entry_id)
-								* padded_entry_size);
-					}
-
-					/* Read registers */
-					if (next_reg_offset + reg->entry_size >=
-					    IDLE_CHK_MAX_ENTRIES_SIZE) {
-						DP_NOTICE(p_hwfn,
-							  "idle check registers entry is too large\n");
-						return 0;
-					}
-
-					for (j = 0; j < reg->entry_size;
-					     j++, next_reg_offset++,
-					     grc_addr += 4)
-					     cond_reg_values[next_reg_offset] =
-						qed_rd(p_hwfn, p_ptt, grc_addr);
+					addr += (reg->start_entry + entry_id) *
+						padded_entry_size;
 				}
+
+				/* Read registers */
+				if (next_reg_offset + reg->entry_size >=
+				    IDLE_CHK_MAX_ENTRIES_SIZE) {
+					DP_NOTICE(p_hwfn,
+						  "idle check registers entry is too large\n");
+					return 0;
+				}
+
+				next_reg_offset +=
+				    qed_grc_dump_addr_range(p_hwfn,
+							    p_ptt,
+							    cond_reg_values +
+							    next_reg_offset,
+							    dump, addr,
+							    reg->entry_size);
 			}
 
 			/* Call rule's condition function - a return value of
 			 * true indicates failure.
 			 */
 			if ((*cond_arr[rule->cond_id])(cond_reg_values,
-						       imm_values) || !dump) {
+						       imm_values)) {
 				offset +=
-					qed_idle_chk_dump_failure(p_hwfn,
-							p_ptt,
-							dump_buf + offset,
-							dump,
-							rule->rule_id,
-							rule,
-							entry_id,
-							cond_reg_values);
+				    qed_idle_chk_dump_failure(p_hwfn,
+							      p_ptt,
+							      dump_buf + offset,
+							      dump,
+							      rule->rule_id,
+							      rule,
+							      entry_id,
+							      cond_reg_values);
 				(*num_failing_rules)++;
 				break;
 			}
@@ -3818,13 +4100,18 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
 	struct mcp_file_att file_att;
 
 	/* Call NVRAM get file command */
-	if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_GET_FILE_ATT,
-			       image_type, &ret_mcp_resp, &ret_mcp_param,
-			       &ret_txn_size, (u32 *)&file_att) != 0)
-		return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
+	int nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
+					    p_ptt,
+					    DRV_MSG_CODE_NVM_GET_FILE_ATT,
+					    image_type,
+					    &ret_mcp_resp,
+					    &ret_mcp_param,
+					    &ret_txn_size,
+					    (u32 *)&file_att);
 
 	/* Check response */
-	if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
+	if (nvm_result ||
+	    (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
 		return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
 
 	/* Update return values */
@@ -3944,7 +4231,6 @@ static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
 	u32 running_mfw_addr =
 		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
 		QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
-	enum dbg_status status;
 	u32 nvram_image_type;
 
 	*running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
@@ -3955,30 +4241,12 @@ static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
 	nvram_image_type =
 	    (*running_bundle_id ==
 	     DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
-	status = qed_find_nvram_image(p_hwfn,
-				      p_ptt,
-				      nvram_image_type,
-				      trace_meta_offset_bytes,
-				      trace_meta_size_bytes);
 
-	return status;
-}
-
-/* Reads the MCP Trace data from the specified GRC address into the specified
- * buffer.
- */
-static void qed_mcp_trace_read_data(struct qed_hwfn *p_hwfn,
-				    struct qed_ptt *p_ptt,
-				    u32 grc_addr, u32 size_in_dwords, u32 *buf)
-{
-	u32 i;
-
-	DP_VERBOSE(p_hwfn,
-		   QED_MSG_DEBUG,
-		   "mcp_trace_read_data: reading trace data of size %d dwords from GRC address 0x%x\n",
-		   size_in_dwords, grc_addr);
-	for (i = 0; i < size_in_dwords; i++, grc_addr += BYTES_IN_DWORD)
-		buf[i] = qed_rd(p_hwfn, p_ptt, grc_addr);
+	return qed_find_nvram_image(p_hwfn,
+				    p_ptt,
+				    nvram_image_type,
+				    trace_meta_offset_bytes,
+				    trace_meta_size_bytes);
 }
 
 /* Reads the MCP Trace meta data (from NVRAM or buffer) into the specified
@@ -4034,11 +4302,14 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
 					  bool dump, u32 *num_dumped_dwords)
 {
 	u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
-	u32 trace_meta_size_dwords, running_bundle_id, offset = 0;
-	u32 trace_meta_offset_bytes, trace_meta_size_bytes;
+	u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
+	u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
 	enum dbg_status status;
+	bool mcp_access;
 	int halted = 0;
 
+	mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
+
 	*num_dumped_dwords = 0;
 
 	/* Get trace data info */
@@ -4060,7 +4331,7 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
 	 * consistent if halt fails, MCP trace is taken anyway, with a small
 	 * risk that it may be corrupt.
 	 */
-	if (dump) {
+	if (dump && mcp_access) {
 		halted = !qed_mcp_halt(p_hwfn, p_ptt);
 		if (!halted)
 			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
@@ -4078,13 +4349,12 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
 				     dump, "size", trace_data_size_dwords);
 
 	/* Read trace data from scratchpad into dump buffer */
-	if (dump)
-		qed_mcp_trace_read_data(p_hwfn,
-					p_ptt,
-					trace_data_grc_addr,
-					trace_data_size_dwords,
-					dump_buf + offset);
-	offset += trace_data_size_dwords;
+	offset += qed_grc_dump_addr_range(p_hwfn,
+					  p_ptt,
+					  dump_buf + offset,
+					  dump,
+					  BYTES_TO_DWORDS(trace_data_grc_addr),
+					  trace_data_size_dwords);
 
 	/* Resume MCP (only if halt succeeded) */
 	if (halted && qed_mcp_resume(p_hwfn, p_ptt) != 0)
@@ -4095,38 +4365,38 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
 				       dump, "mcp_trace_meta", 1);
 
 	/* Read trace meta info */
-	status = qed_mcp_trace_get_meta_info(p_hwfn,
-					     p_ptt,
-					     trace_data_size_bytes,
-					     &running_bundle_id,
-					     &trace_meta_offset_bytes,
-					     &trace_meta_size_bytes);
-	if (status != DBG_STATUS_OK)
-		return status;
-
-	/* Dump trace meta size param (trace_meta_size_bytes is always
-	 * dword-aligned).
-	 */
-	trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
-	offset += qed_dump_num_param(dump_buf + offset,	dump, "size",
-				     trace_meta_size_dwords);
-
-	/* Read trace meta image into dump buffer */
-	if (dump) {
-		status = qed_mcp_trace_read_meta(p_hwfn,
-						p_ptt,
-						trace_meta_offset_bytes,
-						trace_meta_size_bytes,
-						dump_buf + offset);
-		if (status != DBG_STATUS_OK)
-			return status;
+	if (mcp_access) {
+		status = qed_mcp_trace_get_meta_info(p_hwfn,
+						     p_ptt,
+						     trace_data_size_bytes,
+						     &running_bundle_id,
+						     &trace_meta_offset_bytes,
+						     &trace_meta_size_bytes);
+		if (status == DBG_STATUS_OK)
+			trace_meta_size_dwords =
+				BYTES_TO_DWORDS(trace_meta_size_bytes);
 	}
 
-	offset += trace_meta_size_dwords;
+	/* Dump trace meta size param */
+	offset += qed_dump_num_param(dump_buf + offset,
+				     dump, "size", trace_meta_size_dwords);
+
+	/* Read trace meta image into dump buffer */
+	if (dump && trace_meta_size_dwords)
+		status = qed_mcp_trace_read_meta(p_hwfn,
+						 p_ptt,
+						 trace_meta_offset_bytes,
+						 trace_meta_size_bytes,
+						 dump_buf + offset);
+	if (status == DBG_STATUS_OK)
+		offset += trace_meta_size_dwords;
 
 	*num_dumped_dwords = offset;
 
-	return DBG_STATUS_OK;
+	/* If no mcp access, indicate that the dump doesn't contain the meta
+	 * data from NVRAM.
+	 */
+	return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
 }
 
 /* Dump GRC FIFO */
@@ -4311,9 +4581,10 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
 			       struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
 {
 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+	struct fw_asserts_ram_section *asserts;
 	char storm_letter_str[2] = "?";
 	struct fw_info fw_info;
-	u32 offset = 0, i;
+	u32 offset = 0;
 	u8 storm_id;
 
 	/* Dump global params */
@@ -4323,8 +4594,8 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
 	offset += qed_dump_str_param(dump_buf + offset,
 				     dump, "dump-type", "fw-asserts");
 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
-		u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx,
-			last_list_idx, element_addr;
+		u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
+		u32 last_list_idx, addr;
 
 		if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id])
 			continue;
@@ -4332,6 +4603,8 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
 		/* Read FW info for the current Storm */
 		qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
 
+		asserts = &fw_info.fw_asserts_section;
+
 		/* Dump FW Asserts section header and params */
 		storm_letter_str[0] = s_storm_defs[storm_id].letter;
 		offset += qed_dump_section_hdr(dump_buf + offset, dump,
@@ -4339,12 +4612,10 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
 		offset += qed_dump_str_param(dump_buf + offset, dump, "storm",
 					     storm_letter_str);
 		offset += qed_dump_num_param(dump_buf + offset, dump, "size",
-					     fw_info.fw_asserts_section.
-					     list_element_dword_size);
+					     asserts->list_element_dword_size);
 
 		if (!dump) {
-			offset += fw_info.fw_asserts_section.
-				  list_element_dword_size;
+			offset += asserts->list_element_dword_size;
 			continue;
 		}
 
@@ -4352,28 +4623,22 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
 		fw_asserts_section_addr =
 			s_storm_defs[storm_id].sem_fast_mem_addr +
 			SEM_FAST_REG_INT_RAM +
-			RAM_LINES_TO_BYTES(fw_info.fw_asserts_section.
-					   section_ram_line_offset);
+			RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
 		next_list_idx_addr =
 			fw_asserts_section_addr +
-			DWORDS_TO_BYTES(fw_info.fw_asserts_section.
-					list_next_index_dword_offset);
+			DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
 		next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
 		last_list_idx = (next_list_idx > 0
 				 ? next_list_idx
-				 : fw_info.fw_asserts_section.list_num_elements)
-				- 1;
-		element_addr =
-			fw_asserts_section_addr +
-			DWORDS_TO_BYTES(fw_info.fw_asserts_section.
-					list_dword_offset) +
-			last_list_idx *
-			DWORDS_TO_BYTES(fw_info.fw_asserts_section.
-					list_element_dword_size);
-		for (i = 0;
-		     i < fw_info.fw_asserts_section.list_element_dword_size;
-		     i++, offset++, element_addr += BYTES_IN_DWORD)
-			dump_buf[offset] = qed_rd(p_hwfn, p_ptt, element_addr);
+				 : asserts->list_num_elements) - 1;
+		addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
+		       asserts->list_dword_offset +
+		       last_list_idx * asserts->list_element_dword_size;
+		offset +=
+		    qed_grc_dump_addr_range(p_hwfn, p_ptt,
+					    dump_buf + offset,
+					    dump, addr,
+					    asserts->list_element_dword_size);
 	}
 
 	/* Dump last section */
@@ -4386,13 +4651,10 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
 enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
 {
 	/* Convert binary data to debug arrays */
-	u32 num_of_buffers = *(u32 *)bin_ptr;
-	struct bin_buffer_hdr *buf_array;
+	struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
 	u8 buf_id;
 
-	buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1);
-
-	for (buf_id = 0; buf_id < num_of_buffers; buf_id++) {
+	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
 		s_dbg_arrays[buf_id].ptr =
 		    (u32 *)(bin_ptr + buf_array[buf_id].offset);
 		s_dbg_arrays[buf_id].size_in_dwords =
@@ -4402,6 +4664,17 @@ enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
 	return DBG_STATUS_OK;
 }
 
+/* Assign default GRC param values */
+void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
+{
+	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+	u32 i;
+
+	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
+		dev_data->grc.param_val[i] =
+		    s_grc_param_defs[i].default_val[dev_data->chip_id];
+}
+
 enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
 					      struct qed_ptt *p_ptt,
 					      u32 *buf_size)
@@ -4441,8 +4714,9 @@ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
 	/* GRC Dump */
 	status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
 
-	/* Clear all GRC params */
-	qed_dbg_grc_clear_params(p_hwfn);
+	/* Revert GRC params to their default */
+	qed_dbg_grc_set_params_default(p_hwfn);
+
 	return status;
 }
 
@@ -4495,6 +4769,10 @@ enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
 
 	/* Idle Check Dump */
 	*num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
+
+	/* Revert GRC params to their default */
+	qed_dbg_grc_set_params_default(p_hwfn);
+
 	return DBG_STATUS_OK;
 }
 
@@ -4519,11 +4797,15 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
 	u32 needed_buf_size_in_dwords;
 	enum dbg_status status;
 
-	status = qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt,
+	/* validate buffer size */
+	status =
+	    qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt,
 						&needed_buf_size_in_dwords);
 
-	if (status != DBG_STATUS_OK)
+	if (status != DBG_STATUS_OK &&
+	    status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
 		return status;
+
 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
 
@@ -4531,8 +4813,13 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
 
 	/* Perform dump */
-	return qed_mcp_trace_dump(p_hwfn,
-				  p_ptt, dump_buf, true, num_dumped_dwords);
+	status = qed_mcp_trace_dump(p_hwfn,
+				    p_ptt, dump_buf, true, num_dumped_dwords);
+
+	/* Revert GRC params to their default */
+	qed_dbg_grc_set_params_default(p_hwfn);
+
+	return status;
 }
 
 enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
@@ -4567,8 +4854,14 @@ enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
 
 	/* Update reset state */
 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
-	return qed_reg_fifo_dump(p_hwfn,
-				 p_ptt, dump_buf, true, num_dumped_dwords);
+
+	status = qed_reg_fifo_dump(p_hwfn,
+				   p_ptt, dump_buf, true, num_dumped_dwords);
+
+	/* Revert GRC params to their default */
+	qed_dbg_grc_set_params_default(p_hwfn);
+
+	return status;
 }
 
 enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
@@ -4603,8 +4896,13 @@ enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
 
 	/* Update reset state */
 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
-	return qed_igu_fifo_dump(p_hwfn,
-				 p_ptt, dump_buf, true, num_dumped_dwords);
+
+	status = qed_igu_fifo_dump(p_hwfn,
+				   p_ptt, dump_buf, true, num_dumped_dwords);
+	/* Revert GRC params to their default */
+	qed_dbg_grc_set_params_default(p_hwfn);
+
+	return status;
 }
 
 enum dbg_status
@@ -4641,9 +4939,16 @@ enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
 
 	/* Update reset state */
 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
-	return qed_protection_override_dump(p_hwfn,
-					    p_ptt,
-					    dump_buf, true, num_dumped_dwords);
+
+	status = qed_protection_override_dump(p_hwfn,
+					      p_ptt,
+					      dump_buf,
+					      true, num_dumped_dwords);
+
+	/* Revert GRC params to their default */
+	qed_dbg_grc_set_params_default(p_hwfn);
+
+	return status;
 }
 
 enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
@@ -5045,13 +5350,10 @@ static char s_temp_buf[MAX_MSG_LEN];
 enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
 {
 	/* Convert binary data to debug arrays */
-	u32 num_of_buffers = *(u32 *)bin_ptr;
-	struct bin_buffer_hdr *buf_array;
+	struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
 	u8 buf_id;
 
-	buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1);
-
-	for (buf_id = 0; buf_id < num_of_buffers; buf_id++) {
+	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
 		s_dbg_arrays[buf_id].ptr =
 		    (u32 *)(bin_ptr + buf_array[buf_id].offset);
 		s_dbg_arrays[buf_id].size_in_dwords =
@@ -5874,16 +6176,16 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
 		results_offset +=
 		    sprintf(qed_get_buf_ptr(results_buf,
 					    results_offset),
-			    "raw: 0x%016llx, address: 0x%07llx, access: %-5s, pf: %2lld, vf: %s, port: %lld, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
+			    "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
 			    elements[i].data,
-			    GET_FIELD(elements[i].data,
+			    (u32)GET_FIELD(elements[i].data,
 				      REG_FIFO_ELEMENT_ADDRESS) *
 				      REG_FIFO_ELEMENT_ADDR_FACTOR,
 				      s_access_strs[GET_FIELD(elements[i].data,
 						    REG_FIFO_ELEMENT_ACCESS)],
-			    GET_FIELD(elements[i].data,
-				      REG_FIFO_ELEMENT_PF), vf_str,
-			    GET_FIELD(elements[i].data,
+			    (u32)GET_FIELD(elements[i].data,
+					   REG_FIFO_ELEMENT_PF), vf_str,
+			    (u32)GET_FIELD(elements[i].data,
 				      REG_FIFO_ELEMENT_PORT),
 				      s_privilege_strs[GET_FIELD(elements[i].
 				      data,
@@ -6189,13 +6491,13 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
 		results_offset +=
 		    sprintf(qed_get_buf_ptr(results_buf,
 					    results_offset),
-			    "window %2d, address: 0x%07x, size: %7lld regs, read: %lld, write: %lld, read protection: %-12s, write protection: %-12s\n",
+			    "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
 			    i, address,
-			    GET_FIELD(elements[i].data,
+			    (u32)GET_FIELD(elements[i].data,
 				      PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
-			    GET_FIELD(elements[i].data,
+			    (u32)GET_FIELD(elements[i].data,
 				      PROTECTION_OVERRIDE_ELEMENT_READ),
-			    GET_FIELD(elements[i].data,
+			    (u32)GET_FIELD(elements[i].data,
 				      PROTECTION_OVERRIDE_ELEMENT_WRITE),
 			    s_protection_strs[GET_FIELD(elements[i].data,
 				PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
@@ -6508,7 +6810,7 @@ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
 	 */
 	rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
 						       &buf_size_dwords);
-	if (rc != DBG_STATUS_OK)
+	if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
 		return rc;
 	feature->buf_size = buf_size_dwords * sizeof(u32);
 	feature->dump_buf = vmalloc(feature->buf_size);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 37c2bfb..88ca78a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -574,6 +574,7 @@ enum core_event_opcode {
 	CORE_EVENT_TX_QUEUE_STOP,
 	CORE_EVENT_RX_QUEUE_START,
 	CORE_EVENT_RX_QUEUE_STOP,
+	CORE_EVENT_RX_QUEUE_FLUSH,
 	MAX_CORE_EVENT_OPCODE
 };
 
@@ -625,6 +626,7 @@ enum core_ramrod_cmd_id {
 	CORE_RAMROD_TX_QUEUE_START,
 	CORE_RAMROD_RX_QUEUE_STOP,
 	CORE_RAMROD_TX_QUEUE_STOP,
+	CORE_RAMROD_RX_QUEUE_FLUSH,
 	MAX_CORE_RAMROD_CMD_ID
 };
 
@@ -698,7 +700,8 @@ struct core_rx_slow_path_cqe {
 	u8 type;
 	u8 ramrod_cmd_id;
 	__le16 echo;
-	__le32 reserved1[7];
+	struct core_rx_cqe_opaque_data opaque_data;
+	__le32 reserved1[5];
 };
 
 union core_rx_cqe_union {
@@ -735,45 +738,46 @@ struct core_rx_stop_ramrod_data {
 	__le16 reserved2[2];
 };
 
-struct core_tx_bd_flags {
-	u8 as_bitfield;
-#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_MASK	0x1
-#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_SHIFT	0
-#define CORE_TX_BD_FLAGS_VLAN_INSERTION_MASK	0x1
-#define CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT	1
-#define CORE_TX_BD_FLAGS_START_BD_MASK	0x1
-#define CORE_TX_BD_FLAGS_START_BD_SHIFT	2
-#define CORE_TX_BD_FLAGS_IP_CSUM_MASK	0x1
-#define CORE_TX_BD_FLAGS_IP_CSUM_SHIFT	3
-#define CORE_TX_BD_FLAGS_L4_CSUM_MASK	0x1
-#define CORE_TX_BD_FLAGS_L4_CSUM_SHIFT	4
-#define CORE_TX_BD_FLAGS_IPV6_EXT_MASK	0x1
-#define CORE_TX_BD_FLAGS_IPV6_EXT_SHIFT	5
-#define CORE_TX_BD_FLAGS_L4_PROTOCOL_MASK	0x1
-#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT	6
-#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK	0x1
-#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7
+struct core_tx_bd_data {
+	__le16 as_bitfield;
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK	0x1
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT     0
+#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK	0x1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT      1
+#define CORE_TX_BD_DATA_START_BD_MASK	0x1
+#define CORE_TX_BD_DATA_START_BD_SHIFT            2
+#define CORE_TX_BD_DATA_IP_CSUM_MASK	0x1
+#define CORE_TX_BD_DATA_IP_CSUM_SHIFT             3
+#define CORE_TX_BD_DATA_L4_CSUM_MASK	0x1
+#define CORE_TX_BD_DATA_L4_CSUM_SHIFT             4
+#define CORE_TX_BD_DATA_IPV6_EXT_MASK	0x1
+#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT            5
+#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK	0x1
+#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT         6
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK	0x1
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
+#define CORE_TX_BD_DATA_NBDS_MASK	0xF
+#define CORE_TX_BD_DATA_NBDS_SHIFT                8
+#define CORE_TX_BD_DATA_ROCE_FLAV_MASK	0x1
+#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT           12
+#define CORE_TX_BD_DATA_IP_LEN_MASK	0x1
+#define CORE_TX_BD_DATA_IP_LEN_SHIFT              13
+#define CORE_TX_BD_DATA_RESERVED0_MASK            0x3
+#define CORE_TX_BD_DATA_RESERVED0_SHIFT           14
 };
 
 struct core_tx_bd {
 	struct regpair addr;
 	__le16 nbytes;
 	__le16 nw_vlan_or_lb_echo;
-	u8 bitfield0;
-#define CORE_TX_BD_NBDS_MASK	0xF
-#define CORE_TX_BD_NBDS_SHIFT	0
-#define CORE_TX_BD_ROCE_FLAV_MASK	0x1
-#define CORE_TX_BD_ROCE_FLAV_SHIFT	4
-#define CORE_TX_BD_RESERVED0_MASK	0x7
-#define CORE_TX_BD_RESERVED0_SHIFT	5
-	struct core_tx_bd_flags bd_flags;
+	struct core_tx_bd_data bd_data;
 	__le16 bitfield1;
 #define CORE_TX_BD_L4_HDR_OFFSET_W_MASK	0x3FFF
 #define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
 #define CORE_TX_BD_TX_DST_MASK	0x1
 #define CORE_TX_BD_TX_DST_SHIFT	14
-#define CORE_TX_BD_RESERVED1_MASK	0x1
-#define CORE_TX_BD_RESERVED1_SHIFT	15
+#define CORE_TX_BD_RESERVED_MASK         0x1
+#define CORE_TX_BD_RESERVED_SHIFT        15
 };
 
 enum core_tx_dest {
@@ -800,6 +804,14 @@ struct core_tx_stop_ramrod_data {
 	__le32 reserved0[2];
 };
 
+enum dcb_dhcp_update_flag {
+	DONT_UPDATE_DCB_DHCP,
+	UPDATE_DCB,
+	UPDATE_DSCP,
+	UPDATE_DCB_DSCP,
+	MAX_DCB_DHCP_UPDATE_FLAG
+};
+
 struct eth_mstorm_per_pf_stat {
 	struct regpair gre_discard_pkts;
 	struct regpair vxlan_discard_pkts;
@@ -893,6 +905,12 @@ union event_ring_element {
 	struct event_ring_next_addr next_addr;
 };
 
+enum fw_flow_ctrl_mode {
+	flow_ctrl_pause,
+	flow_ctrl_pfc,
+	MAX_FW_FLOW_CTRL_MODE
+};
+
 /* Major and Minor hsi Versions */
 struct hsi_fp_ver_struct {
 	u8 minor_ver_arr[2];
@@ -921,6 +939,7 @@ enum malicious_vf_error_id {
 	ETH_EDPM_OUT_OF_SYNC,
 	ETH_TUNN_IPV6_EXT_NBD_ERR,
 	ETH_CONTROL_PACKET_VIOLATION,
+	ETH_ANTI_SPOOFING_ERR,
 	MAX_MALICIOUS_VF_ERROR_ID
 };
 
@@ -1106,8 +1125,9 @@ struct tstorm_per_port_stat {
 	struct regpair ll2_mac_filter_discard;
 	struct regpair ll2_conn_disabled_discard;
 	struct regpair iscsi_irregular_pkt;
-	struct regpair reserved;
+	struct regpair fcoe_irregular_pkt;
 	struct regpair roce_irregular_pkt;
+	struct regpair reserved;
 	struct regpair eth_irregular_pkt;
 	struct regpair reserved1;
 	struct regpair preroce_irregular_pkt;
@@ -1648,6 +1668,11 @@ enum block_addr {
 	GRCBASE_MS = 0x6a0000,
 	GRCBASE_PHY_PCIE = 0x620000,
 	GRCBASE_LED = 0x6b8000,
+	GRCBASE_AVS_WRAP = 0x6b0000,
+	GRCBASE_RGFS = 0x19d0000,
+	GRCBASE_TGFS = 0x19e0000,
+	GRCBASE_PTLD = 0x19f0000,
+	GRCBASE_YPLD = 0x1a10000,
 	GRCBASE_MISC_AEU = 0x8000,
 	GRCBASE_BAR0_MAP = 0x1c00000,
 	MAX_BLOCK_ADDR
@@ -1732,6 +1757,11 @@ enum block_id {
 	BLOCK_MS,
 	BLOCK_PHY_PCIE,
 	BLOCK_LED,
+	BLOCK_AVS_WRAP,
+	BLOCK_RGFS,
+	BLOCK_TGFS,
+	BLOCK_PTLD,
+	BLOCK_YPLD,
 	BLOCK_MISC_AEU,
 	BLOCK_BAR0_MAP,
 	MAX_BLOCK_ID
@@ -1783,9 +1813,9 @@ struct dbg_attn_reg_result {
 	__le32 data;
 #define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK	0xFFFFFF
 #define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT	0
-#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_MASK	0xFF
-#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_SHIFT	24
-	__le16 attn_idx_offset;
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK	0xFF
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT	24
+	__le16 block_attn_offset;
 	__le16 reserved;
 	__le32 sts_val;
 	__le32 mask_val;
@@ -1815,12 +1845,12 @@ struct dbg_mode_hdr {
 /* Attention register */
 struct dbg_attn_reg {
 	struct dbg_mode_hdr mode;
-	__le16 attn_idx_offset;
+	__le16 block_attn_offset;
 	__le32 data;
 #define DBG_ATTN_REG_STS_ADDRESS_MASK	0xFFFFFF
 #define DBG_ATTN_REG_STS_ADDRESS_SHIFT	0
-#define DBG_ATTN_REG_NUM_ATTN_IDX_MASK	0xFF
-#define DBG_ATTN_REG_NUM_ATTN_IDX_SHIFT	24
+#define DBG_ATTN_REG_NUM_REG_ATTN_MASK	0xFF
+#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24
 	__le32 sts_clr_address;
 	__le32 mask_address;
 };
@@ -2001,6 +2031,20 @@ enum dbg_bus_clients {
 	MAX_DBG_BUS_CLIENTS
 };
 
+enum dbg_bus_constraint_ops {
+	DBG_BUS_CONSTRAINT_OP_EQ,
+	DBG_BUS_CONSTRAINT_OP_NE,
+	DBG_BUS_CONSTRAINT_OP_LT,
+	DBG_BUS_CONSTRAINT_OP_LTC,
+	DBG_BUS_CONSTRAINT_OP_LE,
+	DBG_BUS_CONSTRAINT_OP_LEC,
+	DBG_BUS_CONSTRAINT_OP_GT,
+	DBG_BUS_CONSTRAINT_OP_GTC,
+	DBG_BUS_CONSTRAINT_OP_GE,
+	DBG_BUS_CONSTRAINT_OP_GEC,
+	MAX_DBG_BUS_CONSTRAINT_OPS
+};
+
 /* Debug Bus memory address */
 struct dbg_bus_mem_addr {
 	__le32 lo;
@@ -2092,10 +2136,18 @@ struct dbg_bus_data {
 					      * DBG_BUS_TARGET_ID_PCI.
 					      */
 	__le16 reserved;
-	struct dbg_bus_block_data blocks[80];/* Debug Bus data for each block */
+	struct dbg_bus_block_data blocks[88];/* Debug Bus data for each block */
 	struct dbg_bus_storm_data storms[6]; /* Debug Bus data for each block */
 };
 
+enum dbg_bus_filter_types {
+	DBG_BUS_FILTER_TYPE_OFF,
+	DBG_BUS_FILTER_TYPE_PRE,
+	DBG_BUS_FILTER_TYPE_POST,
+	DBG_BUS_FILTER_TYPE_ON,
+	MAX_DBG_BUS_FILTER_TYPES
+};
+
 /* Debug bus frame modes */
 enum dbg_bus_frame_modes {
 	DBG_BUS_FRAME_MODE_0HW_4ST = 0, /* 0 HW dwords, 4 Storm dwords */
@@ -2104,6 +2156,40 @@ enum dbg_bus_frame_modes {
 	MAX_DBG_BUS_FRAME_MODES
 };
 
+enum dbg_bus_input_types {
+	DBG_BUS_INPUT_TYPE_STORM,
+	DBG_BUS_INPUT_TYPE_BLOCK,
+	MAX_DBG_BUS_INPUT_TYPES
+};
+
+enum dbg_bus_other_engine_modes {
+	DBG_BUS_OTHER_ENGINE_MODE_NONE,
+	DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
+	DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX,
+	DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX,
+	DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX,
+	MAX_DBG_BUS_OTHER_ENGINE_MODES
+};
+
+enum dbg_bus_post_trigger_types {
+	DBG_BUS_POST_TRIGGER_RECORD,
+	DBG_BUS_POST_TRIGGER_DROP,
+	MAX_DBG_BUS_POST_TRIGGER_TYPES
+};
+
+enum dbg_bus_pre_trigger_types {
+	DBG_BUS_PRE_TRIGGER_START_FROM_ZERO,
+	DBG_BUS_PRE_TRIGGER_NUM_CHUNKS,
+	DBG_BUS_PRE_TRIGGER_DROP,
+	MAX_DBG_BUS_PRE_TRIGGER_TYPES
+};
+
+enum dbg_bus_semi_frame_modes {
+	DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0,
+	DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3,
+	MAX_DBG_BUS_SEMI_FRAME_MODES
+};
+
 /* Debug bus states */
 enum dbg_bus_states {
 	DBG_BUS_STATE_IDLE, /* debug bus idle state (not recording) */
@@ -2115,6 +2201,19 @@ enum dbg_bus_states {
 	MAX_DBG_BUS_STATES
 };
 
+enum dbg_bus_storm_modes {
+	DBG_BUS_STORM_MODE_PRINTF,
+	DBG_BUS_STORM_MODE_PRAM_ADDR,
+	DBG_BUS_STORM_MODE_DRA_RW,
+	DBG_BUS_STORM_MODE_DRA_W,
+	DBG_BUS_STORM_MODE_LD_ST_ADDR,
+	DBG_BUS_STORM_MODE_DRA_FSM,
+	DBG_BUS_STORM_MODE_RH,
+	DBG_BUS_STORM_MODE_FOC,
+	DBG_BUS_STORM_MODE_EXT_STORE,
+	MAX_DBG_BUS_STORM_MODES
+};
+
 /* Debug bus target IDs */
 enum dbg_bus_targets {
 	/* records debug bus to DBG block internal buffer */
@@ -2128,13 +2227,10 @@ enum dbg_bus_targets {
 
 /* GRC Dump data */
 struct dbg_grc_data {
-	__le32 param_val[40]; /* Value of each GRC parameter. Array size must
-			       * match the enum dbg_grc_params.
-			       */
-	u8 param_set_by_user[40]; /* Indicates for each GRC parameter if it was
-				   * set by the user (0/1). Array size must
-				   * match the enum dbg_grc_params.
-				   */
+	u8 params_initialized;
+	u8 reserved1;
+	__le16 reserved2;
+	__le32 param_val[48];
 };
 
 /* Debug GRC params */
@@ -2181,6 +2277,8 @@ enum dbg_grc_params {
 	DBG_GRC_PARAM_PARITY_SAFE,
 	DBG_GRC_PARAM_DUMP_CM, /* dump CM memories (0/1) */
 	DBG_GRC_PARAM_DUMP_PHY, /* dump PHY memories (0/1) */
+	DBG_GRC_PARAM_NO_MCP,
+	DBG_GRC_PARAM_NO_FW_VER,
 	MAX_DBG_GRC_PARAMS
 };
 
@@ -2280,7 +2378,7 @@ struct dbg_tools_data {
 	struct dbg_bus_data bus; /* Debug Bus data */
 	struct idle_chk_data idle_chk; /* Idle Check data */
 	u8 mode_enable[40]; /* Indicates if a mode is enabled (0/1) */
-	u8 block_in_reset[80]; /* Indicates if a block is in reset state (0/1).
+	u8 block_in_reset[88]; /* Indicates if a block is in reset state (0/1).
 				*/
 	u8 chip_id; /* Chip ID (from enum chip_ids) */
 	u8 platform_id; /* Platform ID (from enum platform_ids) */
@@ -2418,7 +2516,6 @@ enum init_modes {
 	MODE_PORTS_PER_ENG_2,
 	MODE_PORTS_PER_ENG_4,
 	MODE_100G,
-	MODE_40G,
 	MODE_RESERVED6,
 	MAX_INIT_MODES
 };
@@ -2686,6 +2783,13 @@ struct iro {
  */
 enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr);
 /**
+ * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
+ *	default value.
+ *
+ * @param p_hwfn		- HW device data
+ */
+void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn);
+/**
  * @brief qed_dbg_grc_get_dump_buf_size - Returns the required buffer size for
  *	GRC Dump.
  *
@@ -3418,7 +3522,7 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
 #define	MSTORM_TPA_TIMEOUT_US_SIZE			(IRO[21].size)
 #define	MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
 	(IRO[22].base + ((pf_id) * IRO[22].m1))
-#define	MSTORM_ETH_PF_STAT_SIZE				(IRO[21].size)
+#define	MSTORM_ETH_PF_STAT_SIZE				(IRO[22].size)
 #define	USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
 	(IRO[23].base + ((stat_counter_id) * IRO[23].m1))
 #define	USTORM_QUEUE_STAT_SIZE				(IRO[23].size)
@@ -3482,7 +3586,7 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
 
 static const struct iro iro_arr[47] = {
 	{0x0, 0x0, 0x0, 0x0, 0x8},
-	{0x4cb0, 0x78, 0x0, 0x0, 0x78},
+	{0x4cb0, 0x80, 0x0, 0x0, 0x80},
 	{0x6318, 0x20, 0x0, 0x0, 0x20},
 	{0xb00, 0x8, 0x0, 0x0, 0x4},
 	{0xa80, 0x8, 0x0, 0x0, 0x4},
@@ -3521,13 +3625,13 @@ static const struct iro iro_arr[47] = {
 	{0xd888, 0x38, 0x0, 0x0, 0x24},
 	{0x12c38, 0x10, 0x0, 0x0, 0x8},
 	{0x11aa0, 0x38, 0x0, 0x0, 0x18},
-	{0xa8c0, 0x30, 0x0, 0x0, 0x10},
-	{0x86f8, 0x28, 0x0, 0x0, 0x18},
+	{0xa8c0, 0x38, 0x0, 0x0, 0x10},
+	{0x86f8, 0x30, 0x0, 0x0, 0x18},
 	{0x101f8, 0x10, 0x0, 0x0, 0x10},
 	{0xdd08, 0x48, 0x0, 0x0, 0x38},
 	{0x10660, 0x20, 0x0, 0x0, 0x20},
 	{0x2b80, 0x80, 0x0, 0x0, 0x10},
-	{0x5000, 0x10, 0x0, 0x0, 0x10},
+	{0x5020, 0x10, 0x0, 0x0, 0x10},
 };
 
 /* Runtime array offsets */
@@ -4595,6 +4699,12 @@ enum eth_ipv4_frag_type {
 	MAX_ETH_IPV4_FRAG_TYPE
 };
 
+enum eth_ip_type {
+	ETH_IPV4,
+	ETH_IPV6,
+	MAX_ETH_IP_TYPE
+};
+
 enum eth_ramrod_cmd_id {
 	ETH_RAMROD_UNUSED,
 	ETH_RAMROD_VPORT_START,
@@ -4944,7 +5054,10 @@ struct vport_update_ramrod_data_cmn {
 	u8 update_mtu_flg;
 
 	__le16 mtu;
-	u8 reserved[2];
+	u8 update_ctl_frame_checks_en_flg;
+	u8 ctl_frame_mac_check_en;
+	u8 ctl_frame_ethtype_check_en;
+	u8 reserved[15];
 };
 
 struct vport_update_ramrod_mcast {
@@ -4962,6 +5075,492 @@ struct vport_update_ramrod_data {
 	struct eth_vport_rss_config rss_config;
 };
 
+struct mstorm_eth_conn_ag_ctx {
+	u8 byte0;
+	u8 byte1;
+	u8 flags0;
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK	0x1
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT         1
+#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK	0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT          2
+#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK	0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT          4
+#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK	0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT          6
+	u8 flags1;
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK	0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT        0
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK	0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT        1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK	0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT        2
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK	0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT      3
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK	0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT      4
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT      5
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT      6
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT      7
+	__le16 word0;
+	__le16 word1;
+	__le32 reg0;
+	__le32 reg1;
+};
+
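
The MASK/SHIFT pairs in these aggregation-context structures are meant to be used through the driver's generic bit-field helpers rather than open-coded shifts. A short usage sketch, assuming the SET_FIELD()/GET_FIELD() macros already used elsewhere in qed:

	struct mstorm_eth_conn_ag_ctx ctx;
	u8 in_qm0;

	memset(&ctx, 0, sizeof(ctx));
	SET_FIELD(ctx.flags0, MSTORM_ETH_CONN_AG_CTX_CF0, 0x2);	/* 2-bit CF */
	SET_FIELD(ctx.flags1, MSTORM_ETH_CONN_AG_CTX_CF0EN, 1);	/* enable it */
	in_qm0 = GET_FIELD(ctx.flags0, MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0);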
+struct xstorm_eth_conn_agctxdq_ext_ldpart {
+	u8 reserved0;
+	u8 eth_state;
+	u8 flags0;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT           0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT              1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT              2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT           3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT              4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT              5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT              6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT              7
+	u8 flags1;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT              0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT              1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT              2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT                  3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT                  4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT                  5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT         6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT           7
+	u8 flags2;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT                    0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT                    2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT                    4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT                    6
+	u8 flags3;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT                    0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT                    2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT                    4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT                    6
+	u8 flags4;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT                    0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT                    2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT                   4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT                   6
+	u8 flags5;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT                   0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT                   2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT                   4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT                   6
+	u8 flags6;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT       0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT       2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT                  4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT           6
+	u8 flags7;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT               0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT             2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT              4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT                  6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT                  7
+	u8 flags8;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT                  0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT                  1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT                  2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT                  3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT                  4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT                  5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT                  6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT                  7
+	u8 flags9;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT                 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT                 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT                 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT                 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT                 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT                 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT    6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT    7
+	u8 flags10;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT               0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT        1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT            2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT             3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT           4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT             6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT             7
+	u8 flags11;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT             0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT             1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT         2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT                3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT                4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT                5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT           6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT                7
+	u8 flags12;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT               0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT               1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT           2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT           3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT               4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT               5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT               6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT               7
+	u8 flags13;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT               0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT               1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT           2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT           3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT           4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT           5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT           6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT           7
+	u8 flags14;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT       0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT     1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT   2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT   3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT         4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT       5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT             6
+	u8 edpm_event_id;
+	__le16 physical_q0;
+	__le16 quota;
+	__le16 edpm_num_bds;
+	__le16 tx_bd_cons;
+	__le16 tx_bd_prod;
+	__le16 tx_class;
+	__le16 conn_dpi;
+	u8 byte3;
+	u8 byte4;
+	u8 byte5;
+	u8 byte6;
+	__le32 reg0;
+	__le32 reg1;
+	__le32 reg2;
+	__le32 reg3;
+	__le32 reg4;
+};
+
+struct xstorm_eth_hw_conn_ag_ctx {
+	u8 reserved0;
+	u8 eth_state;
+	u8 flags0;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT           0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT              1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT              2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT           3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT              4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT              5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT              6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT              7
+	u8 flags1;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT              0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT              1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT              2
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT                  3
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT                  4
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT                  5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT         6
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT           7
+	u8 flags2;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT                    0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT                    2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT                    4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT                    6
+	u8 flags3;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT                    0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT                    2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT                    4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT                    6
+	u8 flags4;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT                    0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT                    2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT                   4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT                   6
+	u8 flags5;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT                   0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT                   2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT                   4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT                   6
+	u8 flags6;
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT       0
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT       2
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT                  4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT           6
+	u8 flags7;
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT             2
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT              4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT                  6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT                  7
+	u8 flags8;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT                  0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT                  1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT                  2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT                  3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT                  4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT                  5
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT                  6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT                  7
+	u8 flags9;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT                 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT                 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT                 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT                 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT                 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT                 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT    6
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT    7
+	u8 flags10;
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT        1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT            2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT             3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT           4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT             6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT             7
+	u8 flags11;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT             0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT             1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT         2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT                3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT                4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT                5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT           6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT                7
+	u8 flags12;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT               1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT           2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT           3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT               4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT               5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT               6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT               7
+	u8 flags13;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT               1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT           2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT           3
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT           4
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT           5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT           6
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT           7
+	u8 flags14;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT       0
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT     1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT   2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT   3
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT         4
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT       5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT             6
+	u8 edpm_event_id;
+	__le16 physical_q0;
+	__le16 quota;
+	__le16 edpm_num_bds;
+	__le16 tx_bd_cons;
+	__le16 tx_bd_prod;
+	__le16 tx_class;
+	__le16 conn_dpi;
+};
+
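
The multi-byte members of these contexts are little-endian (__le16), so a host-side writer must convert explicitly. A tiny sketch with an illustrative queue id; the values are placeholders, not taken from this patch:

	struct xstorm_eth_hw_conn_ag_ctx agg;
	u16 pq_id = 0x18;			/* example host-order value */

	memset(&agg, 0, sizeof(agg));
	agg.physical_q0 = cpu_to_le16(pq_id);	/* convert before writing */
	agg.tx_bd_prod = cpu_to_le16(0);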
 struct mstorm_rdma_task_st_ctx {
 	struct regpair temp[4];
 };
@@ -6165,7 +6764,7 @@ struct ystorm_roce_conn_st_ctx {
 };
 
 struct xstorm_roce_conn_st_ctx {
-	struct regpair temp[22];
+	struct regpair temp[24];
 };
 
 struct tstorm_roce_conn_st_ctx {
@@ -6220,7 +6819,7 @@ struct roce_create_qp_req_ramrod_data {
 	__le16 mtu;
 	__le16 pd;
 	__le16 sq_num_pages;
-	__le16 reseved2;
+	__le16 low_latency_phy_queue;
 	struct regpair sq_pbl_addr;
 	struct regpair orq_pbl_addr;
 	__le16 local_mac_addr[3];
@@ -6234,7 +6833,7 @@ struct roce_create_qp_req_ramrod_data {
 	u8 stats_counter_id;
 	u8 reserved3[7];
 	__le32 cq_cid;
-	__le16 physical_queue0;
+	__le16 regular_latency_phy_queue;
 	__le16 dpi;
 };
 
@@ -6282,15 +6881,16 @@ struct roce_create_qp_resp_ramrod_data {
 	__le32 dst_gid[4];
 	struct regpair qp_handle_for_cqe;
 	struct regpair qp_handle_for_async;
-	__le32 reserved2[2];
+	__le16 low_latency_phy_queue;
+	u8 reserved2[6];
 	__le32 cq_cid;
-	__le16 physical_queue0;
+	__le16 regular_latency_phy_queue;
 	__le16 dpi;
 };
 
 struct roce_destroy_qp_req_output_params {
 	__le32 num_bound_mw;
-	__le32 reserved;
+	__le32 cq_prod;
 };
 
 struct roce_destroy_qp_req_ramrod_data {
@@ -6299,7 +6899,7 @@ struct roce_destroy_qp_req_ramrod_data {
 
 struct roce_destroy_qp_resp_output_params {
 	__le32 num_invalidated_mw;
-	__le32 reserved;
+	__le32 cq_prod;
 };
 
 struct roce_destroy_qp_resp_ramrod_data {
@@ -7426,6 +8026,7 @@ struct ystorm_fcoe_conn_st_ctx {
 	u8 fcp_rsp_size;
 	__le16 mss;
 	struct regpair reserved;
+	__le16 min_frame_size;
 	u8 protection_info_flags;
 #define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK  0x1
 #define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 0
@@ -7444,7 +8045,6 @@ struct ystorm_fcoe_conn_st_ctx {
 #define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK                0x3F
 #define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT               2
 	u8 fcp_xfer_size;
-	u8 reserved3[2];
 };
 
 struct fcoe_vlan_fields {
@@ -8273,10 +8873,10 @@ struct xstorm_iscsi_conn_ag_ctx {
 #define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK                    0x3
 #define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT                   6
 	u8 flags7;
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK                    0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT                   0
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_MASK                    0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_SHIFT                   2
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK	0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT        0
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK	0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT        2
 #define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK                   0x3
 #define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT                  4
 #define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK                       0x1
@@ -8322,10 +8922,10 @@ struct xstorm_iscsi_conn_ag_ctx {
 #define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT                     0
 #define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK                 0x1
 #define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT                1
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK                 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT                2
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_MASK                 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT                3
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK	0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT     2
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK	0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT     3
 #define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK                0x1
 #define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT               4
 #define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK        0x1
@@ -8335,8 +8935,8 @@ struct xstorm_iscsi_conn_ag_ctx {
 #define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK    0x1
 #define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT   7
 	u8 flags11;
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK                     0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT                    0
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK	0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT              0
 #define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK                     0x1
 #define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT                    1
 #define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK                   0x1
@@ -8440,7 +9040,7 @@ struct xstorm_iscsi_conn_ag_ctx {
 	__le32 reg10;
 	__le32 reg11;
 	__le32 exp_stat_sn;
-	__le32 reg13;
+	__le32 ongoing_fast_rxmit_seq;
 	__le32 reg14;
 	__le32 reg15;
 	__le32 reg16;
@@ -8466,10 +9066,10 @@ struct tstorm_iscsi_conn_ag_ctx {
 #define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK                0x3
 #define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT               6
 	u8 flags1;
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1_MASK                0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT               0
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2_MASK                0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT               2
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK	0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT      0
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK	0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT      2
 #define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK     0x3
 #define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT    4
 #define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK                0x3
@@ -8490,10 +9090,10 @@ struct tstorm_iscsi_conn_ag_ctx {
 #define TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT              2
 #define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK              0x1
 #define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT             4
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK              0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT             5
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK              0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT             6
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK	0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT   5
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK	0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT   6
 #define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK  0x1
 #define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
 	u8 flags4;
@@ -8539,7 +9139,7 @@ struct tstorm_iscsi_conn_ag_ctx {
 	__le32 reg6;
 	__le32 reg7;
 	__le32 reg8;
-	u8 byte2;
+	u8 cid_offload_cnt;
 	u8 byte3;
 	__le16 word0;
 };
@@ -9067,6 +9667,10 @@ struct dcb_dscp_map {
 struct public_global {
 	u32 max_path;
 	u32 max_ports;
+#define MODE_1P 1
+#define MODE_2P 2
+#define MODE_3P 3
+#define MODE_4P 4
 	u32 debug_mb_offset;
 	u32 phymod_dbg_mb_offset;
 	struct couple_mode_teaming cmt;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index d891a68..2a50e2b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -215,13 +215,6 @@ static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
 {
 	u32 qm_line_crd;
 
-	/* In A0 - Limit the size of pbf queue so that only 511 commands with
-	 * the minimum size of 4 (FCoE minimum size)
-	 */
-	bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
-
-	if (is_bb_a0)
-		cmdq_lines = min_t(u32, cmdq_lines, 1022);
 	qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
 	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
 			 (u32)cmdq_lines);
@@ -343,13 +336,11 @@ static void qed_tx_pq_map_rt_init(
 	u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
 	u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
 			    QM_PF_QUEUE_GROUP_SIZE;
-	bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
 	u16 i, pq_id, pq_group;
 
 	/* a bit per Tx PQ indicating if the PQ is associated with a VF */
 	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
-	u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
-	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
+	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
 	u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
 	u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
 	u32 mem_addr_4kb = base_mem_addr_4kb;
@@ -371,6 +362,10 @@ static void qed_tx_pq_map_rt_init(
 		bool is_vf_pq = (i >= p_params->num_pf_pqs);
 		struct qm_rf_pq_map tx_pq_map;
 
+		bool rl_valid = p_params->pq_params[i].rl_valid &&
+				(p_params->pq_params[i].vport_id <
+				 MAX_QM_GLOBAL_RLS);
+
 		/* update first Tx PQ of VPORT/TC */
 		u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
 				    p_params->start_vport;
@@ -389,14 +384,18 @@ static void qed_tx_pq_map_rt_init(
 				     (p_params->pf_id <<
 				      QM_WFQ_VP_PQ_PF_SHIFT));
 		}
+
+		if (p_params->pq_params[i].rl_valid && !rl_valid)
+			DP_NOTICE(p_hwfn,
+				  "Invalid VPORT ID for rate limiter configuration");
 		/* fill PQ map entry */
 		memset(&tx_pq_map, 0, sizeof(tx_pq_map));
 		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
-		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
-			  p_params->pq_params[i].rl_valid ? 1 : 0);
+		SET_FIELD(tx_pq_map.reg,
+			  QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0);
 		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
 		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
-			  p_params->pq_params[i].rl_valid ?
+			  rl_valid ?
 			  p_params->pq_params[i].vport_id : 0);
 		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
 		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
@@ -413,8 +412,9 @@ static void qed_tx_pq_map_rt_init(
 			/* if PQ is associated with a VF, add indication
 			 * to PQ VF mask
 			 */
-			tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
-				(1 << (pq_id % tx_pq_vf_mask_width));
+			tx_pq_vf_mask[pq_id /
+				      QM_PF_QUEUE_GROUP_SIZE] |=
+			    BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE));
 			mem_addr_4kb += vport_pq_mem_4kb;
 		} else {
 			mem_addr_4kb += pq_mem_4kb;
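
With the A0-specific mask width removed, a VF PQ's position in tx_pq_vf_mask[] is always derived from QM_PF_QUEUE_GROUP_SIZE. A worked sketch of the arithmetic; the value 8 for QM_PF_QUEUE_GROUP_SIZE is assumed here purely to make the numbers concrete:

	/* e.g. with QM_PF_QUEUE_GROUP_SIZE == 8 and pq_id == 21:
	 * index = 21 / 8 = 2, bit = 21 % 8 = 5
	 */
	u32 idx = pq_id / QM_PF_QUEUE_GROUP_SIZE;

	tx_pq_vf_mask[idx] |= BIT(pq_id % QM_PF_QUEUE_GROUP_SIZE);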
@@ -480,8 +480,8 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
 	if (p_params->pf_id < MAX_NUM_PFS_BB)
 		crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
 	else
-		crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET +
-				 (p_params->pf_id % MAX_NUM_PFS_BB);
+		crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET;
+	crd_reg_offset += p_params->pf_id % MAX_NUM_PFS_BB;
 
 	inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
 	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
@@ -498,11 +498,11 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
 				 QM_WFQ_CRD_REG_SIGN_BIT);
 	}
 
-	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
-		     inc_val);
 	STORE_RT_REG(p_hwfn,
 		     QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
 		     QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
+	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
+		     inc_val);
 	return 0;
 }
 
@@ -576,6 +576,12 @@ static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
 {
 	u8 i, vport_id;
 
+	if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
+		DP_NOTICE(p_hwfn,
+			  "Invalid VPORT ID for rate limiter configuration");
+		return -1;
+	}
+
 	/* go over all PF VPORTs */
 	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
 		u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
@@ -785,6 +791,12 @@ int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
 {
 	u32 inc_val = QM_RL_INC_VAL(vport_rl);
 
+	if (vport_id >= MAX_QM_GLOBAL_RLS) {
+		DP_NOTICE(p_hwfn,
+			  "Invalid VPORT ID for rate limiter configuration");
+		return -1;
+	}
+
 	if (inc_val > QM_RL_MAX_INC_VAL) {
 		DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration");
 		return -1;
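
Both the per-VPORT init path above and this runtime path now reject IDs at or beyond MAX_QM_GLOBAL_RLS, since the global rate-limiter ID programmed into the PQ map is the VPORT ID itself. A hedged caller-side sketch; the p_ptt/vport_rl parameters are assumed from the existing prototype and shown only for illustration:

	if (vport_id >= MAX_QM_GLOBAL_RLS)	/* mirrors the new check */
		return -EINVAL;

	rc = qed_init_vport_rl(p_hwfn, p_ptt, vport_id, vport_rl);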
@@ -940,12 +952,6 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
 	       eth_geneve_enable ? 1 : 0);
 	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
 
-	/* comp ver */
-	reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
-	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
-	qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
-	qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
-
 	/* EDPM with geneve tunnel not supported in BB_B0 */
 	if (QED_IS_BB_B0(p_hwfn->cdev))
 		return;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index 243b64e..4a2e7be 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -554,7 +554,7 @@ int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
 	}
 
 	/* First Dword contains metadata and should be skipped */
-	buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32));
+	buf_hdr = (struct bin_buffer_hdr *)data;
 
 	offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
 	fw->fw_ver_info = (struct fw_ver_info *)(data + offset);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 9a0b9af..161d903 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -594,7 +594,7 @@ static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
 	u8 bd_flags = 0;
 
 	if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
-		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_VLAN_INSERTION, 1);
+		SET_FIELD(bd_flags, CORE_TX_BD_DATA_VLAN_INSERTION, 1);
 
 	return bd_flags;
 }
@@ -755,8 +755,8 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
 			     p_buffer->placement_offset;
 		parse_flags = p_buffer->parse_flags;
 		bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
-		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_FORCE_VLAN_MODE, 1);
-		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_L4_PROTOCOL, 1);
+		SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
+		SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);
 
 		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
 					       p_buffer->vlan, bd_flags,
@@ -1588,33 +1588,34 @@ static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
 	p_tx->cur_send_frag_num++;
 }
 
-static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
-					     struct qed_ll2_info *p_ll2,
-					     struct qed_ll2_tx_packet *p_curp,
-					     u8 num_of_bds,
-					     enum core_tx_dest tx_dest,
-					     u16 vlan,
-					     u8 bd_flags,
-					     u16 l4_hdr_offset_w,
-					     enum core_roce_flavor_type type,
-					     dma_addr_t first_frag,
-					     u16 first_frag_len)
+static void
+qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
+				 struct qed_ll2_info *p_ll2,
+				 struct qed_ll2_tx_packet *p_curp,
+				 u8 num_of_bds,
+				 enum core_tx_dest tx_dest,
+				 u16 vlan,
+				 u8 bd_flags,
+				 u16 l4_hdr_offset_w,
+				 enum core_roce_flavor_type roce_flavor,
+				 dma_addr_t first_frag,
+				 u16 first_frag_len)
 {
 	struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
 	u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
 	struct core_tx_bd *start_bd = NULL;
-	u16 frag_idx;
+	u16 bd_data = 0, frag_idx;
 
 	start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
 	start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
 	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
 		  cpu_to_le16(l4_hdr_offset_w));
 	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
-	start_bd->bd_flags.as_bitfield = bd_flags;
-	start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
-	    CORE_TX_BD_FLAGS_START_BD_SHIFT;
-	SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
-	SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type);
+	bd_data |= bd_flags;
+	SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
+	SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, num_of_bds);
+	SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
+	start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
 	DMA_REGPAIR_LE(start_bd->addr, first_frag);
 	start_bd->nbytes = cpu_to_le16(first_frag_len);
 
@@ -1639,9 +1640,8 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
 		struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
 
 		*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
-		(*p_bd)->bd_flags.as_bitfield = 0;
+		(*p_bd)->bd_data.as_bitfield = 0;
 		(*p_bd)->bitfield1 = 0;
-		(*p_bd)->bitfield0 = 0;
 		p_curp->bds_set[frag_idx].tx_frag = 0;
 		p_curp->bds_set[frag_idx].frag_len = 0;
 	}
@@ -2238,11 +2238,11 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
 	/* Request HW to calculate IP csum */
 	if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
 	      ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
-		flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
+		flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
 
 	if (skb_vlan_tag_present(skb)) {
 		vlan = skb_vlan_tag_get(skb);
-		flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT);
+		flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
 	}
 
 	rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
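
After the rename from CORE_TX_BD_FLAGS_* to CORE_TX_BD_DATA_*, callers still compose the per-packet flags with BIT() on the shift constants before the value is merged into the new 16-bit bd_data. A compressed sketch of that composition; the two conditions are placeholders, not code from this patch:

	u8 flags = 0;

	if (needs_ip_csum)		/* placeholder condition */
		flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
	if (insert_vlan)		/* placeholder condition */
		flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);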
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index d59d9df..36ae361 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -356,6 +356,10 @@
 	0x238804UL
 #define  RDIF_REG_STOP_ON_ERROR \
 	0x300040UL
+#define RDIF_REG_DEBUG_ERROR_INFO \
+	0x300400UL
+#define RDIF_REG_DEBUG_ERROR_INFO_SIZE \
+	64
 #define  SRC_REG_SOFT_RST \
 	0x23874cUL
 #define  TCFC_REG_ACTIVITY_COUNTER \
@@ -370,6 +374,10 @@
 	0x1700004UL
 #define  TDIF_REG_STOP_ON_ERROR \
 	0x310040UL
+#define TDIF_REG_DEBUG_ERROR_INFO \
+	0x310400UL
+#define TDIF_REG_DEBUG_ERROR_INFO_SIZE \
+	64
 #define  UCM_REG_INIT \
 	0x1280000UL
 #define  UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \
@@ -1236,6 +1244,26 @@
 	0x1901534UL
 #define USEM_REG_DBG_FORCE_FRAME \
 	0x1901538UL
+#define NWS_REG_DBG_SELECT \
+	0x700128UL
+#define NWS_REG_DBG_DWORD_ENABLE \
+	0x70012cUL
+#define NWS_REG_DBG_SHIFT \
+	0x700130UL
+#define NWS_REG_DBG_FORCE_VALID	\
+	0x700134UL
+#define NWS_REG_DBG_FORCE_FRAME	\
+	0x700138UL
+#define MS_REG_DBG_SELECT \
+	0x6a0228UL
+#define MS_REG_DBG_DWORD_ENABLE \
+	0x6a022cUL
+#define MS_REG_DBG_SHIFT \
+	0x6a0230UL
+#define MS_REG_DBG_FORCE_VALID \
+	0x6a0234UL
+#define MS_REG_DBG_FORCE_FRAME \
+	0x6a0238UL
 #define PCIE_REG_DBG_COMMON_SELECT \
 	0x054398UL
 #define PCIE_REG_DBG_COMMON_DWORD_ENABLE \
@@ -1448,6 +1476,8 @@
 	0x000b48UL
 #define RSS_REG_RSS_RAM_DATA \
 	0x238c20UL
+#define RSS_REG_RSS_RAM_DATA_SIZE \
+	4
 #define MISC_REG_BLOCK_256B_EN \
 	0x008c14UL
 #define NWS_REG_NWS_CMU	\
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index d9ff6b2..4bef5c59 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -66,13 +66,27 @@
 #include "qed_roce.h"
 #include "qed_ll2.h"
 
-void qed_async_roce_event(struct qed_hwfn *p_hwfn,
-			  struct event_ring_entry *p_eqe)
-{
-	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);
 
-	p_rdma_info->events.affiliated_event(p_rdma_info->events.context,
-					     p_eqe->opcode, &p_eqe->data);
+void qed_roce_async_event(struct qed_hwfn *p_hwfn,
+			  u8 fw_event_code, union rdma_eqe_data *rdma_data)
+{
+	if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
+		u16 icid =
+		    (u16)le32_to_cpu(rdma_data->rdma_destroy_qp_data.cid);
+
+		/* icid release in this async event can occur only if the icid
+		 * was offloaded to the FW. In case it wasn't offloaded this is
+		 * handled in qed_roce_sp_destroy_qp.
+		 */
+		qed_roce_free_real_icid(p_hwfn, icid);
+	} else {
+		struct qed_rdma_events *events = &p_hwfn->p_rdma_info->events;
+
+		events->affiliated_event(p_hwfn->p_rdma_info->events.context,
+					 fw_event_code,
+					 &rdma_data->async_handle);
+	}
 }
 
 static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
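
qed_roce_async_event() replaces qed_async_roce_event() and now takes the event code and RDMA EQE data separately, so the destroy-QP-done case can release the real icid before reaching the affiliated-event callback. A hedged sketch of the expected dispatch in the event-queue completion path (cf. the qed_spq.c hunk below); the p_eqe member names (protocol_id, opcode, data.rdma_data) are assumptions, not taken from this file:

	switch (p_eqe->protocol_id) {
	case PROTOCOLID_ROCE:
		qed_roce_async_event(p_hwfn, p_eqe->opcode,
				     &p_eqe->data.rdma_data);
		return 0;
	default:
		break;
	}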
@@ -113,6 +127,15 @@ static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
 	return 0;
 }
 
+static void qed_bmap_set_id(struct qed_hwfn *p_hwfn,
+			    struct qed_bmap *bmap, u32 id_num)
+{
+	if (id_num >= bmap->max_count)
+		return;
+
+	__set_bit(id_num, bmap->bitmap);
+}
+
 static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
 				struct qed_bmap *bmap, u32 id_num)
 {
@@ -129,6 +152,15 @@ static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
 	}
 }
 
+static int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
+			    struct qed_bmap *bmap, u32 id_num)
+{
+	if (id_num >= bmap->max_count)
+		return -1;
+
+	return test_bit(id_num, bmap->bitmap);
+}
+
 static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
 {
 	/* First sb id for RoCE is after all the l2 sb */
@@ -170,7 +202,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
 	/* Queue zone lines are shared between RoCE and L2 in such a way that
 	 * they can be used by each without obstructing the other.
 	 */
-	p_rdma_info->queue_zone_base = (u16)FEAT_NUM(p_hwfn, QED_L2_QUEUE);
+	p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
+	p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE);
 
 	/* Allocate a struct with device params and fill it */
 	p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
@@ -248,9 +281,18 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
 		goto free_tid_map;
 	}
 
+	/* Allocate bitmap for cids used for responders/requesters. */
+	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons);
+	if (rc) {
+		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+			   "Failed to allocate real cid bitmap, rc = %d\n", rc);
+		goto free_cid_map;
+	}
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
 	return 0;
 
+free_cid_map:
+	kfree(p_rdma_info->cid_map.bitmap);
 free_tid_map:
 	kfree(p_rdma_info->tid_map.bitmap);
 free_toggle_map:
@@ -273,7 +315,22 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
 
 static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
 {
+	struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
 	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+	int wait_count = 0;
+
+	/* When destroying a RoCE QP, control is returned to the user after
+	 * the synchronous part. The asynchronous part may take a little longer.
+	 * We delay for a short while if an async destroy QP is still expected.
+	 * Beyond the added delay we clear the bitmap anyway.
+	 */
+	while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
+		msleep(100);
+		if (wait_count++ > 20) {
+			DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
+			break;
+		}
+	}
 
 	kfree(p_rdma_info->cid_map.bitmap);
 	kfree(p_rdma_info->tid_map.bitmap);
@@ -724,6 +781,14 @@ static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
 	u32 addr;
 
 	p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+	if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
+		DP_NOTICE(p_hwfn,
+			  "queue zone offset %d is too large (max is %d)\n",
+			  qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
+		return;
+	}
+
 	qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
 	addr = GTT_BAR0_MAP_REG_USDM_RAM +
 	       USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
@@ -1080,6 +1145,14 @@ static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
 	return flavor;
 }
 
+void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
+{
+	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
+	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1);
+	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
 static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
 {
 	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
@@ -1139,6 +1212,13 @@ static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
 	return rc;
 }
 
+static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
+{
+	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+	qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid);
+	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
 static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
 					struct qed_rdma_qp *qp)
 {
@@ -1147,7 +1227,8 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
 	union qed_qm_pq_params qm_params;
 	enum roce_flavor roce_flavor;
 	struct qed_spq_entry *p_ent;
-	u16 physical_queue0 = 0;
+	u16 regular_latency_queue;
+	enum protocol_type proto;
 	int rc;
 
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
@@ -1229,15 +1310,19 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
 	p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
 	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
 	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
-	p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
 	p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
 				       qp->rq_cq_id);
 
 	memset(&qm_params, 0, sizeof(qm_params));
 	qm_params.roce.qpid = qp->icid >> 1;
-	physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
+	regular_latency_queue = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE,
+					      &qm_params);
 
-	p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
+	p_ramrod->regular_latency_phy_queue =
+	    cpu_to_le16(regular_latency_queue);
+	p_ramrod->low_latency_phy_queue =
+	    cpu_to_le16(regular_latency_queue);
+
 	p_ramrod->dpi = cpu_to_le16(qp->dpi);
 
 	qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
@@ -1253,13 +1338,19 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
 
 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 
-	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d physical_queue0 = 0x%x\n",
-		   rc, physical_queue0);
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+		   "rc = %d regular physical queue = 0x%x\n", rc,
+		   regular_latency_queue);
 
 	if (rc)
 		goto err;
 
 	qp->resp_offloaded = true;
+	qp->cq_prod = 0;
+
+	proto = p_hwfn->p_rdma_info->proto;
+	qed_roce_set_real_cid(p_hwfn, qp->icid -
+			      qed_cxt_get_proto_cid_start(p_hwfn, proto));
 
 	return rc;
 
@@ -1280,7 +1371,8 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
 	union qed_qm_pq_params qm_params;
 	enum roce_flavor roce_flavor;
 	struct qed_spq_entry *p_ent;
-	u16 physical_queue0 = 0;
+	u16 regular_latency_queue;
+	enum protocol_type proto;
 	int rc;
 
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
@@ -1351,15 +1443,19 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
 	p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
 	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
 	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
-	p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
-	p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
-				       qp->sq_cq_id);
+	p_ramrod->cq_cid =
+	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
 
 	memset(&qm_params, 0, sizeof(qm_params));
 	qm_params.roce.qpid = qp->icid >> 1;
-	physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
+	regular_latency_queue = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE,
+					      &qm_params);
 
-	p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
+	p_ramrod->regular_latency_phy_queue =
+	    cpu_to_le16(regular_latency_queue);
+	p_ramrod->low_latency_phy_queue =
+	    cpu_to_le16(regular_latency_queue);
+
 	p_ramrod->dpi = cpu_to_le16(qp->dpi);
 
 	qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
@@ -1378,6 +1474,10 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
 		goto err;
 
 	qp->req_offloaded = true;
+	proto = p_hwfn->p_rdma_info->proto;
+	qed_roce_set_real_cid(p_hwfn,
+			      qp->icid + 1 -
+			      qed_cxt_get_proto_cid_start(p_hwfn, proto));
 
 	return rc;
 
@@ -1577,7 +1677,8 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
 
 static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
 					    struct qed_rdma_qp *qp,
-					    u32 *num_invalidated_mw)
+					    u32 *num_invalidated_mw,
+					    u32 *cq_prod)
 {
 	struct roce_destroy_qp_resp_output_params *p_ramrod_res;
 	struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
@@ -1588,8 +1689,22 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
 
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
 
-	if (!qp->resp_offloaded)
+	*num_invalidated_mw = 0;
+	*cq_prod = qp->cq_prod;
+
+	if (!qp->resp_offloaded) {
+		/* If a responder was never offloaded, we need to free the cids
+		 * allocated in create_qp as a FW async event will never arrive
+		 */
+		u32 cid;
+
+		cid = qp->icid -
+		      qed_cxt_get_proto_cid_start(p_hwfn,
+						  p_hwfn->p_rdma_info->proto);
+		qed_roce_free_cid_pair(p_hwfn, (u16)cid);
+
 		return 0;
+	}
 
 	/* Get SPQ entry */
 	memset(&init_data, 0, sizeof(init_data));
@@ -1624,6 +1739,8 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
 		goto err;
 
 	*num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);
+	*cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
+	qp->cq_prod = *cq_prod;
 
 	/* Free IRQ - only if ramrod succeeded, in case FW is still using it */
 	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
@@ -1827,10 +1944,8 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
 
 	out_params->draining = false;
 
-	if (rq_err_state)
+	if (rq_err_state || sq_err_state)
 		qp->cur_state = QED_ROCE_QP_STATE_ERR;
-	else if (sq_err_state)
-		qp->cur_state = QED_ROCE_QP_STATE_SQE;
 	else if (sq_draining)
 		out_params->draining = true;
 	out_params->state = qp->cur_state;
@@ -1849,10 +1964,9 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
 
 static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 {
-	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
 	u32 num_invalidated_mw = 0;
 	u32 num_bound_mw = 0;
-	u32 start_cid;
+	u32 cq_prod;
 	int rc;
 
 	/* Destroys the specified QP */
@@ -1866,7 +1980,8 @@ static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 
 	if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
 		rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
-						      &num_invalidated_mw);
+						      &num_invalidated_mw,
+						      &cq_prod);
 		if (rc)
 			return rc;
 
@@ -1881,21 +1996,6 @@ static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 				  "number of invalidate memory windows is different from bounded ones\n");
 			return -EINVAL;
 		}
-
-		spin_lock_bh(&p_rdma_info->lock);
-
-		start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
-							p_rdma_info->proto);
-
-		/* Release responder's icid */
-		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map,
-				    qp->icid - start_cid);
-
-		/* Release requester's icid */
-		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map,
-				    qp->icid + 1 - start_cid);
-
-		spin_unlock_bh(&p_rdma_info->lock);
 	}
 
 	return 0;
@@ -2110,12 +2210,19 @@ static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
 		return rc;
 	} else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
 		/* Any state -> RESET */
+		u32 cq_prod;
 
-		rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
-						      &num_invalidated_mw);
+		/* Send destroy responder ramrod */
+		rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
+						      qp,
+						      &num_invalidated_mw,
+						      &cq_prod);
+
 		if (rc)
 			return rc;
 
+		qp->cq_prod = cq_prod;
+
 		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
 						      &num_bound_mw);
 
@@ -2454,6 +2561,31 @@ static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
 	return rc;
 }
 
+static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
+{
+	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+	u32 start_cid, cid, xcid;
+
+	/* An even icid belongs to a responder while an odd icid belongs to a
+	 * requester. The icid received as input can be either, so we derive
+	 * the partner icid (xcid). Only when both are free can the pair be
+	 * released back to the underlying "cid" map.
+	 */
+	start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
+	cid = icid - start_cid;
+	xcid = cid ^ 1;
+
+	spin_lock_bh(&p_rdma_info->lock);
+
+	qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid);
+	if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) {
+		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid);
+		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid);
+	}
+
+	spin_unlock_bh(&p_rdma_info->lock);
+}
+
 static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
 {
 	return QED_LEADING_HWFN(cdev);
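
The qed_roce_free_real_icid() helper added above relies on responder and requester connections occupying consecutive even/odd icids: real_cid_map tracks which half of a pair has actually been released, and only once both halves are free are the underlying cid_map entries returned. The stand-alone sketch below illustrates just that pairing rule; the bool arrays and helper names are stand-ins for the driver's qed_bmap API, not the actual implementation.

/* Sketch only - mirrors the cid/xcid pairing rule of
 * qed_roce_free_real_icid(). The arrays stand in for the driver's
 * real_cid_map and cid_map bitmaps; this is not the real qed_bmap API.
 */
#include <stdbool.h>
#include <stdio.h>

#define NUM_CIDS 4

static bool real_cid_map[NUM_CIDS];	/* halves not yet released      */
static bool cid_map[NUM_CIDS];		/* allocation state of the pair */

static void free_real_icid(unsigned int cid)
{
	unsigned int xcid = cid ^ 1;	/* partner: even resp <-> odd req */

	real_cid_map[cid] = false;
	if (!real_cid_map[xcid]) {
		/* both halves released - recycle the underlying pair */
		cid_map[cid] = false;
		cid_map[xcid] = false;
	}
}

int main(void)
{
	/* pair 0/1 in use: responder on cid 0, requester on cid 1 */
	real_cid_map[0] = real_cid_map[1] = true;
	cid_map[0] = cid_map[1] = true;

	free_real_icid(0);	/* responder gone, requester still live */
	printf("after resp free: cid_map = {%d, %d}\n", cid_map[0], cid_map[1]);

	free_real_icid(1);	/* requester gone too - pair recycled */
	printf("after req free:  cid_map = {%d, %d}\n", cid_map[0], cid_map[1]);

	return 0;
}

In the driver the same sequence runs under p_rdma_info->lock, since the two halves of a pair can be released from different contexts.
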
@@ -2773,7 +2905,7 @@ static int qed_roce_ll2_tx(struct qed_dev *cdev,
 						      : QED_LL2_RROCE;
 
 	if (pkt->roce_mode == ROCE_V2_IPV4)
-		flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
+		flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
 
 	/* Tx header */
 	rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h
index 36cf4b2..3ccc08a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h
@@ -82,6 +82,7 @@ struct qed_rdma_info {
 	struct qed_bmap qp_map;
 	struct qed_bmap srq_map;
 	struct qed_bmap cid_map;
+	struct qed_bmap real_cid_map;
 	struct qed_bmap dpi_map;
 	struct qed_bmap toggle_bits;
 	struct qed_rdma_events events;
@@ -92,6 +93,7 @@ struct qed_rdma_info {
 	u32 num_qps;
 	u32 num_mrs;
 	u16 queue_zone_base;
+	u16 max_queue_zones;
 	enum protocol_type proto;
 };
 
@@ -153,6 +155,7 @@ struct qed_rdma_qp {
 	dma_addr_t irq_phys_addr;
 	u8 irq_num_pages;
 	bool resp_offloaded;
+	u32 cq_prod;
 
 	u8 remote_mac_addr[6];
 	u8 local_mac_addr[6];
@@ -163,8 +166,8 @@ struct qed_rdma_qp {
 
 #if IS_ENABLED(CONFIG_QED_RDMA)
 void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
-void qed_async_roce_event(struct qed_hwfn *p_hwfn,
-			  struct event_ring_entry *p_eqe);
+void qed_roce_async_event(struct qed_hwfn *p_hwfn,
+			  u8 fw_event_code, union rdma_eqe_data *rdma_data);
 void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
 				     u8 connection_handle,
 				     void *cookie,
@@ -187,7 +190,9 @@ void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
 				     u16 src_mac_addr_lo, bool b_last_packet);
 #else
 static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
-static inline void qed_async_roce_event(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) {}
+static inline void qed_roce_async_event(struct qed_hwfn *p_hwfn,
+					u8 fw_event_code,
+					union rdma_eqe_data *rdma_data) {}
 static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
 						   u8 connection_handle,
 						   void *cookie,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 645328a..54fbe37 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -296,9 +296,12 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
 			   struct event_ring_entry *p_eqe)
 {
 	switch (p_eqe->protocol_id) {
+#if IS_ENABLED(CONFIG_QED_RDMA)
 	case PROTOCOLID_ROCE:
-		qed_async_roce_event(p_hwfn, p_eqe);
+		qed_roce_async_event(p_hwfn, p_eqe->opcode,
+				     &p_eqe->data.rdma_data);
 		return 0;
+#endif
 	case PROTOCOLID_COMMON:
 		return qed_sriov_eqe_event(p_hwfn,
 					   p_eqe->opcode,
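
The hunk above compiles the PROTOCOLID_ROCE case only when CONFIG_QED_RDMA is enabled, complementing the empty static inline qed_roce_async_event() stub that qed_roce.h provides for RDMA-less builds. Below is a minimal, generic sketch of that config-gated stub pattern, using a plain #ifdef in place of the kernel's IS_ENABLED() helper; CONFIG_FOO and foo_handle_event() are invented names for illustration, not driver code.

/* Config-gated stub sketch: callers compile unchanged whether or not
 * the feature is built in. Names here are invented for illustration.
 */
#include <stdio.h>

/* #define CONFIG_FOO 1 */	/* uncomment to emulate the feature being enabled */

#ifdef CONFIG_FOO
static void foo_handle_event(int event_code)
{
	printf("handling event %d\n", event_code);
}
#else
/* empty inline stub keeps the call site compiling when the feature is off */
static inline void foo_handle_event(int event_code) { }
#endif

int main(void)
{
	foo_handle_event(42);	/* becomes a no-op unless CONFIG_FOO is defined */
	return 0;
}
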
@@ -306,14 +309,6 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
 	case PROTOCOLID_ISCSI:
 		if (!IS_ENABLED(CONFIG_QED_ISCSI))
 			return -EINVAL;
-		if (p_eqe->opcode == ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES) {
-			u32 cid = le32_to_cpu(p_eqe->data.iscsi_info.cid);
-
-			qed_ooo_release_connection_isles(p_hwfn,
-							 p_hwfn->p_ooo_info,
-							 cid);
-			return 0;
-		}
 
 		if (p_hwfn->p_iscsi_info->event_cb) {
 			struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;